/* drivers/staging/android/ion/mtk/ion_history.c */
1 #include <asm/page.h>
2 #include <linux/dma-mapping.h>
3 #include <linux/err.h>
4 #include <linux/highmem.h>
5 #include <linux/mm.h>
6 #include <linux/scatterlist.h>
7 #include <linux/seq_file.h>
8 #include <linux/slab.h>
9 #include <linux/vmalloc.h>
10 #include "ion_priv.h"
11 #include <linux/slab.h>
12 #include <linux/xlog.h>
13 #include <mach/m4u.h>
14 #include <linux/ion_drv.h>
15 #include <linux/mutex.h>
16 #include <linux/mmprofile.h>
17 #include <linux/debugfs.h>
18 #include <linux/kthread.h>
19 #include "ion_profile.h"
20 #include "ion_drv_priv.h"
21
22
23 //==============================================
24 // history record
25 //==============================================
26
27 struct history_record{
28 void *record;
29 unsigned int record_num;
30 unsigned int record_size;
31 unsigned int top;
32 unsigned int wrapped;
33 spinlock_t lock;
34 const char *name;
35 struct dentry *debug_file;
36 int (*show)(struct seq_file *seq, void *record, void *priv);
37 int (*destory_record)(void *record, void *priv);
38 void *private;
39 unsigned long bitmap_busy[0];
40 };
41
42 static unsigned long inline history_record_test_busy(
43 struct history_record *history_record,
44 unsigned int index)
45 {
46 unsigned long *p = history_record->bitmap_busy + index / BITS_PER_LONG;
47 int bit_mask = 1UL << (index % BITS_PER_LONG);
48 return *p & bit_mask;
49 }
50
51 static void inline history_record_set_busy(
52 struct history_record *history_record,
53 unsigned int index)
54 {
55 unsigned long *p = history_record->bitmap_busy + index / BITS_PER_LONG;
56 int bit_mask = 1UL << (index % BITS_PER_LONG);
57 *p |= bit_mask;
58 }
59
60 static void inline history_record_clear_busy(
61 struct history_record *history_record,
62 unsigned int index)
63 {
64 unsigned long *p = history_record->bitmap_busy + index / BITS_PER_LONG;
65 int bit_mask = 1UL << (index % BITS_PER_LONG);
66 *p &= ~bit_mask;
67 }
68
69 static void inline history_record_dump_busy(struct seq_file *seq,
70 struct history_record *history_record)
71 {
72 unsigned long longs = BITS_TO_LONGS(history_record->record_num);
73 unsigned long i;
74 for(i=0; i<longs; i++) {
75 if(seq)
76 seq_printf(seq, "0x%lx, ", history_record->bitmap_busy[i]);
77 else
78 printk("0x%lx, ", history_record->bitmap_busy[i]);
79 }
80 if(seq)
81 seq_printf(seq, "\n");
82 else
83 printk("\n");
84 }
85
86 void* history_record_get_record(struct history_record *history_record)
87 {
88 unsigned int index;
89 void *record;
90
91 spin_lock(&history_record->lock);
92
93 index = history_record->top;
94 history_record->top++;
95 if(history_record->top >= history_record->record_num) {
96 history_record->top = 0;
97 history_record->wrapped = 1;
98 }
99
100 if(history_record_test_busy(history_record, index)) {
101 IONMSG("%s: error to get record %d, bitmap is:\n", __FUNCTION__, index);
102 history_record_dump_busy(NULL, history_record);
103 spin_unlock(&history_record->lock);
104 return NULL;
105 }
106
107 record = history_record->record + history_record->record_size * index;
108
109 if(history_record->wrapped && history_record->destory_record)
110 history_record->destory_record(record, history_record->private);
111
112 history_record_set_busy(history_record, index);
113
114 spin_unlock(&history_record->lock);
115
116 return record;
117 }
118 void history_record_put_record(struct history_record *history_record, void* record)
119 {
120 unsigned int index = (record - history_record->record)
121 / history_record->record_size;
122 spin_lock(&history_record->lock);
123 history_record_clear_busy(history_record, index);
124 spin_unlock(&history_record->lock);
125 }
126
127
/* Per-open-file seq state: a snapshot of the ring window taken on the
 * first ->start() call so iteration stays stable across reads. */
struct history_seq_priv {
	struct history_record * history_record;
	unsigned int start;	/* oldest slot index at snapshot time */
	unsigned int num;	/* number of valid slots to iterate */
};
133
134
135 int history_record_show(struct seq_file *seq, void *record)
136 {
137 struct history_seq_priv *seq_priv = seq->private;
138 struct history_record * history_record = seq_priv->history_record;
139 unsigned int index = (record - history_record->record)
140 / history_record->record_size;
141 spin_lock(&history_record->lock);
142
143 if(history_record_test_busy(history_record, index)){
144 spin_unlock(&history_record->lock);
145 return 0;
146 }
147
148 history_record->show(seq, record, history_record->private);
149
150 spin_unlock(&history_record->lock);
151 return 0;
152 }
153
154 static void *history_seq_start(struct seq_file *p, loff_t *pos)
155 {
156 struct history_seq_priv *seq_priv = p->private;
157 struct history_record * history_record = seq_priv->history_record;
158 unsigned int index;
159
160 if(*pos == 0) {
161 spin_lock(&history_record->lock);
162 if(!history_record->wrapped) {
163 seq_priv->start = 0;
164 seq_priv->num = history_record->top;
165 } else {
166 seq_priv->start = history_record->top;
167 seq_priv->num = history_record->record_num;
168 }
169 spin_unlock(&history_record->lock);
170 }
171
172 if(*pos >= seq_priv->num)
173 return NULL;
174
175 index = seq_priv->start + *pos;
176 if(index >= history_record->record_num)
177 index -= history_record->record_num;
178
179 return history_record->record + history_record->record_size * index;
180 }
181
182 static void *history_seq_next(struct seq_file *p, void *v, loff_t *pos)
183 {
184 struct history_seq_priv *seq_priv = p->private;
185 struct history_record * history_record = seq_priv->history_record;
186 unsigned int index;
187
188 ++*pos;
189 if(*pos >= seq_priv->num)
190 return NULL;
191
192 index = seq_priv->start + *pos;
193 if(index >= history_record->record_num)
194 index -= history_record->record_num;
195
196 return history_record->record + history_record->record_size * index;
197 }
198
199 static void history_seq_stop(struct seq_file *p, void *v)
200 {
201 }
202
203 static struct seq_operations seq_op = {
204 .start = history_seq_start,
205 .next = history_seq_next,
206 .stop = history_seq_stop,
207 .show = history_record_show,
208 };
209 static int history_record_open(struct inode *inode, struct file *file)
210 {
211 struct history_record * history_record = inode->i_private;
212 struct history_seq_priv *seq_priv;
213 int res = -ENOMEM;
214
215 res = seq_open(file, &seq_op);
216 if (res) {
217 IONMSG("%s fail\n", __FUNCTION__);
218 return res;
219 }
220
221 seq_priv = kzalloc(sizeof(*seq_priv), GFP_KERNEL);
222 seq_priv->history_record = history_record;
223 ((struct seq_file *)file->private_data)->private = seq_priv;
224
225 return 0;
226 }
227
228 static int history_record_release(struct inode *inode, struct file *file)
229 {
230 struct history_seq_priv *seq_priv =
231 ((struct seq_file *)file->private_data)->private;
232
233 if(seq_priv)
234 kfree(seq_priv);
235
236 return seq_release(inode, file);
237 }
238
/* debugfs file_operations: standard seq_file plumbing with a custom
 * open/release pair to manage the per-open snapshot context. */
static const struct file_operations history_record_fops = {
	.open = history_record_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = history_record_release,
};
245
246
247 struct history_record * history_record_create(unsigned int record_num,
248 unsigned int record_size,
249 int (*show)(struct seq_file *seq, void *record, void *priv),
250 int (*destory_record)(void *record, void *priv),
251 void *priv,
252 const char *name,
253 struct dentry *debugfs_parent
254 )
255 {
256 struct history_record *history_record;
257 int num_align;
258 size_t size_align;
259 int bitmap_bytes;
260
261 /* as vmalloc is page align.
262 we will enlarge record_num to num_align
263 to get the utmost of memory allocated
264 */
265 size_align = record_num * record_size;
266 size_align = ALIGN(size_align, PAGE_SIZE);
267 num_align = size_align / record_size;
268
269 bitmap_bytes = BITS_TO_LONGS(num_align) * sizeof(unsigned long);
270
271 history_record = kzalloc(sizeof(struct history_record) + bitmap_bytes, GFP_KERNEL);
272 if(!history_record) {
273 IONMSG("%s error to kzalloc %zd.\n", __FUNCTION__, sizeof(struct history_record));
274 return ERR_PTR(-ENOMEM);
275 }
276
277 history_record->record = vzalloc(size_align);
278 if(!history_record->record) {
279 IONMSG("%s error to valloc %zu.\n", __FUNCTION__, size_align);
280 kfree(history_record);
281 return ERR_PTR(-ENOMEM);
282 }
283
284 history_record->record_num = num_align;
285 history_record->record_size = record_size;
286 history_record->show = show;
287 history_record->destory_record = destory_record;
288 history_record->name = name;
289 spin_lock_init(&history_record->lock);
290
291 history_record->debug_file = debugfs_create_file(name, 0644,
292 debugfs_parent, history_record, &history_record_fops);
293
294 return history_record;
295 }
296
297 void history_record_destroy(struct history_record *history_record)
298 {
299
300 int busy;
301 unsigned int i, bitmap_longs = BITS_TO_LONGS(history_record->record_num);
302 unsigned int end;
303
304 debugfs_remove(history_record->debug_file);
305
306 /* wait untill no busy */
307 do {
308 busy = 0;
309 spin_lock(&history_record->lock);
310 for(i=0; i<bitmap_longs; i++) {
311 if(history_record->bitmap_busy[i]) {
312 /* busy ! */
313 IONMSG("warning: %s when busy %d\n", __FUNCTION__, i);
314 spin_unlock(&history_record->lock);
315 busy = 1;
316 cond_resched();
317 break;
318 }
319 }
320 }while(busy);
321 /* we have history_record->lock locked here */
322
323 if(!history_record->wrapped)
324 end = history_record->top;
325 else
326 end = history_record->record_num;
327
328 for(i=0; i<end; i++) {
329 history_record->destory_record(
330 history_record->record + i * history_record->record_num,
331 history_record->private);
332 }
333
334 vfree(history_record->record);
335 history_record->record = NULL;
336 kfree(history_record);
337
338 return ;
339 }
340
341
342 //====== string hash ==============
343 struct string_struct {
344 unsigned int ref;
345 struct hlist_node list;
346 char str[0];
347 };
348
#define STR_HASH_BUCKET_NUM 32
/* bucket heads for the interned-string hash; every bucket (and every
 * string's refcount) is protected by the single ion_str_hash_lock */
static struct hlist_head ion_str_hash[STR_HASH_BUCKET_NUM];
DEFINE_SPINLOCK(ion_str_hash_lock);
352
353 /* as tested, simple add hash is better than RS_hash & BKDR_hash ! */
354 static unsigned int ADDHash(char* str, unsigned int len)
355 {
356 unsigned int hash = 0, i;
357 for(i=0; i<len; i++) {
358 hash += str[i];
359 }
360 return hash % STR_HASH_BUCKET_NUM;
361 }
362
363 static struct string_struct * string_hash_get(const char* str)
364 {
365 struct hlist_head *head;
366 struct string_struct *string;
367 unsigned int len = strlen(str);
368 unsigned int hash;
369
370 hash = ADDHash((char *)str, len);
371 head = &ion_str_hash[hash];
372
373 spin_lock(&ion_str_hash_lock);
374 hlist_for_each_entry(string, head, list) {
375 if(!strcmp(str, string->str))
376 break;
377 }
378
379 if(string) {
380 string->ref++;
381 } else {
382 /* add string */
383 string = kzalloc(sizeof(*string) + len + 1, GFP_KERNEL);
384 if(!string) {
385 IONMSG("%s: kzalloc fail size=%zd.\n", __FUNCTION__,
386 sizeof(*string) + len + 1);
387 goto out;
388 }
389 string->ref = 1;
390 INIT_HLIST_NODE(&string->list);
391 memcpy(string->str, str, len);
392 string->str[len] = '\0';
393 hlist_add_head(&string->list, head);
394 }
395
396 out:
397 spin_unlock(&ion_str_hash_lock);
398 return string;
399
400 }
401
402 static int string_hash_put(struct string_struct *string)
403 {
404 spin_lock(&ion_str_hash_lock);
405
406 if(!string->ref) {
407 IONMSG("error %s string_ref is 0!!!\n", __FUNCTION__);
408 spin_unlock(&ion_str_hash_lock);
409 return -EINVAL;
410 }
411 string->ref--;
412
413 if(!string->ref)
414 {
415 hlist_del(&string->list);
416 kfree(string);
417 }
418
419 spin_unlock(&ion_str_hash_lock);
420 return 0;
421
422 }
423
424 int string_hash_debug_show(struct seq_file *seq, void *unused)
425 {
426 struct hlist_head *head;
427 struct string_struct *string;
428 unsigned int hash, num;
429
430 spin_lock(&ion_str_hash_lock);
431
432 for(hash=0; hash<STR_HASH_BUCKET_NUM; hash++) {
433 head = &ion_str_hash[hash];
434 num = 0;
435 hlist_for_each_entry(string, head, list) {
436 seq_printf(seq, "\t%s : %d\n", string->str, string->ref);
437 num++;
438 }
439 seq_printf(seq, "hash %d : %d strings\n", hash, num);
440 }
441
442 spin_unlock(&ion_str_hash_lock);
443 return 0;
444
445 }
446
/* debugfs open: single_open() since the whole hash dumps in one pass. */
static int string_hash_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, string_hash_debug_show, inode->i_private);
}

/* read-only debugfs plumbing for the string-hash dump above */
static const struct file_operations string_hash_debug_fops = {
	.open = string_hash_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
458
459
460 //===== ion client history =======
461
/* One client-history ring entry. The union overlays the two interned
 * name pointers with a timestamp: which interpretation is live is
 * decided by the address field — a real client pointer means the
 * names are valid, a small sentinel value means this is a total/orphan
 * summary record carrying a timestamp instead. */
struct ion_client_record {
	union {
		struct {
			struct string_struct *client_name;	/* interned task/client name */
			struct string_struct *dbg_name;		/* interned debug name */
		};

		unsigned long long time;	/* summary records: local_clock() ns */
	};
	size_t size;	/* bytes held by this client / category */

	/* sentinel "addresses" (< CLIENT_ADDRESS_FLAG_MAX) mark summary records */
#define CLIENT_ADDRESS_TOTAL ((void*)1)
#define CLIENT_ADDRESS_ORPHAN ((void*)2)
#define CLIENT_ADDRESS_FLAG_MAX ((void*)0x1000)
	void *address;	/* ion client pointer, or a sentinel above */
};
478
479
480 static int ion_client_record_show(struct seq_file *seq, void *record, void *priv)
481 {
482 struct ion_client_record *client_record = record;
483
484 if(client_record->address > CLIENT_ADDRESS_FLAG_MAX) {
485 char *client_name = NULL, *dbg_name = NULL;
486 if(client_record->client_name)
487 client_name = client_record->client_name->str;
488 if(client_record->dbg_name)
489 dbg_name = client_record->dbg_name->str;
490 seq_printf(seq, "%16.s(%16.s) %16zu 0x%p\n",
491 client_name,
492 dbg_name,
493 client_record->size,
494 client_record->address);
495 } else {
496 unsigned long long rem_ns, t;
497 char *name;
498
499 if(client_record->address == CLIENT_ADDRESS_TOTAL)
500 name = "total";
501 else if (client_record->address == CLIENT_ADDRESS_ORPHAN)
502 name = "orphan";
503 else
504 name = "error";
505
506 t = client_record->time;
507 rem_ns = do_div(t, 1000000000ULL);
508 seq_printf(seq, "time(%lld.%lld)\t%s %16zu\n",
509 t, rem_ns, name, client_record->size);
510 }
511
512 return 0;
513 }
514
515 static int ion_client_destory_record(void *record, void *priv)
516 {
517 struct ion_client_record *client_record = record;
518
519 if(client_record->address > CLIENT_ADDRESS_FLAG_MAX) {
520 if(client_record->client_name)
521 string_hash_put(client_record->client_name);
522 if(client_record->dbg_name)
523 string_hash_put(client_record->dbg_name);
524 }
525
526 return 0;
527 }
528
529 static int ion_client_write_record(
530 struct history_record *client_history,
531 const char *client_name,
532 const char *dbg_name,
533 size_t size,
534 void* address)
535 {
536 struct ion_client_record *record;
537 record = history_record_get_record(client_history);
538 if(!record)
539 return -1;
540
541 record->address = address;
542 record->size = size;
543
544 if(likely(address > CLIENT_ADDRESS_FLAG_MAX)) {
545 record->client_name = string_hash_get(client_name);
546 record->dbg_name = string_hash_get(dbg_name);
547 } else {
548 //total/orphan record: reuse name field
549 record->time = local_clock();
550 }
551
552 history_record_put_record(client_history, record);
553
554 return 0;
555 }
556
/* global history rings: per-client snapshots; buffer history is
 * declared but never populated (see the empty branch in the sampler) */
static struct history_record *g_client_history;
static struct history_record *g_buffer_history;
/* periodic sampler thread, created by ion_history_init() */
struct task_struct *ion_history_kthread;
#define ION_HISTORY_TIME_INTERVAL (HZ) /* sample once per second */
561
562 static int write_mm_page_pool(int high, int order, int cache, size_t size)
563 {
564 char name[50];
565
566 snprintf(name, sizeof(name), "%smem order_%d pool", high ? "high" : "low", order);
567 if(size)
568 ion_client_write_record(g_client_history, name,
569 cache ? "cache" : "nocache", size, CLIENT_ADDRESS_FLAG_MAX+1);
570 return 0;
571 }
572
573
/*
 * Kernel-thread body (note: name typo "reocrd" kept — referenced by
 * ion_history_init): every ION_HISTORY_TIME_INTERVAL jiffies, walk
 * all ion clients and buffers and append per-client size records plus
 * total/orphan summary records to g_client_history.
 * Exits (returning 0) on kthread_should_stop() or a fatal signal.
 */
static int ion_history_reocrd(void *data)
{
	struct ion_device *dev = g_ion_device;
	struct rb_node *n;

	while(1)
	{
		if(kthread_should_stop()) {
			IONMSG("stop ion history threak \n");
			break;
		}

		/* sleep one interval; wake early if signalled */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(ION_HISTORY_TIME_INTERVAL);
		if(fatal_signal_pending(current)) {
			IONMSG("ion history thread being killed\n");
			break;
		}

		//== client ==
		if(g_client_history)
		{
			down_read(&dev->lock);
			for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
				struct ion_client *client = rb_entry(n, struct ion_client,
						node);
				size_t size = 0;
				struct rb_node *nh;

				/* sum the sizes of every buffer this client holds */
				mutex_lock(&client->lock);
				for (nh = rb_first(&client->handles); nh; nh = rb_next(nh)) {
					struct ion_handle *handle = rb_entry(nh,
							struct ion_handle,
							node);
					size += handle->buffer->size;
				}
				mutex_unlock(&client->lock);

				if (!size)
					continue;
				/* user clients are recorded under their task name,
				 * kernel clients under the client name with "kernel" */
				if (client->task) {
					char task_comm[TASK_COMM_LEN];
					get_task_comm(task_comm, client->task);

					ion_client_write_record(g_client_history, task_comm,
							client->dbg_name, size, client);
				} else {
					ion_client_write_record(g_client_history, client->name,
							"kernel", size, client);
				}
			}
			up_read(&dev->lock);
		}


		if(g_client_history || g_buffer_history) {
			size_t total_size = 0;
			size_t total_orphaned_size = 0;

			mutex_lock(&dev->buffer_lock);
			for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
				struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						node);
				total_size += buffer->size;
				/* a buffer with no handles is orphaned */
				if (!buffer->handle_count) {
					total_orphaned_size += buffer->size;
				}

				if(g_buffer_history)
				{
					//record buffer here;
				}

			}
			mutex_unlock(&dev->buffer_lock);

			if(g_client_history) {
				/* record page pool info */
				ion_mm_heap_for_each_pool(write_mm_page_pool);

				if(total_orphaned_size)
					ion_client_write_record(g_client_history, NULL,
							NULL, total_orphaned_size, CLIENT_ADDRESS_ORPHAN);
				/* total size with time stamp */
				ion_client_write_record(g_client_history, NULL,
						NULL, total_size, CLIENT_ADDRESS_TOTAL);
			}
		}

	}

	return 0;
}
667
668
669 int ion_history_init(void)
670 {
671 struct sched_param param = { .sched_priority = 0 };
672
673 g_client_history = history_record_create(
674 2048,
675 sizeof(struct ion_client_record),
676 ion_client_record_show,
677 ion_client_destory_record,
678 NULL,
679 "client_history",
680 g_ion_device->debug_root);
681
682 if(IS_ERR_OR_NULL(g_client_history)) {
683 IONMSG("create client history fail\n");
684 return (long)g_client_history;
685 }
686
687 debugfs_create_file("string_hash", 644, g_ion_device->debug_root,
688 NULL, &string_hash_debug_fops);
689
690 ion_history_kthread = kthread_run(ion_history_reocrd, NULL, "%s", "ion_history");
691 if (IS_ERR(ion_history_kthread)) {
692 IONMSG("%s: creating thread for ion history\n", __func__);
693 return PTR_RET(ion_history_kthread);
694 }
695
696 sched_setscheduler(ion_history_kthread, SCHED_IDLE, &param);
697
698 return 0;
699 }
700
701
702
703
#if 0 /* test history record */

/* NOTE(review): dead code kept for reference. Before re-enabling:
 * ion_test_show is missing its `return 0;`, and ion_test_write does
 * not check history_record_get_record() for NULL. */
int ion_test_show(struct seq_file *seq, void *record, void *priv)
{
	seq_printf(seq, "%d\n", *(unsigned int*)record);
}


/* append one int record to the test ring */
int ion_test_write(struct history_record *history_record, int data)
{
	int *record = history_record_get_record(history_record);
	*record = data;
	history_record_put_record(history_record, record);
	return 0;
}

/* debugfs write: generate `val` sequential test records */
static int debug_set(void *data, u64 val)
{
	struct history_record *history_record = data;
	int i;

	for(i=0; i<val; i++)
	{
		ion_test_write(history_record, i);
	}

	return 0;
}

static int debug_get(void *data, u64 *val)
{
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_test_fops, debug_get,
		debug_set, "%llu\n");


/* create a 100-slot int ring plus a "record" trigger file in debugfs */
int ion_test_history_init()
{
	struct history_record * history_record;
	int i;

	history_record = history_record_create(100, sizeof(int),
			ion_test_show,
			NULL,
			NULL,
			"test",
			g_ion_device->debug_root );

	debugfs_create_file(
		"record", 0644, g_ion_device->debug_root , history_record,
		&debug_test_fops);
	return 0;
}

#endif
761
762