Merge "path: add ppmgr [4/5]" into amlogic-4.9-dev
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_media.git] / drivers / frame_provider / decoder / utils / vdec.c
CommitLineData
b9164398
NQ
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17#define DEBUG
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#include <linux/amlogic/media/vfm/vfm_ext.h>
37/*for VDEC_DEBUG_SUPPORT*/
38#include <linux/time.h>
39
40#include <linux/amlogic/media/utils/vdec_reg.h>
41#include "vdec.h"
42#include "vdec_trace.h"
43#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
44#include "vdec_profile.h"
45#endif
46#include <linux/of.h>
47#include <linux/of_fdt.h>
48#include <linux/libfdt_env.h>
49#include <linux/of_reserved_mem.h>
50#include <linux/dma-contiguous.h>
51#include <linux/cma.h>
52#include <linux/module.h>
53#include <linux/slab.h>
54#include <linux/dma-mapping.h>
55#include <linux/dma-contiguous.h>
56#include "../../../stream_input/amports/amports_priv.h"
57
58#include <linux/amlogic/media/utils/amports_config.h>
59#include "../utils/amvdec.h"
60#include "vdec_input.h"
61
62#include "../../../common/media_clock/clk/clk.h"
63#include <linux/reset.h>
64#include <linux/amlogic/cpu_version.h>
65#include <linux/amlogic/media/codec_mm/codec_mm.h>
66#include <linux/amlogic/media/video_sink/video_keeper.h>
67#include <linux/amlogic/media/codec_mm/configs.h>
68#include <linux/amlogic/media/frame_sync/ptsserv.h>
69#include "secprot.h"
70#include "../../../common/chips/decoder_cpu_ver_info.h"
71#include "frame_check.h"
72
73static DEFINE_MUTEX(vdec_mutex);
74
75#define MC_SIZE (4096 * 4)
76#define CMA_ALLOC_SIZE SZ_64M
77#define MEM_NAME "vdec_prealloc"
78static int inited_vcodec_num;
79#define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ)
80static int poweron_clock_level;
81static int keep_vdec_mem;
82static unsigned int debug_trace_num = 16 * 20;
83static int step_mode;
84static unsigned int clk_config;
85/*
86 &1: sched_priority to MAX_RT_PRIO -1.
87 &2: always reload firmware.
88 &4: vdec canvas debug enable
89 */
90static unsigned int debug;
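/*
 * Illustrative note (not from the original source): the bits documented above
 * combine, so e.g. debug = 5 (1 | 4) would both raise the decoder thread to
 * MAX_RT_PRIO - 1 and enable the canvas debug prints, while debug = 2 only
 * forces the firmware to be reloaded every time.
 */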
91
92static int hevc_max_reset_count;
93
94static int no_powerdown;
95static int parallel_decode = 1;
96static int fps_detection;
97static int fps_clear;
98static DEFINE_SPINLOCK(vdec_spin_lock);
99
100#define HEVC_TEST_LIMIT 100
101#define GXBB_REV_A_MINOR 0xA
102
103#define CANVAS_MAX_SIZE (AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1)
104
105struct am_reg {
106 char *name;
107 int offset;
108};
109
110struct vdec_isr_context_s {
111 int index;
112 int irq;
113 irq_handler_t dev_isr;
114 irq_handler_t dev_threaded_isr;
115 void *dev_id;
116 struct vdec_s *vdec;
117};
118
119struct decode_fps_s {
120 u32 frame_count;
121 u64 start_timestamp;
122 u64 last_timestamp;
123 u32 fps;
124};
125
126struct vdec_core_s {
127 struct list_head connected_vdec_list;
128 spinlock_t lock;
129 spinlock_t canvas_lock;
130 spinlock_t fps_lock;
131 struct ida ida;
132 atomic_t vdec_nr;
133 struct vdec_s *vfm_vdec;
134 struct vdec_s *active_vdec;
135 struct vdec_s *active_hevc;
136 struct vdec_s *hint_fr_vdec;
137 struct platform_device *vdec_core_platform_device;
138 struct device *cma_dev;
139 struct semaphore sem;
140 struct task_struct *thread;
141 struct workqueue_struct *vdec_core_wq;
142
143 unsigned long sched_mask;
144 struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
145 int power_ref_count[VDEC_MAX];
146 struct vdec_s *last_vdec;
147 int parallel_dec;
148 unsigned long power_ref_mask;
149 int vdec_combine_flag;
150 struct decode_fps_s decode_fps[MAX_INSTANCE_MUN];
151};
152
153struct canvas_status_s {
154 int type;
155 int canvas_used_flag;
156 int id;
157};
158
159
160static struct vdec_core_s *vdec_core;
161
162static const char * const vdec_status_string[] = {
163 "VDEC_STATUS_UNINITIALIZED",
164 "VDEC_STATUS_DISCONNECTED",
165 "VDEC_STATUS_CONNECTED",
166 "VDEC_STATUS_ACTIVE"
167};
168
169static int debugflags;
170
171static struct canvas_status_s canvas_stat[AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1];
172
173
174int vdec_get_debug_flags(void)
175{
176 return debugflags;
177}
178EXPORT_SYMBOL(vdec_get_debug_flags);
179
180unsigned char is_mult_inc(unsigned int type)
181{
182 unsigned char ret = 0;
183 if (vdec_get_debug_flags() & 0xf000)
184 ret = (vdec_get_debug_flags() & 0x1000)
185 ? 1 : 0;
186 else if (type & PORT_TYPE_DECODER_SCHED)
187 ret = 1;
188 return ret;
189}
190EXPORT_SYMBOL(is_mult_inc);
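/*
 * Illustrative note (not from the original source): bits 0xf000 of debugflags
 * override the port-type check above, e.g. debugflags = 0x1000 forces the
 * multi-instance path for every port, while another bit of 0xf000 with 0x1000
 * clear forces the single-instance path; with debugflags = 0 the result simply
 * follows PORT_TYPE_DECODER_SCHED.
 */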
191
192static const bool cores_with_input[VDEC_MAX] = {
193 true, /* VDEC_1 */
194 false, /* VDEC_HCODEC */
195 false, /* VDEC_2 */
196 true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
197 false, /* VDEC_HEVC_BACK */
198};
199
200static const int cores_int[VDEC_MAX] = {
201 VDEC_IRQ_1,
202 VDEC_IRQ_2,
203 VDEC_IRQ_0,
204 VDEC_IRQ_0,
205 VDEC_IRQ_HEVC_BACK
206};
207
208unsigned long vdec_canvas_lock(struct vdec_core_s *core)
209{
210 unsigned long flags;
211 spin_lock_irqsave(&core->canvas_lock, flags);
212
213 return flags;
214}
215
216void vdec_canvas_unlock(struct vdec_core_s *core, unsigned long flags)
217{
218 spin_unlock_irqrestore(&core->canvas_lock, flags);
219}
220
221unsigned long vdec_fps_lock(struct vdec_core_s *core)
222{
223 unsigned long flags;
224 spin_lock_irqsave(&core->fps_lock, flags);
225
226 return flags;
227}
228
229void vdec_fps_unlock(struct vdec_core_s *core, unsigned long flags)
230{
231 spin_unlock_irqrestore(&core->fps_lock, flags);
232}
233
234unsigned long vdec_core_lock(struct vdec_core_s *core)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&core->lock, flags);
239
240 return flags;
241}
242
243void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
244{
245 spin_unlock_irqrestore(&core->lock, flags);
246}
247
248static u64 vdec_get_us_time_system(void)
249{
250 struct timeval tv;
251
252 do_gettimeofday(&tv);
253
254 return div64_u64(timeval_to_ns(&tv), 1000);
255}
256
257static void vdec_fps_clear(int id)
258{
259 if (id >= MAX_INSTANCE_MUN)
260 return;
261
262 vdec_core->decode_fps[id].frame_count = 0;
263 vdec_core->decode_fps[id].start_timestamp = 0;
264 vdec_core->decode_fps[id].last_timestamp = 0;
265 vdec_core->decode_fps[id].fps = 0;
266}
267
268static void vdec_fps_clearall(void)
269{
270 int i;
271
272 for (i = 0; i < MAX_INSTANCE_MUN; i++) {
273 vdec_core->decode_fps[i].frame_count = 0;
274 vdec_core->decode_fps[i].start_timestamp = 0;
275 vdec_core->decode_fps[i].last_timestamp = 0;
276 vdec_core->decode_fps[i].fps = 0;
277 }
278}
279
280static void vdec_fps_detec(int id)
281{
282 unsigned long flags;
283
284 if (fps_detection == 0)
285 return;
286
287 if (id >= MAX_INSTANCE_MUN)
288 return;
289
290 flags = vdec_fps_lock(vdec_core);
291
292 if (fps_clear == 1) {
293 vdec_fps_clearall();
294 fps_clear = 0;
295 }
296
297 vdec_core->decode_fps[id].frame_count++;
298 if (vdec_core->decode_fps[id].frame_count == 1) {
299 vdec_core->decode_fps[id].start_timestamp =
300 vdec_get_us_time_system();
301 vdec_core->decode_fps[id].last_timestamp =
302 vdec_core->decode_fps[id].start_timestamp;
303 } else {
304 vdec_core->decode_fps[id].last_timestamp =
305 vdec_get_us_time_system();
306 vdec_core->decode_fps[id].fps =
307 (u32)div_u64(((u64)(vdec_core->decode_fps[id].frame_count) *
308 10000000000),
309 (vdec_core->decode_fps[id].last_timestamp -
310 vdec_core->decode_fps[id].start_timestamp));
311 }
312 vdec_fps_unlock(vdec_core, flags);
313}
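/*
 * Worked example (illustrative, not from the original source): the fps field
 * computed above is frame_count * 10^10 / elapsed_us, i.e. frames per second
 * scaled by 10000. Decoding 300 frames over 10 s (10,000,000 us) gives
 * 300 * 10^10 / 10^7 = 300000, i.e. 30.0000 fps.
 */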
314
315
316
317static int get_canvas(unsigned int index, unsigned int base)
318{
319 int start;
320 int canvas_index = index * base;
321 int ret;
322
323 if ((base > 4) || (base == 0))
324 return -1;
325
326 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
327 <= AMVDEC_CANVAS_MAX1) {
328 start = AMVDEC_CANVAS_START_INDEX + base * index;
329 } else {
330 canvas_index -= (AMVDEC_CANVAS_MAX1 -
331 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
332 if (canvas_index <= AMVDEC_CANVAS_MAX2)
333 start = canvas_index / base;
334 else
335 return -1;
336 }
337
338 if (base == 1) {
339 ret = start;
340 } else if (base == 2) {
341 ret = ((start + 1) << 16) | ((start + 1) << 8) | start;
342 } else if (base == 3) {
343 ret = ((start + 2) << 16) | ((start + 1) << 8) | start;
344 } else if (base == 4) {
345 ret = (((start + 3) << 24) | (start + 2) << 16) |
346 ((start + 1) << 8) | start;
347 }
348
349 return ret;
350}
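/*
 * Worked example (illustrative, not from the original source): get_canvas()
 * packs up to four consecutive canvas indices into one 32-bit word, 8 bits per
 * plane, lowest byte first. If start happened to resolve to 0x78 (a
 * hypothetical value), base == 3 would return 0x007a7978, i.e. planes
 * 0x78, 0x79 and 0x7a.
 */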
351
352static int get_canvas_ex(int type, int id)
353{
354 int i;
355 unsigned long flags;
356
357 flags = vdec_canvas_lock(vdec_core);
358
359 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
360 /*0x10-0x15 has been used by rdma*/
361 if ((i >= 0x10) && (i <= 0x15))
362 continue;
363 if ((canvas_stat[i].type == type) &&
364 (canvas_stat[i].id & (1 << id)) == 0) {
365 canvas_stat[i].canvas_used_flag++;
366 canvas_stat[i].id |= (1 << id);
367 if (debug & 4)
368 pr_debug("get used canvas %d\n", i);
369 vdec_canvas_unlock(vdec_core, flags);
370 if (i < AMVDEC_CANVAS_MAX2 + 1)
371 return i;
372 else
373 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
374 }
375 }
376
377 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
378 /*0x10-0x15 has been used by rdma*/
379 if ((i >= 0x10) && (i <= 0x15))
380 continue;
381 if (canvas_stat[i].type == 0) {
382 canvas_stat[i].type = type;
383 canvas_stat[i].canvas_used_flag = 1;
384 canvas_stat[i].id = (1 << id);
385 if (debug & 4) {
386 pr_debug("get canvas %d\n", i);
387 pr_debug("canvas_used_flag %d\n",
388 canvas_stat[i].canvas_used_flag);
389 pr_debug("canvas_stat[i].id %d\n",
390 canvas_stat[i].id);
391 }
392 vdec_canvas_unlock(vdec_core, flags);
393 if (i < AMVDEC_CANVAS_MAX2 + 1)
394 return i;
395 else
396 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
397 }
398 }
399 vdec_canvas_unlock(vdec_core, flags);
400
401 pr_info("cannot get canvas\n");
402
403 return -1;
404}
405
406static void free_canvas_ex(int index, int id)
407{
408 unsigned long flags;
409 int offset;
410
411 flags = vdec_canvas_lock(vdec_core);
412 if (index >= 0 &&
413 index < AMVDEC_CANVAS_MAX2 + 1)
414 offset = index;
415 else if ((index >= AMVDEC_CANVAS_START_INDEX) &&
416 (index <= AMVDEC_CANVAS_MAX1))
417 offset = index + AMVDEC_CANVAS_MAX2 + 1 - AMVDEC_CANVAS_START_INDEX;
418 else {
419 vdec_canvas_unlock(vdec_core, flags);
420 return;
421 }
422
423 if ((canvas_stat[offset].canvas_used_flag > 0) &&
424 (canvas_stat[offset].id & (1 << id))) {
425 canvas_stat[offset].canvas_used_flag--;
426 canvas_stat[offset].id &= ~(1 << id);
427 if (canvas_stat[offset].canvas_used_flag == 0) {
428 canvas_stat[offset].type = 0;
429 canvas_stat[offset].id = 0;
430 }
431 if (debug & 4) {
432 pr_debug("free index %d used_flag %d, type = %d, id = %d\n",
433 offset,
434 canvas_stat[offset].canvas_used_flag,
435 canvas_stat[offset].type,
436 canvas_stat[offset].id);
437 }
438 }
439 vdec_canvas_unlock(vdec_core, flags);
440
441 return;
442
443}
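/*
 * Illustrative note (not from the original source): canvas_stat[] is indexed
 * 0..CANVAS_MAX_SIZE-1. Slots up to AMVDEC_CANVAS_MAX2 map 1:1 to hardware
 * canvas ids, while higher slots map into the AMVDEC_CANVAS_START_INDEX..
 * AMVDEC_CANVAS_MAX1 range via i + AMVDEC_CANVAS_START_INDEX -
 * AMVDEC_CANVAS_MAX2 - 1. free_canvas_ex() applies the inverse offset, so a
 * canvas id handed out by get_canvas_ex() always lands back on the same slot.
 */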
444
445
446
447
448static int vdec_get_hw_type(int value)
449{
450 int type;
451 switch (value) {
452 case VFORMAT_HEVC:
453 case VFORMAT_VP9:
454 case VFORMAT_AVS2:
455 type = CORE_MASK_HEVC;
456 break;
457
458 case VFORMAT_MPEG12:
459 case VFORMAT_MPEG4:
460 case VFORMAT_H264:
461 case VFORMAT_MJPEG:
462 case VFORMAT_REAL:
463 case VFORMAT_JPEG:
464 case VFORMAT_VC1:
465 case VFORMAT_AVS:
466 case VFORMAT_YUV:
467 case VFORMAT_H264MVC:
468 case VFORMAT_H264_4K2K:
469 case VFORMAT_H264_ENC:
470 case VFORMAT_JPEG_ENC:
471 type = CORE_MASK_VDEC_1;
472 break;
473
474 default:
475 type = -1;
476 }
477
478 return type;
479}
480
481
482static void vdec_save_active_hw(struct vdec_s *vdec)
483{
484 int type;
485
486 type = vdec_get_hw_type(vdec->port->vformat);
487
488 if (type == CORE_MASK_HEVC) {
489 vdec_core->active_hevc = vdec;
490 } else if (type == CORE_MASK_VDEC_1) {
491 vdec_core->active_vdec = vdec;
492 } else {
493 pr_info("save_active_fw wrong\n");
494 }
495}
496
497
498int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
499{
500 if (vdec && vdec->dec_status &&
501 ((vdec->status == VDEC_STATUS_CONNECTED ||
502 vdec->status == VDEC_STATUS_ACTIVE)))
503 return vdec->dec_status(vdec, vstatus);
504
505 return 0;
506}
507EXPORT_SYMBOL(vdec_status);
508
509int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
510{
511 int r;
512
513 if (vdec->set_trickmode) {
514 r = vdec->set_trickmode(vdec, trickmode);
515
516 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
517 r = vdec->slave->set_trickmode(vdec->slave,
518 trickmode);
519 return r;
520 }
521
522 return -1;
523}
524EXPORT_SYMBOL(vdec_set_trickmode);
525
526int vdec_set_isreset(struct vdec_s *vdec, int isreset)
527{
528 vdec->is_reset = isreset;
529 pr_info("is_reset=%d\n", isreset);
530 if (vdec->set_isreset)
531 return vdec->set_isreset(vdec, isreset);
532 return 0;
533}
534EXPORT_SYMBOL(vdec_set_isreset);
535
536int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
537{
538 vdec->dolby_meta_with_el = isdvmetawithel;
539 pr_info("isdvmetawithel=%d\n", isdvmetawithel);
540 return 0;
541}
542EXPORT_SYMBOL(vdec_set_dv_metawithel);
543
544void vdec_set_no_powerdown(int flag)
545{
546 no_powerdown = flag;
547 pr_info("no_powerdown=%d\n", no_powerdown);
548 return;
549}
550EXPORT_SYMBOL(vdec_set_no_powerdown);
551
552void vdec_count_info(struct vdec_info *vs, unsigned int err,
553 unsigned int offset)
554{
555 if (err)
556 vs->error_frame_count++;
557 if (offset) {
558 if (0 == vs->frame_count) {
559 vs->offset = 0;
560 vs->samp_cnt = 0;
561 }
562 vs->frame_data = offset > vs->total_data ?
563 offset - vs->total_data : vs->total_data - offset;
564 vs->total_data = offset;
565 if (vs->samp_cnt < 96000 * 2) { /* 2s */
566 if (0 == vs->samp_cnt)
567 vs->offset = offset;
568 vs->samp_cnt += vs->frame_dur;
569 } else {
570 vs->bit_rate = (offset - vs->offset) / 2;
571 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
572 vs->samp_cnt = 0;
573 }
574 vs->frame_count++;
575 }
576 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
577 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
578 return;
579}
580EXPORT_SYMBOL(vdec_count_info);
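/*
 * Worked example (illustrative, not from the original source): samp_cnt
 * accumulates frame_dur until it reaches 96000 * 2, which the code treats as a
 * roughly 2 s window, and bit_rate then becomes (offset - vs->offset) / 2,
 * i.e. bytes consumed per second over that window. For instance, if the stream
 * offset advanced by 1,000,000 bytes across the window, bit_rate is 500,000.
 */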
581int vdec_is_support_4k(void)
582{
583 return !is_meson_gxl_package_805X();
584}
585EXPORT_SYMBOL(vdec_is_support_4k);
586
587/*
588 * clk_config:
589 *0:default
590 *1:no gp0_pll;
591 *2:always used gp0_pll;
592 *>=10:fixed n M clk;
593 *== 100 , 100M clks;
594 */
595unsigned int get_vdec_clk_config_settings(void)
596{
597 return clk_config;
598}
599void update_vdec_clk_config_settings(unsigned int config)
600{
601 clk_config = config;
602}
603EXPORT_SYMBOL(update_vdec_clk_config_settings);
604
605static bool hevc_workaround_needed(void)
606{
607 return (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) &&
608 (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
609 == GXBB_REV_A_MINOR);
610}
611
612struct device *get_codec_cma_device(void)
613{
614 return vdec_core->cma_dev;
615}
616
617#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
618static const char * const vdec_device_name[] = {
619 "amvdec_mpeg12", "ammvdec_mpeg12",
620 "amvdec_mpeg4", "ammvdec_mpeg4",
621 "amvdec_h264", "ammvdec_h264",
622 "amvdec_mjpeg", "ammvdec_mjpeg",
623 "amvdec_real", "ammvdec_real",
624 "amjpegdec", "ammjpegdec",
625 "amvdec_vc1", "ammvdec_vc1",
626 "amvdec_avs", "ammvdec_avs",
627 "amvdec_yuv", "ammvdec_yuv",
628 "amvdec_h264mvc", "ammvdec_h264mvc",
629 "amvdec_h264_4k2k", "ammvdec_h264_4k2k",
630 "amvdec_h265", "ammvdec_h265",
631 "amvenc_avc", "amvenc_avc",
632 "jpegenc", "jpegenc",
633 "amvdec_vp9", "ammvdec_vp9",
634 "amvdec_avs2", "ammvdec_avs2"
635};
636
637
638#else
639
640static const char * const vdec_device_name[] = {
641 "amvdec_mpeg12",
642 "amvdec_mpeg4",
643 "amvdec_h264",
644 "amvdec_mjpeg",
645 "amvdec_real",
646 "amjpegdec",
647 "amvdec_vc1",
648 "amvdec_avs",
649 "amvdec_yuv",
650 "amvdec_h264mvc",
651 "amvdec_h264_4k2k",
652 "amvdec_h265",
653 "amvenc_avc",
654 "jpegenc",
655 "amvdec_vp9",
656 "amvdec_avs2"
657};
658
659#endif
660
661#ifdef VDEC_DEBUG_SUPPORT
662static u64 get_current_clk(void)
663{
664 /*struct timespec xtime = current_kernel_time();
665 u64 usec = xtime.tv_sec * 1000000;
666 usec += xtime.tv_nsec / 1000;
667 */
668 u64 usec = sched_clock();
669 return usec;
670}
671
672static void inc_profi_count(unsigned long mask, u32 *count)
673{
674 enum vdec_type_e type;
675
676 for (type = VDEC_1; type < VDEC_MAX; type++) {
677 if (mask & (1 << type))
678 count[type]++;
679 }
680}
681
682static void update_profi_clk_run(struct vdec_s *vdec,
683 unsigned long mask, u64 clk)
684{
685 enum vdec_type_e type;
686
687 for (type = VDEC_1; type < VDEC_MAX; type++) {
688 if (mask & (1 << type)) {
689 vdec->start_run_clk[type] = clk;
690 if (vdec->profile_start_clk[type] == 0)
691 vdec->profile_start_clk[type] = clk;
692 vdec->total_clk[type] = clk
693 - vdec->profile_start_clk[type];
694 /*pr_info("set start_run_clk %ld\n",
695 vdec->start_run_clk);*/
696
697 }
698 }
699}
700
701static void update_profi_clk_stop(struct vdec_s *vdec,
702 unsigned long mask, u64 clk)
703{
704 enum vdec_type_e type;
705
706 for (type = VDEC_1; type < VDEC_MAX; type++) {
707 if (mask & (1 << type)) {
708 if (vdec->start_run_clk[type] == 0)
709 pr_info("error, start_run_clk[%d] not set\n", type);
710
711 /*pr_info("update run_clk type %d, %ld, %ld, %ld\n",
712 type,
713 clk,
714 vdec->start_run_clk[type],
715 vdec->run_clk[type]);*/
716 vdec->run_clk[type] +=
717 (clk - vdec->start_run_clk[type]);
718 }
719 }
720}
721
722#endif
723
724int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
725{
726 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
727 sizeof(struct dec_sysinfo)))
728 return -EFAULT;
729
730 return 0;
731}
732EXPORT_SYMBOL(vdec_set_decinfo);
733
734/* construct vdec structure */
735struct vdec_s *vdec_create(struct stream_port_s *port,
736 struct vdec_s *master)
737{
738 struct vdec_s *vdec;
739 int type = VDEC_TYPE_SINGLE;
740 int id;
741 if (is_mult_inc(port->type))
742 type = (port->type & PORT_TYPE_FRAME) ?
743 VDEC_TYPE_FRAME_BLOCK :
744 VDEC_TYPE_STREAM_PARSER;
745
746 id = ida_simple_get(&vdec_core->ida,
747 0, MAX_INSTANCE_MUN, GFP_KERNEL);
748 if (id < 0) {
749 pr_info("vdec_create request id failed!ret =%d\n", id);
750 return NULL;
751 }
752 vdec = vzalloc(sizeof(struct vdec_s));
753
754 /* TBD */
755 if (vdec) {
756 vdec->magic = 0x43454456;
757 vdec->id = -1;
758 vdec->type = type;
759 vdec->port = port;
760 vdec->sys_info = &vdec->sys_info_store;
761
762 INIT_LIST_HEAD(&vdec->list);
763
764 atomic_inc(&vdec_core->vdec_nr);
765 vdec->id = id;
766 vdec_input_init(&vdec->input, vdec);
767 if (master) {
768 vdec->master = master;
769 master->slave = vdec;
770 master->sched = 1;
771 }
772 }
773
774 pr_debug("vdec_create instance %p, total %d\n", vdec,
775 atomic_read(&vdec_core->vdec_nr));
776
777 //trace_vdec_create(vdec); /*DEBUG_TMP*/
778
779 return vdec;
780}
781EXPORT_SYMBOL(vdec_create);
782
783int vdec_set_format(struct vdec_s *vdec, int format)
784{
785 vdec->format = format;
786 vdec->port_flag |= PORT_FLAG_VFORMAT;
787
788 if (vdec->slave) {
789 vdec->slave->format = format;
790 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
791 }
792
793 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
794
795 return 0;
796}
797EXPORT_SYMBOL(vdec_set_format);
798
799int vdec_set_pts(struct vdec_s *vdec, u32 pts)
800{
801 vdec->pts = pts;
802 vdec->pts64 = div64_u64((u64)pts * 100, 9);
803 vdec->pts_valid = true;
804 //trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
805 return 0;
806}
807EXPORT_SYMBOL(vdec_set_pts);
808
809void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
810{
811 vdec->timestamp = timestamp;
812 vdec->timestamp_valid = true;
813}
814EXPORT_SYMBOL(vdec_set_timestamp);
815
816int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
817{
818 vdec->pts64 = pts64;
819 vdec->pts = (u32)div64_u64(pts64 * 9, 100);
820 vdec->pts_valid = true;
821
822 //trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
823 return 0;
824}
825EXPORT_SYMBOL(vdec_set_pts64);
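/*
 * Worked example (illustrative, not from the original source): the two setters
 * above convert with the *100/9 and *9/100 factors, consistent with a 90 kHz
 * pts and a microsecond pts64. pts = 90000 (one second of 90 kHz ticks) maps
 * to pts64 = 90000 * 100 / 9 = 1,000,000 us, and 1,000,000 * 9 / 100 = 90,000
 * maps back.
 */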
826
827int vdec_get_status(struct vdec_s *vdec)
828{
829 return vdec->status;
830}
831EXPORT_SYMBOL(vdec_get_status);
832
833void vdec_set_status(struct vdec_s *vdec, int status)
834{
835 //trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
836 vdec->status = status;
837}
838EXPORT_SYMBOL(vdec_set_status);
839
840void vdec_set_next_status(struct vdec_s *vdec, int status)
841{
842 //trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
843 vdec->next_status = status;
844}
845EXPORT_SYMBOL(vdec_set_next_status);
846
847int vdec_set_video_path(struct vdec_s *vdec, int video_path)
848{
849 vdec->frame_base_video_path = video_path;
850 return 0;
851}
852EXPORT_SYMBOL(vdec_set_video_path);
853
854int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
855{
856 vdec->vf_receiver_inst = receive_id;
857 return 0;
858}
859EXPORT_SYMBOL(vdec_set_receive_id);
860
861/* add frame data to input chain */
862int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
863{
864 return vdec_input_add_frame(&vdec->input, buf, count);
865}
866EXPORT_SYMBOL(vdec_write_vframe);
867
868/* add a work queue thread for vdec*/
869void vdec_schedule_work(struct work_struct *work)
870{
871 if (vdec_core->vdec_core_wq)
872 queue_work(vdec_core->vdec_core_wq, work);
873 else
874 schedule_work(work);
875}
876EXPORT_SYMBOL(vdec_schedule_work);
877
878static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
879{
880 if (vdec->master)
881 return vdec->master;
882 else if (vdec->slave)
883 return vdec->slave;
884 return NULL;
885}
886
887static void vdec_sync_input_read(struct vdec_s *vdec)
888{
889 if (!vdec_stream_based(vdec))
890 return;
891
892 if (vdec_dual(vdec)) {
893 u32 me, other;
894 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
895 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
896 other =
897 vdec_get_associate(vdec)->input.stream_cookie;
898 if (me > other)
899 return;
900 else if (me == other) {
901 me = READ_VREG(VLD_MEM_VIFIFO_RP);
902 other =
903 vdec_get_associate(vdec)->input.swap_rp;
904 if (me > other) {
905 WRITE_PARSER_REG(PARSER_VIDEO_RP,
906 vdec_get_associate(vdec)->
907 input.swap_rp);
908 return;
909 }
910 }
911 WRITE_PARSER_REG(PARSER_VIDEO_RP,
912 READ_VREG(VLD_MEM_VIFIFO_RP));
913 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
914 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
915 if (((me & 0x80000000) == 0) &&
916 (vdec->input.streaming_rp & 0x80000000))
917 me += 1ULL << 32;
918 other = vdec_get_associate(vdec)->input.streaming_rp;
919 if (me > other) {
920 WRITE_PARSER_REG(PARSER_VIDEO_RP,
921 vdec_get_associate(vdec)->
922 input.swap_rp);
923 return;
924 }
925
926 WRITE_PARSER_REG(PARSER_VIDEO_RP,
927 READ_VREG(HEVC_STREAM_RD_PTR));
928 }
929 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
930 WRITE_PARSER_REG(PARSER_VIDEO_RP,
931 READ_VREG(VLD_MEM_VIFIFO_RP));
932 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
933 WRITE_PARSER_REG(PARSER_VIDEO_RP,
934 READ_VREG(HEVC_STREAM_RD_PTR));
935 }
936}
937
938static void vdec_sync_input_write(struct vdec_s *vdec)
939{
940 if (!vdec_stream_based(vdec))
941 return;
942
943 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
944 WRITE_VREG(VLD_MEM_VIFIFO_WP,
945 READ_PARSER_REG(PARSER_VIDEO_WP));
946 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
947 WRITE_VREG(HEVC_STREAM_WR_PTR,
948 READ_PARSER_REG(PARSER_VIDEO_WP));
949 }
950}
951
952/*
953 *get next frame from input chain
954 */
955/*
956 *The VLD FIFO is 512 bytes and the video buffer level
957 * empty interrupt threshold is set to 0x80 bytes
958 */
959#define VLD_PADDING_SIZE 1024
960#define HEVC_PADDING_SIZE (1024*16)
961int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
962{
963 struct vdec_input_s *input = &vdec->input;
964 struct vframe_chunk_s *chunk = NULL;
965 struct vframe_block_list_s *block = NULL;
966 int dummy;
967
968 /* full reset to HW input */
969 if (input->target == VDEC_INPUT_TARGET_VLD) {
970 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
971
972 /* reset VLD fifo for all vdec */
973 WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
974 WRITE_VREG(DOS_SW_RESET0, 0);
975
976 dummy = READ_RESET_REG(RESET0_REGISTER);
977 WRITE_VREG(POWER_CTL_VLD, 1 << 4);
978 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
979#if 0
980 /*move to driver*/
981 if (input_frame_based(input))
982 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
983
984 /*
985 * 2: assist
986 * 3: parser
987 * 4: parser_state
988 * 8: dblk
989 * 11:mcpu
990 * 12:ccpu
991 * 13:ddr
992 * 14:iqit
993 * 15:ipp
994 * 17:qdct
995 * 18:mpred
996 * 19:sao
997 * 24:hevc_afifo
998 */
999 WRITE_VREG(DOS_SW_RESET3,
1000 (1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
1001 (1<<17)|(1<<18)|(1<<19));
1002 WRITE_VREG(DOS_SW_RESET3, 0);
1003#endif
1004 }
1005
1006 /*
1007 *setup HW decoder input buffer (VLD context)
1008 * based on input->type and input->target
1009 */
1010 if (input_frame_based(input)) {
1011 chunk = vdec_input_next_chunk(&vdec->input);
1012
1013 if (chunk == NULL) {
1014 *p = NULL;
1015 return -1;
1016 }
1017
1018 block = chunk->block;
1019
1020 if (input->target == VDEC_INPUT_TARGET_VLD) {
1021 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
1022 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
1023 block->size - 8);
1024 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
1025 round_down(block->start + chunk->offset,
1026 VDEC_FIFO_ALIGN));
1027
1028 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
1029 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1030
1031 /* set to manual mode */
1032 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1033 WRITE_VREG(VLD_MEM_VIFIFO_RP,
1034 round_down(block->start + chunk->offset,
1035 VDEC_FIFO_ALIGN));
1036 dummy = chunk->offset + chunk->size +
1037 VLD_PADDING_SIZE;
1038 if (dummy >= block->size)
1039 dummy -= block->size;
1040 WRITE_VREG(VLD_MEM_VIFIFO_WP,
1041 round_down(block->start + dummy,
1042 VDEC_FIFO_ALIGN));
1043
1044 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
1045 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1046
1047 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
1048 (0x11 << 16) | (1<<10) | (7<<3));
1049
1050 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1051 WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
1052 WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
1053 block->size);
1054 WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
1055 chunk->offset);
1056 dummy = chunk->offset + chunk->size +
1057 HEVC_PADDING_SIZE;
1058 if (dummy >= block->size)
1059 dummy -= block->size;
1060 WRITE_VREG(HEVC_STREAM_WR_PTR,
1061 round_down(block->start + dummy,
1062 VDEC_FIFO_ALIGN));
1063
1064 /* set endian */
1065 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1066 }
1067
1068 *p = chunk;
1069 return chunk->size;
1070
1071 } else {
1072 /* stream based */
1073 u32 rp = 0, wp = 0, fifo_len = 0;
1074 int size;
1075 bool swap_valid = input->swap_valid;
1076 unsigned long swap_page_phys = input->swap_page_phys;
1077
1078 if (vdec_dual(vdec) &&
1079 ((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
1080 /* keep using previous input context */
1081 struct vdec_s *master = (vdec->slave) ?
1082 vdec : vdec->master;
1083 if (master->input.last_swap_slave) {
1084 swap_valid = master->slave->input.swap_valid;
1085 swap_page_phys =
1086 master->slave->input.swap_page_phys;
1087 } else {
1088 swap_valid = master->input.swap_valid;
1089 swap_page_phys = master->input.swap_page_phys;
1090 }
1091 }
1092
1093 if (swap_valid) {
1094 if (input->target == VDEC_INPUT_TARGET_VLD) {
1095 if (vdec->format == VFORMAT_H264)
1096 SET_VREG_MASK(POWER_CTL_VLD,
1097 (1 << 9));
1098
1099 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1100
1101 /* restore read side */
1102 WRITE_VREG(VLD_MEM_SWAP_ADDR,
1103 swap_page_phys);
1104 WRITE_VREG(VLD_MEM_SWAP_CTL, 1);
1105
1106 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1107 ;
1108 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1109
1110 /* restore wrap count */
1111 WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
1112 input->stream_cookie);
1113
1114 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1115 fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);
1116
1117 /* enable */
1118 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
1119 (0x11 << 16) | (1<<10));
1120
1121 /* sync with front end */
1122 vdec_sync_input_read(vdec);
1123 vdec_sync_input_write(vdec);
1124
1125 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1126 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1127 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
1128
1129 /* restore read side */
1130 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
1131 swap_page_phys);
1132 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);
1133
1134 while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
1135 & (1<<7))
1136 ;
1137 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1138
1139 /* restore stream offset */
1140 WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
1141 input->stream_cookie);
1142
1143 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1144 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1145 >> 16) & 0x7f;
1146
1147
1148 /* enable */
1149
1150 /* sync with front end */
1151 vdec_sync_input_read(vdec);
1152 vdec_sync_input_write(vdec);
1153
1154 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1155
1156 /*pr_info("vdec: restore context\r\n");*/
1157 }
1158
1159 } else {
1160 if (input->target == VDEC_INPUT_TARGET_VLD) {
1161 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
1162 input->start);
1163 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
1164 input->start + input->size - 8);
1165 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
1166 input->start);
1167
1168 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
1169 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1170
1171 /* set to manual mode */
1172 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1173 WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
1174 WRITE_VREG(VLD_MEM_VIFIFO_WP,
1175 READ_PARSER_REG(PARSER_VIDEO_WP));
1176
1177 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1178
1179 /* enable */
1180 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
1181 (0x11 << 16) | (1<<10));
1182
1183 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1184
1185 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1186 WRITE_VREG(HEVC_STREAM_START_ADDR,
1187 input->start);
1188 WRITE_VREG(HEVC_STREAM_END_ADDR,
1189 input->start + input->size);
1190 WRITE_VREG(HEVC_STREAM_RD_PTR,
1191 input->start);
1192 WRITE_VREG(HEVC_STREAM_WR_PTR,
1193 READ_PARSER_REG(PARSER_VIDEO_WP));
1194
1195 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1196 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1197 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1198 >> 16) & 0x7f;
1199
1200 /* enable */
1201 }
1202 }
1203 *p = NULL;
1204 if (wp >= rp)
1205 size = wp - rp + fifo_len;
1206 else
1207 size = wp + input->size - rp + fifo_len;
1208 if (size < 0) {
1209 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1210 __func__, input->size, wp, rp, fifo_len, size);
1211 size = 0;
1212 }
1213 return size;
1214 }
1215}
1216EXPORT_SYMBOL(vdec_prepare_input);
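/*
 * Worked example (illustrative, not from the original source): in the stream
 * based branch above, size = wp - rp + fifo_len, with one wrap of input->size
 * added when wp has wrapped behind rp. E.g. input->size = 0x100000,
 * rp = 0xF0000, wp = 0x8000, fifo_len = 0x40 gives
 * 0x8000 + 0x100000 - 0xF0000 + 0x40 = 0x18040 bytes available.
 */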
1217
1218void vdec_enable_input(struct vdec_s *vdec)
1219{
1220 struct vdec_input_s *input = &vdec->input;
1221
1222 if (vdec->status != VDEC_STATUS_ACTIVE)
1223 return;
1224
1225 if (input->target == VDEC_INPUT_TARGET_VLD)
1226 SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
1227 else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1228 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
1229 if (vdec_stream_based(vdec))
1230 CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1231 else
1232 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1233 SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
1234 }
1235}
1236EXPORT_SYMBOL(vdec_enable_input);
1237
1238int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
1239{
1240 int r = vdec_input_set_buffer(&vdec->input, start, size);
1241
1242 if (r)
1243 return r;
1244
1245 if (vdec->slave)
1246 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
1247
1248 return r;
1249}
1250EXPORT_SYMBOL(vdec_set_input_buffer);
1251
1252/*
1253 * vdec_eos returns the possibility that there are
1254 * more input can be used by decoder through vdec_prepare_input
1255 * Note: this function should be called prior to vdec_vframe_dirty
1256 * by decoder driver to determine if EOS happens for stream based
1257 * decoding when there is no sufficient data for a frame
1258 */
1259bool vdec_has_more_input(struct vdec_s *vdec)
1260{
1261 struct vdec_input_s *input = &vdec->input;
1262
1263 if (!input->eos)
1264 return true;
1265
1266 if (input_frame_based(input))
1267 return vdec_input_next_input_chunk(input) != NULL;
1268 else {
1269 if (input->target == VDEC_INPUT_TARGET_VLD)
1270 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
1271 READ_PARSER_REG(PARSER_VIDEO_WP);
1272 else {
1273 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
1274 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
1275 }
1276 }
1277}
1278EXPORT_SYMBOL(vdec_has_more_input);
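/*
 * Usage sketch (illustrative, not from the original source), following the
 * comment above: a stream based decoder that cannot assemble a full frame
 * first calls vdec_has_more_input(); only when it returns false is the
 * condition treated as EOS, otherwise the driver calls vdec_vframe_dirty()
 * and waits for the front end to deliver more data.
 */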
1279
1280void vdec_set_prepare_level(struct vdec_s *vdec, int level)
1281{
1282 vdec->input.prepare_level = level;
1283}
1284EXPORT_SYMBOL(vdec_set_prepare_level);
1285
1286void vdec_set_flag(struct vdec_s *vdec, u32 flag)
1287{
1288 vdec->flag = flag;
1289}
1290EXPORT_SYMBOL(vdec_set_flag);
1291
1292void vdec_set_eos(struct vdec_s *vdec, bool eos)
1293{
1294 vdec->input.eos = eos;
1295
1296 if (vdec->slave)
1297 vdec->slave->input.eos = eos;
1298}
1299EXPORT_SYMBOL(vdec_set_eos);
1300
1301#ifdef VDEC_DEBUG_SUPPORT
1302void vdec_set_step_mode(void)
1303{
1304 step_mode = 0x1ff;
1305}
1306EXPORT_SYMBOL(vdec_set_step_mode);
1307#endif
1308
1309void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1310{
1311 if (vdec && next_vdec) {
1312 vdec->sched = 0;
1313 next_vdec->sched = 1;
1314 }
1315}
1316EXPORT_SYMBOL(vdec_set_next_sched);
1317
1318/*
1319 * Swap Context: S0 S1 S2 S3 S4
1320 * Sample sequence: M S M M S
1321 * Master Context: S0 S0 S2 S3 S3
1322 * Slave context: NA S1 S1 S2 S4
1323 * ^
1324 * ^
1325 * ^
1326 * the tricky part
1327 * If there are back to back decoding of master or slave
1328 * then the context of the counter part should be updated
1329 * with current decoder. In this example, S1 should be
1330 * updated to S2.
1331 * This is done by swap the swap_page and related info
1332 * between two layers.
1333 */
1334static void vdec_borrow_input_context(struct vdec_s *vdec)
1335{
1336 struct page *swap_page;
1337 unsigned long swap_page_phys;
1338 struct vdec_input_s *me;
1339 struct vdec_input_s *other;
1340
1341 if (!vdec_dual(vdec))
1342 return;
1343
1344 me = &vdec->input;
1345 other = &vdec_get_associate(vdec)->input;
1346
1347 /* swap the swap_context, borrow counter part's
1348 * swap context storage and update all related info.
1349 * After vdec_vframe_dirty, vdec_save_input_context
1350 * will be called to update current vdec's
1351 * swap context
1352 */
1353 swap_page = other->swap_page;
1354 other->swap_page = me->swap_page;
1355 me->swap_page = swap_page;
1356
1357 swap_page_phys = other->swap_page_phys;
1358 other->swap_page_phys = me->swap_page_phys;
1359 me->swap_page_phys = swap_page_phys;
1360
1361 other->swap_rp = me->swap_rp;
1362 other->streaming_rp = me->streaming_rp;
1363 other->stream_cookie = me->stream_cookie;
1364 other->swap_valid = me->swap_valid;
1365}
1366
1367void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
1368{
1369 if (chunk)
1370 chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;
1371
1372 if (vdec_stream_based(vdec)) {
1373 vdec->input.swap_needed = true;
1374
1375 if (vdec_dual(vdec)) {
1376 vdec_get_associate(vdec)->input.dirty_count = 0;
1377 vdec->input.dirty_count++;
1378 if (vdec->input.dirty_count > 1) {
1379 vdec->input.dirty_count = 1;
1380 vdec_borrow_input_context(vdec);
1381 }
1382 }
1383
1384 /* for stream based mode, we update read and write pointer
1385 * also in case decoder wants to keep working on decoding
1386 * for more frames while input front end has more data
1387 */
1388 vdec_sync_input_read(vdec);
1389 vdec_sync_input_write(vdec);
1390
1391 vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
1392 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
1393 }
1394}
1395EXPORT_SYMBOL(vdec_vframe_dirty);
1396
1397bool vdec_need_more_data(struct vdec_s *vdec)
1398{
1399 if (vdec_stream_based(vdec))
1400 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1401
1402 return false;
1403}
1404EXPORT_SYMBOL(vdec_need_more_data);
1405
1406
1407void hevc_wait_ddr(void)
1408{
1409 unsigned long flags;
1410 spin_lock_irqsave(&vdec_spin_lock, flags);
1411 codec_dmcbus_write(DMC_REQ_CTRL,
1412 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
1413 spin_unlock_irqrestore(&vdec_spin_lock, flags);
1414
1415 while (!(codec_dmcbus_read(DMC_CHAN_STS)
1416 & (1 << 4)))
1417 ;
1418}
1419
1420void vdec_save_input_context(struct vdec_s *vdec)
1421{
1422 struct vdec_input_s *input = &vdec->input;
1423
1424#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1425 vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
1426#endif
1427
1428 if (input->target == VDEC_INPUT_TARGET_VLD)
1429 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);
1430
1431 if (input_stream_based(input) && (input->swap_needed)) {
1432 if (input->target == VDEC_INPUT_TARGET_VLD) {
1433 WRITE_VREG(VLD_MEM_SWAP_ADDR,
1434 input->swap_page_phys);
1435 WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
1436 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1437 ;
1438 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1439 vdec->input.stream_cookie =
1440 READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
1441 vdec->input.swap_rp =
1442 READ_VREG(VLD_MEM_VIFIFO_RP);
1443 vdec->input.total_rd_count =
1444 (u64)vdec->input.stream_cookie *
1445 vdec->input.size + vdec->input.swap_rp -
1446 READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
1447 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1448 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
1449 input->swap_page_phys);
1450 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);
1451
1452 while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
1453 ;
1454 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1455
1456 vdec->input.stream_cookie =
1457 READ_VREG(HEVC_SHIFT_BYTE_COUNT);
1458 vdec->input.swap_rp =
1459 READ_VREG(HEVC_STREAM_RD_PTR);
1460 if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
1461 (vdec->input.streaming_rp & 0x80000000))
1462 vdec->input.streaming_rp += 1ULL << 32;
1463 vdec->input.streaming_rp &= 0xffffffffULL << 32;
1464 vdec->input.streaming_rp |= vdec->input.stream_cookie;
1465 vdec->input.total_rd_count = vdec->input.streaming_rp;
1466 }
1467
1468 input->swap_valid = true;
1469 input->swap_needed = false;
1470 /*pr_info("vdec: save context\r\n");*/
1471
1472 vdec_sync_input_read(vdec);
1473
1474 if (vdec_dual(vdec)) {
1475 struct vdec_s *master = (vdec->slave) ?
1476 vdec : vdec->master;
1477 master->input.last_swap_slave = (master->slave == vdec);
1478 /* pr_info("master->input.last_swap_slave = %d\n",
1479 master->input.last_swap_slave); */
1480 }
1481
1482 hevc_wait_ddr();
1483 }
1484}
1485EXPORT_SYMBOL(vdec_save_input_context);
1486
1487void vdec_clean_input(struct vdec_s *vdec)
1488{
1489 struct vdec_input_s *input = &vdec->input;
1490
1491 while (!list_empty(&input->vframe_chunk_list)) {
1492 struct vframe_chunk_s *chunk =
1493 vdec_input_next_chunk(input);
1494 if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED))
1495 vdec_input_release_chunk(input, chunk);
1496 else
1497 break;
1498 }
1499 vdec_save_input_context(vdec);
1500}
1501EXPORT_SYMBOL(vdec_clean_input);
1502
1503int vdec_sync_input(struct vdec_s *vdec)
1504{
1505 struct vdec_input_s *input = &vdec->input;
1506 u32 rp = 0, wp = 0, fifo_len = 0;
1507 int size;
1508
1509 vdec_sync_input_read(vdec);
1510 vdec_sync_input_write(vdec);
1511 if (input->target == VDEC_INPUT_TARGET_VLD) {
1512 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1513 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1514
1515 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1516 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1517 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1518 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1519 >> 16) & 0x7f;
1520 }
1521 if (wp >= rp)
1522 size = wp - rp + fifo_len;
1523 else
1524 size = wp + input->size - rp + fifo_len;
1525 if (size < 0) {
1526 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1527 __func__, input->size, wp, rp, fifo_len, size);
1528 size = 0;
1529 }
1530 return size;
1531
1532}
1533EXPORT_SYMBOL(vdec_sync_input);
1534
1535const char *vdec_status_str(struct vdec_s *vdec)
1536{
1537 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1538 vdec_status_string[vdec->status] : "INVALID";
1539}
1540
1541const char *vdec_type_str(struct vdec_s *vdec)
1542{
1543 switch (vdec->type) {
1544 case VDEC_TYPE_SINGLE:
1545 return "VDEC_TYPE_SINGLE";
1546 case VDEC_TYPE_STREAM_PARSER:
1547 return "VDEC_TYPE_STREAM_PARSER";
1548 case VDEC_TYPE_FRAME_BLOCK:
1549 return "VDEC_TYPE_FRAME_BLOCK";
1550 case VDEC_TYPE_FRAME_CIRCULAR:
1551 return "VDEC_TYPE_FRAME_CIRCULAR";
1552 default:
1553 return "VDEC_TYPE_INVALID";
1554 }
1555}
1556
1557const char *vdec_device_name_str(struct vdec_s *vdec)
1558{
1559 return vdec_device_name[vdec->format * 2 + 1];
1560}
1561EXPORT_SYMBOL(vdec_device_name_str);
1562
1563void walk_vdec_core_list(char *s)
1564{
1565 struct vdec_s *vdec;
1566 struct vdec_core_s *core = vdec_core;
1567 unsigned long flags;
1568
1569 pr_info("%s --->\n", s);
1570
1571 flags = vdec_core_lock(vdec_core);
1572
1573 if (list_empty(&core->connected_vdec_list)) {
1574 pr_info("connected vdec list empty\n");
1575 } else {
1576 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1577 pr_info("\tvdec (%p), status = %s\n", vdec,
1578 vdec_status_str(vdec));
1579 }
1580 }
1581
1582 vdec_core_unlock(vdec_core, flags);
1583}
1584EXPORT_SYMBOL(walk_vdec_core_list);
1585
1586/* insert vdec to vdec_core for scheduling,
1587 * for dual running decoders, connect/disconnect always runs in pairs
1588 */
1589int vdec_connect(struct vdec_s *vdec)
1590{
1591 unsigned long flags;
1592
1593 //trace_vdec_connect(vdec);/*DEBUG_TMP*/
1594
1595 if (vdec->status != VDEC_STATUS_DISCONNECTED)
1596 return 0;
1597
1598 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
1599 vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1600
1601 init_completion(&vdec->inactive_done);
1602
1603 if (vdec->slave) {
1604 vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
1605 vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);
1606
1607 init_completion(&vdec->slave->inactive_done);
1608 }
1609
1610 flags = vdec_core_lock(vdec_core);
1611
1612 list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);
1613
1614 if (vdec->slave) {
1615 list_add_tail(&vdec->slave->list,
1616 &vdec_core->connected_vdec_list);
1617 }
1618
1619 vdec_core_unlock(vdec_core, flags);
1620
1621 up(&vdec_core->sem);
1622
1623 return 0;
1624}
1625EXPORT_SYMBOL(vdec_connect);
1626
1627/* remove vdec from vdec_core scheduling */
1628int vdec_disconnect(struct vdec_s *vdec)
1629{
1630#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1631 vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
1632#endif
1633 //trace_vdec_disconnect(vdec);/*DEBUG_TMP*/
1634
1635 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1636 (vdec->status != VDEC_STATUS_ACTIVE)) {
1637 return 0;
1638 }
1639 mutex_lock(&vdec_mutex);
1640 /*
1641 *when a vdec is under the management of the scheduler
1642 * the status change will only be from vdec_core_thread
1643 */
1644 vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);
1645
1646 if (vdec->slave)
1647 vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
1648 else if (vdec->master)
1649 vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
1650 mutex_unlock(&vdec_mutex);
1651 up(&vdec_core->sem);
1652
1653 if (!wait_for_completion_timeout(&vdec->inactive_done,
1654 msecs_to_jiffies(2000)))
1655 goto discon_timeout;
1656
1657 if (vdec->slave) {
1658 if(!wait_for_completion_timeout(&vdec->slave->inactive_done,
1659 msecs_to_jiffies(2000)))
1660 goto discon_timeout;
1661 } else if (vdec->master) {
1662 if(!wait_for_completion_timeout(&vdec->master->inactive_done,
1663 msecs_to_jiffies(2000)))
1664 goto discon_timeout;
1665 }
1666
1667 return 0;
1668discon_timeout:
1669 pr_err("%s timeout!!! status: 0x%x\n", __func__, vdec->status);
1670 return 0;
1671}
1672EXPORT_SYMBOL(vdec_disconnect);
1673
1674/* release vdec structure */
1675int vdec_destroy(struct vdec_s *vdec)
1676{
1677 //trace_vdec_destroy(vdec);/*DEBUG_TMP*/
1678
1679 vdec_input_release(&vdec->input);
1680
1681#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1682 vdec_profile_flush(vdec);
1683#endif
1684 ida_simple_remove(&vdec_core->ida, vdec->id);
1685 vfree(vdec);
1686
1687 atomic_dec(&vdec_core->vdec_nr);
1688
1689 return 0;
1690}
1691EXPORT_SYMBOL(vdec_destroy);
1692
1693/*
1694 * Only support time sliced decoding for frame based input,
1695 * so legacy decoder can exist with time sliced decoder.
1696 */
1697static const char *get_dev_name(bool use_legacy_vdec, int format)
1698{
1699#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1700 if (use_legacy_vdec)
1701 return vdec_device_name[format * 2];
1702 else
1703 return vdec_device_name[format * 2 + 1];
1704#else
1705 return vdec_device_name[format];
1706#endif
1707}
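/*
 * Example (illustrative, not from the original source): with
 * CONFIG_AMLOGIC_MEDIA_MULTI_DEC, and assuming VFORMAT_H264 indexes the
 * "amvdec_h264"/"ammvdec_h264" pair in vdec_device_name[], a legacy
 * (vdec_single) instance registers "amvdec_h264" while the time sliced path
 * registers "ammvdec_h264".
 */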
1708
1709struct vdec_s *vdec_get_with_id(unsigned int id)
1710{
1711 struct vdec_s *vdec, *ret_vdec = NULL;
1712 struct vdec_core_s *core = vdec_core;
1713 unsigned long flags;
1714
1715 if (id >= MAX_INSTANCE_MUN)
1716 return NULL;
1717
1718 flags = vdec_core_lock(vdec_core);
1719 if (!list_empty(&core->connected_vdec_list)) {
1720 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1721 if (vdec->id == id) {
1722 pr_info("searched available vdec connected, id = %d\n", id);
1723 ret_vdec = vdec;
1724 break;
1725 }
1726 }
1727 }
1728 vdec_core_unlock(vdec_core, flags);
1729
1730 return ret_vdec;
1731}
1732
1733/*
1734 *register vdec_device
1735 * create output, vfm or create ionvideo output
1736 */
1737s32 vdec_init(struct vdec_s *vdec, int is_4k)
1738{
1739 int r = 0;
1740 struct vdec_s *p = vdec;
1741 const char *dev_name;
1742 int id = PLATFORM_DEVID_AUTO;/*overridden below if the vdec already has an id*/
1743
1744 dev_name = get_dev_name(vdec_single(vdec), vdec->format);
1745
1746 if (dev_name == NULL)
1747 return -ENODEV;
1748
1749 pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
1750 dev_name, vdec_type_str(vdec));
1751
1752 /*
1753 *todo: VFM patch control should be configurable,
1754 * for now all stream based input uses default VFM path.
1755 */
1756 if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
1757 if (vdec_core->vfm_vdec == NULL) {
1758 pr_debug("vdec_init set vfm decoder %p\n", vdec);
1759 vdec_core->vfm_vdec = vdec;
1760 } else {
1761 pr_info("vdec_init vfm path busy.\n");
1762 return -EBUSY;
1763 }
1764 }
1765
1766 mutex_lock(&vdec_mutex);
1767 inited_vcodec_num++;
1768 mutex_unlock(&vdec_mutex);
1769
1770 vdec_input_set_type(&vdec->input, vdec->type,
1771 (vdec->format == VFORMAT_HEVC ||
1772 vdec->format == VFORMAT_AVS2 ||
1773 vdec->format == VFORMAT_VP9) ?
1774 VDEC_INPUT_TARGET_HEVC :
1775 VDEC_INPUT_TARGET_VLD);
1776
1777 p->cma_dev = vdec_core->cma_dev;
1778 p->get_canvas = get_canvas;
1779 p->get_canvas_ex = get_canvas_ex;
1780 p->free_canvas_ex = free_canvas_ex;
1781 p->vdec_fps_detec = vdec_fps_detec;
1782 atomic_set(&p->inirq_flag, 0);
1783 atomic_set(&p->inirq_thread_flag, 0);
1784 /* todo */
1785 if (!vdec_dual(vdec))
1786 p->use_vfm_path = vdec_stream_based(vdec);
1787 /* vdec_dev_reg.flag = 0; */
1788 if (vdec->id >= 0)
1789 id = vdec->id;
1790 p->parallel_dec = parallel_decode;
1791 vdec_core->parallel_dec = parallel_decode;
1792 vdec->canvas_mode = CANVAS_BLKMODE_32X32;
1793#ifdef FRAME_CHECK
1794 vdec_frame_check_init(vdec);
1795#endif
1796 p->dev = platform_device_register_data(
1797 &vdec_core->vdec_core_platform_device->dev,
1798 dev_name,
1799 id,
1800 &p, sizeof(struct vdec_s *));
1801
1802 if (IS_ERR(p->dev)) {
1803 r = PTR_ERR(p->dev);
1804 pr_err("vdec: Decoder device %s register failed (%d)\n",
1805 dev_name, r);
1806
1807 mutex_lock(&vdec_mutex);
1808 inited_vcodec_num--;
1809 mutex_unlock(&vdec_mutex);
1810
1811 goto error;
1812 } else if (!p->dev->dev.driver) {
1813 pr_info("vdec: Decoder device %s driver probe failed.\n",
1814 dev_name);
1815 r = -ENODEV;
1816
1817 goto error;
1818 }
1819
1820 if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
1821 r = -ENODEV;
1822 pr_err("vdec: Decoder device not handled (%s)\n", dev_name);
1823
1824 mutex_lock(&vdec_mutex);
1825 inited_vcodec_num--;
1826 mutex_unlock(&vdec_mutex);
1827
1828 goto error;
1829 }
1830
1831 if (p->use_vfm_path) {
1832 vdec->vf_receiver_inst = -1;
1833 vdec->vfm_map_id[0] = 0;
1834 } else if (!vdec_dual(vdec)) {
1835 /* create IONVIDEO instance and connect decoder's
1836 * vf_provider interface to it
1837 */
1838 if (p->type != VDEC_TYPE_FRAME_BLOCK) {
1839 r = -ENODEV;
1840 pr_err("vdec: Incorrect decoder type\n");
1841
1842 mutex_lock(&vdec_mutex);
1843 inited_vcodec_num--;
1844 mutex_unlock(&vdec_mutex);
1845
1846 goto error;
1847 }
1848 if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
1849#if 1
1850 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1851 &vdec->vf_receiver_inst);
1852#else
1853 /*
1854 * temporarily just use decoder instance ID as iondriver ID
1855 * to solve OMX iondriver instance number check time sequence
1856 * only the limitation is we can NOT mix different video
1857 * decoders since same ID will be used for different decoder
1858 * formats.
1859 */
1860 vdec->vf_receiver_inst = p->dev->id;
1861 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1862 &vdec->vf_receiver_inst);
1863#endif
1864 if (r < 0) {
1865 pr_err("IonVideo frame receiver allocation failed.\n");
1866
1867 mutex_lock(&vdec_mutex);
1868 inited_vcodec_num--;
1869 mutex_unlock(&vdec_mutex);
1870
1871 goto error;
1872 }
1873
1874 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1875 "%s %s", vdec->vf_provider_name,
1876 vdec->vf_receiver_name);
1877 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1878 "vdec-map-%d", vdec->id);
1879 } else if (p->frame_base_video_path ==
1880 FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
1881 if (vdec_secure(vdec)) {
1882 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1883 "%s %s", vdec->vf_provider_name,
1884 "amlvideo amvideo");
1885 } else {
1886 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1887 "%s %s", vdec->vf_provider_name,
1888 "amlvideo ppmgr deinterlace amvideo");
1889 }
1890 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1891 "vdec-map-%d", vdec->id);
1892 } else if (p->frame_base_video_path ==
1893 FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
1894 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1895 "%s %s", vdec->vf_provider_name,
1896 "aml_video.1 videosync.0 videopip");
1897 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1898 "vdec-map-%d", vdec->id);
1899 } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) {
1900 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1901 "%s %s", vdec->vf_provider_name,
1902 vdec->vf_receiver_name);
1903 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1904 "vdec-map-%d", vdec->id);
1905 } else if (p->frame_base_video_path == FRAME_BASE_PATH_TUNNEL_MODE) {
1906 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1907 "%s %s", vdec->vf_provider_name,
1908 "amvideo");
1909 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1910 "vdec-map-%d", vdec->id);
1911 }
1912
1913 if (vfm_map_add(vdec->vfm_map_id,
1914 vdec->vfm_map_chain) < 0) {
1915 r = -ENOMEM;
1916 pr_err("Decoder pipeline map creation failed %s.\n",
1917 vdec->vfm_map_id);
1918 vdec->vfm_map_id[0] = 0;
1919
1920 mutex_lock(&vdec_mutex);
1921 inited_vcodec_num--;
1922 mutex_unlock(&vdec_mutex);
1923
1924 goto error;
1925 }
1926
1927 pr_debug("vfm map %s created\n", vdec->vfm_map_id);
1928
1929 /*
1930 *assume the IONVIDEO driver already has a few vframe_receiver
1931 * registered.
1932 * 1. Call iondriver function to allocate a IONVIDEO path and
1933 * provide receiver's name and receiver op.
1934 * 2. Get decoder driver's provider name from driver instance
1935 * 3. vfm_map_add(name, "<decoder provider name>
1936 * <iondriver receiver name>"), e.g.
1937 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
1938 * 4. vf_reg_provider and vf_reg_receiver
1939 * Note: the decoder provider's op uses vdec as op_arg
1940 * the iondriver receiver's op uses iondev device as
1941 * op_arg
1942 */
1943
1944 }
1945
1946 if (!vdec_single(vdec)) {
1947 vf_reg_provider(&p->vframe_provider);
1948
1949 vf_notify_receiver(p->vf_provider_name,
1950 VFRAME_EVENT_PROVIDER_START,
1951 vdec);
1952
1953 if (vdec_core->hint_fr_vdec == NULL)
1954 vdec_core->hint_fr_vdec = vdec;
1955
1956 if (vdec_core->hint_fr_vdec == vdec) {
1957 if (p->sys_info->rate != 0) {
6d2907a6 1958 if (!vdec->is_reset) {
d481db31
NQ
1959 vf_notify_receiver(p->vf_provider_name,
1960 VFRAME_EVENT_PROVIDER_FR_HINT,
1961 (void *)
1962 ((unsigned long)
1963 p->sys_info->rate));
6d2907a6
LC
1964 vdec->fr_hint_state = VDEC_HINTED;
1965 }
fe96802b
NQ
1966 } else {
1967 vdec->fr_hint_state = VDEC_NEED_HINT;
1968 }
1969 }
b9164398
NQ
1970 }
1971
28e318df 1972 p->dolby_meta_with_el = 0;
5b851ff9 1973 pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
fe96802b
NQ
1974 vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
1975 &vdec->input,
1976 vdec->sys_info->width,
1977 vdec->sys_info->height);
b9164398
NQ
1978 /* vdec is now ready to be active */
1979 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
1980
1981 return 0;
1982
1983error:
1984 return r;
1985}
1986EXPORT_SYMBOL(vdec_init);
1987
fe96802b
NQ
1988/* vdec_create/init/release/destroy apply to both decoders of a dual (master/slave) pair
1989 */
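/*
 * Typical lifecycle of one instance, for reference (illustrative sketch;
 * the create/configure calls live elsewhere in this driver):
 *
 *   vdec = vdec_create(...);     // allocate and set up the instance
 *   vdec_init(vdec, ...);        // build provider/receiver and vfm map
 *   ...decoding is driven by the core thread...
 *   vdec_release(vdec);          // end fr hint, unregister, destroy
 *
 * For dual (master/slave) decoding the same calls are made for each of
 * the two instances.
 */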
b9164398
NQ
1990void vdec_release(struct vdec_s *vdec)
1991{
fe96802b 1992 //trace_vdec_release(vdec);/*DEBUG_TMP*/
a6c89e96
NQ
1993#ifdef VDEC_DEBUG_SUPPORT
1994 if (step_mode) {
1995 pr_info("VDEC_DEBUG: in step_mode, wait release\n");
1996 while (step_mode)
1997 udelay(10);
1998 pr_info("VDEC_DEBUG: step_mode is clear\n");
1999 }
2000#endif
b9164398
NQ
2001 vdec_disconnect(vdec);
2002
fe96802b
NQ
2003 if (vdec->vframe_provider.name) {
2004 if (!vdec_single(vdec)) {
2005 if (vdec_core->hint_fr_vdec == vdec
6d2907a6 2006 && vdec->fr_hint_state == VDEC_HINTED)
fe96802b
NQ
2007 vf_notify_receiver(
2008 vdec->vf_provider_name,
2009 VFRAME_EVENT_PROVIDER_FR_END_HINT,
2010 NULL);
2011 vdec->fr_hint_state = VDEC_NO_NEED_HINT;
2012 }
b9164398 2013 vf_unreg_provider(&vdec->vframe_provider);
fe96802b 2014 }
b9164398
NQ
2015
2016 if (vdec_core->vfm_vdec == vdec)
2017 vdec_core->vfm_vdec = NULL;
2018
fe96802b
NQ
2019 if (vdec_core->hint_fr_vdec == vdec)
2020 vdec_core->hint_fr_vdec = NULL;
2021
b9164398
NQ
2022 if (vdec->vf_receiver_inst >= 0) {
2023 if (vdec->vfm_map_id[0]) {
2024 vfm_map_remove(vdec->vfm_map_id);
2025 vdec->vfm_map_id[0] = 0;
2026 }
b9164398
NQ
2027 }
2028
5f3fbfb7
HZ
2029 while ((atomic_read(&vdec->inirq_flag) > 0)
2030 || (atomic_read(&vdec->inirq_thread_flag) > 0))
2031 schedule();
2032
8458676f 2033#ifdef FRAME_CHECK
2034 vdec_frame_check_exit(vdec);
8458676f 2035#endif
97fe3d16 2036 vdec_fps_clear(vdec->id);
5050c738 2037
b9164398 2038 platform_device_unregister(vdec->dev);
a35da9f0
PY
2039 pr_debug("vdec_release instance %p, total %d\n", vdec,
2040 atomic_read(&vdec_core->vdec_nr));
b9164398
NQ
2041 vdec_destroy(vdec);
2042
2043 mutex_lock(&vdec_mutex);
2044 inited_vcodec_num--;
2045 mutex_unlock(&vdec_mutex);
fe96802b 2046
b9164398
NQ
2047}
2048EXPORT_SYMBOL(vdec_release);
2049
a6c89e96
NQ
2050/* For dual running decoders, vdec_reset is only called on the master vdec.
2051 */
b9164398
NQ
2052int vdec_reset(struct vdec_s *vdec)
2053{
a6c89e96
NQ
2054 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
2055
b9164398
NQ
2056 vdec_disconnect(vdec);
2057
2058 if (vdec->vframe_provider.name)
2059 vf_unreg_provider(&vdec->vframe_provider);
2060
2061 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
2062 vf_unreg_provider(&vdec->slave->vframe_provider);
2063
2064 if (vdec->reset) {
2065 vdec->reset(vdec);
2066 if (vdec->slave)
2067 vdec->slave->reset(vdec->slave);
2068 }
158de7c4 2069 vdec->mc_loaded = 0;/*clear for reload firmware*/
b9164398
NQ
2070 vdec_input_release(&vdec->input);
2071
6b7ee58f
NQ
2072 vdec_input_init(&vdec->input, vdec);
2073
2074 vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
2075 vdec->sys_info->height);
2076
b9164398
NQ
2077 vf_reg_provider(&vdec->vframe_provider);
2078 vf_notify_receiver(vdec->vf_provider_name,
2079 VFRAME_EVENT_PROVIDER_START, vdec);
2080
2081 if (vdec->slave) {
2082 vf_reg_provider(&vdec->slave->vframe_provider);
2083 vf_notify_receiver(vdec->slave->vf_provider_name,
2084 VFRAME_EVENT_PROVIDER_START, vdec->slave);
158de7c4 2085 vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
b9164398
NQ
2086 }
2087
2088 vdec_connect(vdec);
2089
2090 return 0;
2091}
2092EXPORT_SYMBOL(vdec_reset);
2093
fe96802b
NQ
2094void vdec_free_cmabuf(void)
2095{
2096 mutex_lock(&vdec_mutex);
2097
a35da9f0 2098 /*if (inited_vcodec_num > 0) {
fe96802b
NQ
2099 mutex_unlock(&vdec_mutex);
2100 return;
a35da9f0 2101 }*/
fe96802b
NQ
2102 mutex_unlock(&vdec_mutex);
2103}
2104
665a4a8e 2105void vdec_core_request(struct vdec_s *vdec, unsigned long mask)
b9164398 2106{
a6c89e96 2107 vdec->core_mask |= mask;
b9164398 2108
a6c89e96
NQ
2109 if (vdec->slave)
2110 vdec->slave->core_mask |= mask;
05afa03d
PY
2111 if (vdec_core->parallel_dec == 1) {
2112 if (mask & CORE_MASK_COMBINE)
2113 vdec_core->vdec_combine_flag++;
2114 }
a6c89e96 2115
a6c89e96
NQ
2116}
2117EXPORT_SYMBOL(vdec_core_request);
2118
2119int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
2120{
2121 vdec->core_mask &= ~mask;
2122
2123 if (vdec->slave)
2124 vdec->slave->core_mask &= ~mask;
05afa03d
PY
2125 if (vdec_core->parallel_dec == 1) {
2126 if (mask & CORE_MASK_COMBINE)
2127 vdec_core->vdec_combine_flag--;
2128 }
a6c89e96
NQ
2129 return 0;
2130}
2131EXPORT_SYMBOL(vdec_core_release);
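/*
 * Usage sketch: a decoder driver requests its cores once when it starts
 * and releases the same mask when it stops (mask macro names other than
 * CORE_MASK_COMBINE are assumed from vdec.h):
 *
 *   vdec_core_request(vdec, CORE_MASK_HEVC | CORE_MASK_COMBINE);
 *   ...
 *   vdec_core_release(vdec, CORE_MASK_HEVC | CORE_MASK_COMBINE);
 *
 * CORE_MASK_COMBINE marks a mask whose cores must be granted together,
 * see vdec_schedule_mask() below.
 */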
2132
a35da9f0 2133bool vdec_core_with_input(unsigned long mask)
a6c89e96
NQ
2134{
2135 enum vdec_type_e type;
2136
2137 for (type = VDEC_1; type < VDEC_MAX; type++) {
2138 if ((mask & (1 << type)) && cores_with_input[type])
2139 return true;
b9164398
NQ
2140 }
2141
a6c89e96
NQ
2142 return false;
2143}
2144
2145void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
2146{
2147 unsigned long i;
2148 unsigned long t = mask;
6da7a8e8 2149 mutex_lock(&vdec_mutex);
a6c89e96
NQ
2150 while (t) {
2151 i = __ffs(t);
2152 clear_bit(i, &vdec->active_mask);
2153 t &= ~(1 << i);
2154 }
2155
2156 if (vdec->active_mask == 0)
2157 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
6da7a8e8
PY
2158
2159 mutex_unlock(&vdec_mutex);
a6c89e96
NQ
2160}
2161EXPORT_SYMBOL(vdec_core_finish_run);
2162/*
2163 * find what core resources are available for vdec
2164 */
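/*
 * Worked example (illustrative mask values): with core_mask =
 * CORE_MASK_COMBINE | 0x6 (two cores that must be granted together) and
 * active_mask = 0x2, (mask & ~active_mask) != mask, so nothing is
 * granted; once active_mask drops to 0 the whole 0x6 is returned at
 * once. Without CORE_MASK_COMBINE each free core is granted on its own.
 */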
2165static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
2166 unsigned long active_mask)
2167{
2168 unsigned long mask = vdec->core_mask &
2169 ~CORE_MASK_COMBINE;
2170
2171 if (vdec->core_mask & CORE_MASK_COMBINE) {
2172 /* combined cores must be granted together */
2173 if ((mask & ~active_mask) == mask)
2174 return mask;
2175 else
2176 return 0;
2177 } else
2178 return mask & ~vdec->sched_mask & ~active_mask;
b9164398
NQ
2179}
2180
2181/*
e0614bf7 2182 *Decoder callback
b9164398
NQ
2183 * Each decoder instance uses this callback to notify status change, e.g. when
2184 * the decoder has finished using the HW resource.
2185 * a sample callback from a decoder's driver is as follows:
2186 *
2187 * if (hw->vdec_cb) {
2188 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
2189 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
2190 * }
2191 */
2192static void vdec_callback(struct vdec_s *vdec, void *data)
2193{
2194 struct vdec_core_s *core = (struct vdec_core_s *)data;
2195
fe96802b 2196#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
2197 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
2198#endif
2199
2200 up(&core->sem);
2201}
2202
2203static irqreturn_t vdec_isr(int irq, void *dev_id)
2204{
2205 struct vdec_isr_context_s *c =
2206 (struct vdec_isr_context_s *)dev_id;
05afa03d 2207 struct vdec_s *vdec = vdec_core->last_vdec;
5f3fbfb7 2208 irqreturn_t ret = IRQ_HANDLED;
05afa03d
PY
2209
2210 if (vdec_core->parallel_dec == 1) {
2211 if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
2212 vdec = vdec_core->active_hevc;
2213 else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
2214 vdec = vdec_core->active_vdec;
2215 else
2216 vdec = NULL;
2217 }
2218
5f3fbfb7
HZ
2219 if (vdec)
2220 atomic_set(&vdec->inirq_flag, 1);
2221 if (c->dev_isr) {
2222 ret = c->dev_isr(irq, c->dev_id);
2223 goto isr_done;
2224 }
b9164398 2225
a6c89e96
NQ
2226 if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
2227 (c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
2228 (c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
b9164398
NQ
2229#if 0
2230 pr_warn("vdec interrupt w/o a valid receiver\n");
2231#endif
5f3fbfb7 2232 goto isr_done;
b9164398
NQ
2233 }
2234
2235 if (!vdec) {
2236#if 0
2237 pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
2238 core);
2239#endif
5f3fbfb7 2240 goto isr_done;
b9164398
NQ
2241 }
2242
2243 if (!vdec->irq_handler) {
2244#if 0
2245 pr_warn("vdec instance has no irq handle.\n");
2246#endif
5f3fbfb7 2247 goto isr_done;
b9164398
NQ
2248 }
2249
5f3fbfb7
HZ
2250 ret = vdec->irq_handler(vdec, c->index);
2251isr_done:
2252 if (vdec)
2253 atomic_set(&vdec->inirq_flag, 0);
2254 return ret;
b9164398
NQ
2255}
2256
2257static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
2258{
2259 struct vdec_isr_context_s *c =
2260 (struct vdec_isr_context_s *)dev_id;
05afa03d 2261 struct vdec_s *vdec = vdec_core->last_vdec;
5f3fbfb7 2262 irqreturn_t ret = IRQ_HANDLED;
05afa03d
PY
2263
2264 if (vdec_core->parallel_dec == 1) {
2265 if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
2266 vdec = vdec_core->active_hevc;
2267 else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
2268 vdec = vdec_core->active_vdec;
2269 else
2270 vdec = NULL;
2271 }
2272
5f3fbfb7
HZ
2273 if (vdec)
2274 atomic_set(&vdec->inirq_thread_flag, 1);
2275 if (c->dev_threaded_isr) {
2276 ret = c->dev_threaded_isr(irq, c->dev_id);
2277 goto thread_isr_done;
2278 }
b9164398 2279 if (!vdec)
5f3fbfb7 2280 goto thread_isr_done;
b9164398
NQ
2281
2282 if (!vdec->threaded_irq_handler)
5f3fbfb7
HZ
2283 goto thread_isr_done;
2284 ret = vdec->threaded_irq_handler(vdec, c->index);
2285thread_isr_done:
2286 if (vdec)
2287 atomic_set(&vdec->inirq_thread_flag, 0);
2288 return ret;
b9164398
NQ
2289}
2290
a6c89e96 2291unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
b9164398 2292{
a6c89e96 2293 unsigned long ready_mask;
fe96802b 2294 struct vdec_input_s *input = &vdec->input;
a6c89e96
NQ
2295 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
2296 (vdec->status != VDEC_STATUS_ACTIVE))
b9164398
NQ
2297 return false;
2298
2299 if (!vdec->run_ready)
2300 return false;
2301
d5c1c0ff 2302 /* when crc32 error, block at error frame */
2303 if (vdec->vfc.err_crc_block)
2304 return false;
2305
b9164398
NQ
2306 if ((vdec->slave || vdec->master) &&
2307 (vdec->sched == 0))
2308 return false;
a6c89e96
NQ
2309#ifdef VDEC_DEBUG_SUPPORT
2310 inc_profi_count(mask, vdec->check_count);
2311#endif
2312 if (vdec_core_with_input(mask)) {
2313 /* check frame based input underrun */
2314 if (input && !input->eos && input_frame_based(input)
2315 && (!vdec_input_next_chunk(input))) {
2316#ifdef VDEC_DEBUG_SUPPORT
2317 inc_profi_count(mask, vdec->input_underrun_count);
2318#endif
fe96802b 2319 return false;
a6c89e96
NQ
2320 }
2321 /* check streaming prepare level threshold if not EOS */
2322 if (input && input_stream_based(input) && !input->eos) {
2323 u32 rp, wp, level;
2324
2325 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
2326 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
2327 if (wp < rp)
2328 level = input->size + wp - rp;
2329 else
2330 level = wp - rp;
2331
2332 if ((level < input->prepare_level) &&
2333 (pts_get_rec_num(PTS_TYPE_VIDEO,
2334 vdec->input.total_rd_count) < 2)) {
2335 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
2336#ifdef VDEC_DEBUG_SUPPORT
2337 inc_profi_count(mask, vdec->input_underrun_count);
2338 if (step_mode & 0x200) {
2339 if ((step_mode & 0xff) == vdec->id) {
2340 step_mode |= 0xff;
2341 return mask;
2342 }
2343 }
2344#endif
2345 return false;
2346 } else if (level > input->prepare_level)
2347 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
2348 }
fe96802b
NQ
2349 }
2350
b9164398
NQ
2351 if (step_mode) {
2352 if ((step_mode & 0xff) != vdec->id)
a6c89e96
NQ
2353 return 0;
2354 step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
b9164398
NQ
2355 }
2356
a6c89e96 2357 /*step_mode &= ~0xff; not work for id of 0, removed*/
b9164398 2358
fe96802b 2359#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
2360 vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
2361#endif
2362
a6c89e96
NQ
2363 ready_mask = vdec->run_ready(vdec, mask) & mask;
2364#ifdef VDEC_DEBUG_SUPPORT
2365 if (ready_mask != mask)
2366 inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
2367#endif
fe96802b 2368#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
a6c89e96 2369 if (ready_mask)
b9164398
NQ
2370 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
2371#endif
2372
a6c89e96
NQ
2373 return ready_mask;
2374}
2375
2376/* bridge on/off vdec's interrupt processing to vdec core */
2377static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
2378 bool enable)
2379{
2380 enum vdec_type_e type;
2381
2382 for (type = VDEC_1; type < VDEC_MAX; type++) {
2383 if (mask & (1 << type)) {
2384 struct vdec_isr_context_s *c =
2385 &vdec_core->isr_context[cores_int[type]];
2386 if (enable)
2387 c->vdec = vdec;
2388 else if (c->vdec == vdec)
2389 c->vdec = NULL;
2390 }
2391 }
b9164398
NQ
2392}
2393
fe96802b
NQ
2394/*
2395 * Set up secure protection for each decoder instance running.
2396 * Note: The operation from REE side only resets memory access
2397 * to a default policy and even a non_secure type will still be
2398 * changed to secure type automatically when secure source is
2399 * detected inside TEE.
2400 * Also perform the need_more_data check and set the flag if the
2401 * decoder is not consuming data.
2402 */
a6c89e96 2403void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
fe96802b
NQ
2404{
2405 struct vdec_input_s *input = &vdec->input;
a6c89e96 2406 int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
fe96802b
NQ
2407 DMC_DEV_TYPE_NON_SECURE;
2408
a6c89e96
NQ
2409 vdec_route_interrupt(vdec, mask, true);
2410
2411 if (!vdec_core_with_input(mask))
2412 return;
2413
fe96802b 2414 if (input->target == VDEC_INPUT_TARGET_VLD)
a6c89e96 2415 tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
fe96802b 2416 else if (input->target == VDEC_INPUT_TARGET_HEVC)
a6c89e96 2417 tee_config_device_secure(DMC_DEV_ID_HEVC, secure);
fe96802b
NQ
2418
2419 if (vdec_stream_based(vdec) &&
2420 ((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
2421 (vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
2422 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
2423 }
2424
2425 vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
2426 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
2427}
2428
b9164398
NQ
2429/* The vdec core thread manages all decoder instances in the active list. When
2430 * a vdec is added into the active list, it can only be in one of two statuses:
2431 * VDEC_STATUS_CONNECTED(the decoder does not own HW resources and is ready to run)
2432 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
2433 * Removing a decoder from active list is only performed within core thread.
2434 * Adding a decoder into active list is performed from user thread.
2435 */
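/*
 * Status flow handled below (summary of the code in this thread):
 * CONNECTED --run_ready()/run()--> ACTIVE --vdec_core_finish_run()-->
 * CONNECTED; when next_status becomes DISCONNECTED the vdec is moved to
 * disconnecting_list and completed via inactive_done.
 */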
2436static int vdec_core_thread(void *data)
2437{
b9164398 2438 struct vdec_core_s *core = (struct vdec_core_s *)data;
158de7c4 2439 struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
05afa03d 2440 int i;
b9164398
NQ
2441
2442 sched_setscheduler(current, SCHED_FIFO, &param);
2443
2444 allow_signal(SIGTERM);
05afa03d 2445
b9164398 2446 while (down_interruptible(&core->sem) == 0) {
a6c89e96
NQ
2447 struct vdec_s *vdec, *tmp, *worker;
2448 unsigned long sched_mask = 0;
b9164398
NQ
2449 LIST_HEAD(disconnecting_list);
2450
2451 if (kthread_should_stop())
2452 break;
6da7a8e8 2453 mutex_lock(&vdec_mutex);
05afa03d
PY
2454
2455 if (core->parallel_dec == 1) {
2456 for (i = VDEC_1; i < VDEC_MAX; i++) {
2457 core->power_ref_mask =
2458 core->power_ref_count[i] > 0 ?
2459 (core->power_ref_mask | (1 << i)) :
2460 (core->power_ref_mask & ~(1 << i));
2461 }
2462 }
b9164398 2463 /* clean up previous active vdec's input */
a6c89e96
NQ
2464 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
2465 unsigned long mask = vdec->sched_mask &
2466 (vdec->active_mask ^ vdec->sched_mask);
2467
2468 vdec_route_interrupt(vdec, mask, false);
2469
2470#ifdef VDEC_DEBUG_SUPPORT
2471 update_profi_clk_stop(vdec, mask, get_current_clk());
2472#endif
2473 /*
2474 * If decoder released some core resources (mask), then
2475 * check if these core resources are associated
2476 * with any input side and do input clean up accordingly
2477 */
2478 if (vdec_core_with_input(mask)) {
2479 struct vdec_input_s *input = &vdec->input;
2480 while (!list_empty(
2481 &input->vframe_chunk_list)) {
2482 struct vframe_chunk_s *chunk =
2483 vdec_input_next_chunk(input);
87046a60 2484 if (chunk && (chunk->flag &
2485 VFRAME_CHUNK_FLAG_CONSUMED))
a6c89e96
NQ
2486 vdec_input_release_chunk(input,
2487 chunk);
2488 else
2489 break;
2490 }
2491
2492 vdec_save_input_context(vdec);
b9164398
NQ
2493 }
2494
a6c89e96
NQ
2495 vdec->sched_mask &= ~mask;
2496 core->sched_mask &= ~mask;
b9164398
NQ
2497 }
2498
2499 /*
e0614bf7 2500 *todo:
b9164398
NQ
2501 * this is the case when the decoder is in active mode and
2502 * the system side wants to stop it. Currently we rely on
2503 * the decoder instance to go back to VDEC_STATUS_CONNECTED
2504 * from VDEC_STATUS_ACTIVE on its own. However, if for some
2505 * reason the decoder cannot exit by itself (dead decoding
2506 * or whatever), then we may have to add another vdec API
2507 * to kill the vdec and release its HW resource and make it
2508 * become inactive again.
2509 * if ((core->active_vdec) &&
2510 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
2511 * }
2512 */
2513
b9164398
NQ
2514 /* check disconnected decoders */
2515 list_for_each_entry_safe(vdec, tmp,
2516 &core->connected_vdec_list, list) {
2517 if ((vdec->status == VDEC_STATUS_CONNECTED) &&
2518 (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
05afa03d
PY
2519 if (core->parallel_dec == 1) {
2520 if (vdec_core->active_hevc == vdec)
2521 vdec_core->active_hevc = NULL;
2522 if (vdec_core->active_vdec == vdec)
2523 vdec_core->active_vdec = NULL;
2524 }
2525 if (core->last_vdec == vdec)
2526 core->last_vdec = NULL;
b9164398
NQ
2527 list_move(&vdec->list, &disconnecting_list);
2528 }
2529 }
6da7a8e8 2530 mutex_unlock(&vdec_mutex);
a6c89e96 2531 /* elect next vdec to be scheduled */
05afa03d 2532 vdec = core->last_vdec;
a6c89e96
NQ
2533 if (vdec) {
2534 vdec = list_entry(vdec->list.next, struct vdec_s, list);
b9164398
NQ
2535 list_for_each_entry_from(vdec,
2536 &core->connected_vdec_list, list) {
a6c89e96
NQ
2537 sched_mask = vdec_schedule_mask(vdec,
2538 core->sched_mask);
2539 if (!sched_mask)
2540 continue;
2541 sched_mask = vdec_ready_to_run(vdec,
2542 sched_mask);
2543 if (sched_mask)
b9164398
NQ
2544 break;
2545 }
2546
a6c89e96
NQ
2547 if (&vdec->list == &core->connected_vdec_list)
2548 vdec = NULL;
2549 }
2550
2551 if (!vdec) {
2552 /* search from beginning */
2553 list_for_each_entry(vdec,
2554 &core->connected_vdec_list, list) {
2555 sched_mask = vdec_schedule_mask(vdec,
2556 core->sched_mask);
05afa03d 2557 if (vdec == core->last_vdec) {
a6c89e96
NQ
2558 if (!sched_mask) {
2559 vdec = NULL;
b9164398 2560 break;
a6c89e96
NQ
2561 }
2562
2563 sched_mask = vdec_ready_to_run(vdec,
2564 sched_mask);
b9164398 2565
a6c89e96 2566 if (!sched_mask) {
b9164398
NQ
2567 vdec = NULL;
2568 break;
2569 }
a6c89e96 2570 break;
b9164398 2571 }
a6c89e96
NQ
2572
2573 if (!sched_mask)
2574 continue;
2575
2576 sched_mask = vdec_ready_to_run(vdec,
2577 sched_mask);
2578 if (sched_mask)
2579 break;
b9164398
NQ
2580 }
2581
2582 if (&vdec->list == &core->connected_vdec_list)
2583 vdec = NULL;
b9164398
NQ
2584 }
2585
a6c89e96
NQ
2586 worker = vdec;
2587
2588 if (vdec) {
2589 unsigned long mask = sched_mask;
2590 unsigned long i;
2591
2592 /* setting active_mask should be atomic.
2593 * it can be modified by decoder driver callbacks.
2594 */
2595 while (sched_mask) {
2596 i = __ffs(sched_mask);
2597 set_bit(i, &vdec->active_mask);
2598 sched_mask &= ~(1 << i);
2599 }
2600
2601 /* vdec's sched_mask is only set from core thread */
2602 vdec->sched_mask |= mask;
05afa03d
PY
2603 if (core->last_vdec) {
2604 if ((core->last_vdec != vdec) &&
2605 (core->last_vdec->mc_type != vdec->mc_type))
158de7c4
HZ
2606 vdec->mc_loaded = 0;/*clear for reload firmware*/
2607 }
05afa03d 2608 core->last_vdec = vdec;
158de7c4
HZ
2609 if (debug & 2)
2610 vdec->mc_loaded = 0;/*always reload firmware*/
b9164398
NQ
2611 vdec_set_status(vdec, VDEC_STATUS_ACTIVE);
2612
a6c89e96 2613 core->sched_mask |= mask;
05afa03d
PY
2614 if (core->parallel_dec == 1)
2615 vdec_save_active_hw(vdec);
fe96802b 2616#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
2617 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
2618#endif
a6c89e96
NQ
2619 vdec_prepare_run(vdec, mask);
2620#ifdef VDEC_DEBUG_SUPPORT
2621 inc_profi_count(mask, vdec->run_count);
2622 update_profi_clk_run(vdec, mask, get_current_clk());
2623#endif
2624 vdec->run(vdec, mask, vdec_callback, core);
2625
fe96802b 2626
a6c89e96
NQ
2627 /* we have some cores scheduled, keep working until
2628 * all vdecs are checked with no cores to schedule
2629 */
05afa03d
PY
2630 if (core->parallel_dec == 1) {
2631 if (vdec_core->vdec_combine_flag == 0)
2632 up(&core->sem);
2633 } else
2634 up(&core->sem);
b9164398
NQ
2635 }
2636
2637 /* remove disconnected decoder from active list */
2638 list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
2639 list_del(&vdec->list);
2640 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
05afa03d 2641 /*core->last_vdec = NULL;*/
b9164398
NQ
2642 complete(&vdec->inactive_done);
2643 }
2644
a6c89e96
NQ
2645 /* if there is no new work scheduled and nothing
2646 * is running, sleep briefly (1~2 ms) and retry
2647 */
05afa03d
PY
2648 if (core->parallel_dec == 1) {
2649 if (vdec_core->vdec_combine_flag == 0) {
2650 if ((!worker) &&
2651 ((core->sched_mask != core->power_ref_mask)) &&
2652 (atomic_read(&vdec_core->vdec_nr) > 0)) {
2653 usleep_range(1000, 2000);
2654 up(&core->sem);
2655 }
2656 } else {
2657 if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
2658 usleep_range(1000, 2000);
2659 up(&core->sem);
2660 }
2661 }
2662 } else if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
14d1803e 2663 usleep_range(1000, 2000);
b9164398
NQ
2664 up(&core->sem);
2665 }
3f4a083c 2666
b9164398
NQ
2667 }
2668
2669 return 0;
2670}
2671
2672#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
2673static bool test_hevc(u32 decomp_addr, u32 us_delay)
2674{
2675 int i;
2676
2677 /* SW_RESET IPP */
2678 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
2679 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);
2680
2681 /* initialize all canvas table */
2682 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
2683 for (i = 0; i < 32; i++)
2684 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
2685 0x1 | (i << 8) | decomp_addr);
2686 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
2687 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
2688 for (i = 0; i < 32; i++)
2689 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
2690
2691 /* Initialize mcrcc */
2692 WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
2693 WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
2694 WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
2695 WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
2696
2697 /* Decomp initialize */
2698 WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
2699 WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
2700
2701 /* Frame level initialization */
2702 WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
2703 WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
2704 WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
2705 WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));
2706
2707 WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
2708 WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);
2709
2710 /* Enable SWIMP mode */
2711 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);
2712
2713 /* Enable frame */
2714 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
2715 WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);
2716
2717 /* Send SW-command CTB info */
2718 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);
2719
2720 /* Send PU_command */
2721 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
2722 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
2723 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
2724 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);
2725
2726 udelay(us_delay);
2727
2728 WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);
2729
2730 return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
2731}
2732
865e748b
NQ
2733void vdec_power_reset(void)
2734{
2735 /* enable vdec1 isolation */
2736 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2737 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
2738 /* power off vdec1 memories */
2739 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
2740 /* vdec1 power off */
2741 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2742 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
2743
2744 if (has_vdec2()) {
2745 /* enable vdec2 isolation */
2746 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2747 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300);
2748 /* power off vdec2 memories */
2749 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
2750 /* vdec2 power off */
2751 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2752 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30);
2753 }
2754
2755 if (has_hdec()) {
2756 /* enable hcodec isolation */
2757 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2758 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30);
2759 /* power off hcodec memories */
2760 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
2761 /* hcodec power off */
2762 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2763 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
2764 }
2765
2766 if (has_hevc_vdec()) {
2767 /* enable hevc isolation */
2768 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2769 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00);
2770 /* power off hevc memories */
2771 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
2772 /* hevc power off */
2773 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2774 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0);
2775 }
2776}
2777EXPORT_SYMBOL(vdec_power_reset);
2778
b9164398
NQ
2779void vdec_poweron(enum vdec_type_e core)
2780{
2781 void *decomp_addr = NULL;
2782 dma_addr_t decomp_dma_addr;
2783 u32 decomp_addr_aligned = 0;
2784 int hevc_loop = 0;
2785
2786 if (core >= VDEC_MAX)
2787 return;
2788
2789 mutex_lock(&vdec_mutex);
2790
2791 vdec_core->power_ref_count[core]++;
2792 if (vdec_core->power_ref_count[core] > 1) {
2793 mutex_unlock(&vdec_mutex);
2794 return;
2795 }
2796
2797 if (vdec_on(core)) {
2798 mutex_unlock(&vdec_mutex);
2799 return;
2800 }
2801
2802 if (hevc_workaround_needed() &&
2803 (core == VDEC_HEVC)) {
2804 decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
2805 SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);
2806
2807 if (decomp_addr) {
2808 decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
2809 memset((u8 *)decomp_addr +
2810 (decomp_addr_aligned - decomp_dma_addr),
2811 0xff, SZ_4K);
2812 } else
2813 pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
2814 }
2815
2816 if (core == VDEC_1) {
2817 /* vdec1 power on */
2818 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2819 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~0xc);
2820 /* wait 10uS */
2821 udelay(10);
2822 /* vdec1 soft reset */
2823 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2824 WRITE_VREG(DOS_SW_RESET0, 0);
2825 /* enable vdec1 clock */
2826 /*
e0614bf7
ZZ
2827 *add power-on vdec clock level setting, only for the m8 chip;
2828 * m8baby and m8m2 can dynamically adjust the vdec clock,
2829 * so they power on with the default clock level
2830 */
65a98643 2831 amports_switch_gate("clk_vdec_mux", 1);
b9164398
NQ
2832 vdec_clock_hi_enable();
2833 /* power up vdec memories */
2834 WRITE_VREG(DOS_MEM_PD_VDEC, 0);
2835 /* remove vdec1 isolation */
2836 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2837 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~0xC0);
2838 /* reset DOS top registers */
2839 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
63e810c0
CG
2840 if (get_cpu_major_id() >=
2841 AM_MESON_CPU_MAJOR_ID_GXBB) {
b9164398 2842 /*
e0614bf7
ZZ
2843 *enable VDEC_1 DMC request
2844 */
b9164398
NQ
2845 unsigned long flags;
2846
2847 spin_lock_irqsave(&vdec_spin_lock, flags);
2848 codec_dmcbus_write(DMC_REQ_CTRL,
2849 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
2850 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2851 }
2852 } else if (core == VDEC_2) {
2853 if (has_vdec2()) {
2854 /* vdec2 power on */
2855 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2856 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2857 ~0x30);
2858 /* wait 10uS */
2859 udelay(10);
2860 /* vdec2 soft reset */
2861 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2862 WRITE_VREG(DOS_SW_RESET2, 0);
2863 /* enable vdec1 clock */
2864 vdec2_clock_hi_enable();
2865 /* power up vdec memories */
2866 WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
2867 /* remove vdec2 isolation */
2868 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2869 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2870 ~0x300);
2871 /* reset DOS top registers */
2872 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2873 }
2874 } else if (core == VDEC_HCODEC) {
2875 if (has_hdec()) {
2876 /* hcodec power on */
2877 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2878 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2879 ~0x3);
2880 /* wait 10uS */
2881 udelay(10);
2882 /* hcodec soft reset */
2883 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2884 WRITE_VREG(DOS_SW_RESET1, 0);
2885 /* enable hcodec clock */
2886 hcodec_clock_enable();
2887 /* power up hcodec memories */
2888 WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
2889 /* remove hcodec isolation */
2890 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2891 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2892 ~0x30);
2893 }
2894 } else if (core == VDEC_HEVC) {
2895 if (has_hevc_vdec()) {
2896 bool hevc_fixed = false;
2897
2898 while (!hevc_fixed) {
2899 /* hevc power on */
2900 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2901 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2902 ~0xc0);
2903 /* wait 10uS */
2904 udelay(10);
2905 /* hevc soft reset */
2906 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2907 WRITE_VREG(DOS_SW_RESET3, 0);
2908 /* enable hevc clock */
65a98643
NQ
2909 amports_switch_gate("clk_hevc_mux", 1);
2910 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
2911 amports_switch_gate("clk_hevcb_mux", 1);
b9164398 2912 hevc_clock_hi_enable();
118bcc65 2913 hevc_back_clock_hi_enable();
b9164398
NQ
2914 /* power up hevc memories */
2915 WRITE_VREG(DOS_MEM_PD_HEVC, 0);
2916 /* remove hevc isolation */
2917 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2918 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2919 ~0xc00);
2920
2921 if (!hevc_workaround_needed())
2922 break;
2923
2924 if (decomp_addr)
2925 hevc_fixed = test_hevc(
2926 decomp_addr_aligned, 20);
2927
2928 if (!hevc_fixed) {
2929 hevc_loop++;
2930
2931 mutex_unlock(&vdec_mutex);
2932
2933 if (hevc_loop >= HEVC_TEST_LIMIT) {
2934 pr_warn("hevc power sequence over limit\n");
2935 pr_warn("=====================================================\n");
2936 pr_warn(" This chip is identified to have HW failure.\n");
2937 pr_warn(" Please contact sqa-platform to replace the platform.\n");
2938 pr_warn("=====================================================\n");
2939
2940 panic("Force panic for chip detection !!!\n");
2941
2942 break;
2943 }
2944
2945 vdec_poweroff(VDEC_HEVC);
2946
2947 mdelay(10);
2948
2949 mutex_lock(&vdec_mutex);
2950 }
2951 }
2952
2953 if (hevc_loop > hevc_max_reset_count)
2954 hevc_max_reset_count = hevc_loop;
2955
2956 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2957 udelay(10);
2958 WRITE_VREG(DOS_SW_RESET3, 0);
2959 }
2960 }
2961
2962 if (decomp_addr)
2963 codec_mm_dma_free_coherent(MEM_NAME,
2964 SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);
2965
2966 mutex_unlock(&vdec_mutex);
2967}
2968EXPORT_SYMBOL(vdec_poweron);
2969
2970void vdec_poweroff(enum vdec_type_e core)
2971{
2972 if (core >= VDEC_MAX)
2973 return;
2974
2975 mutex_lock(&vdec_mutex);
2976
2977 vdec_core->power_ref_count[core]--;
2978 if (vdec_core->power_ref_count[core] > 0) {
2979 mutex_unlock(&vdec_mutex);
2980 return;
2981 }
2982
2983 if (core == VDEC_1) {
63e810c0
CG
2984 if (get_cpu_major_id() >=
2985 AM_MESON_CPU_MAJOR_ID_GXBB) {
b9164398
NQ
2986 /* disable VDEC_1 DMC REQ*/
2987 unsigned long flags;
2988
2989 spin_lock_irqsave(&vdec_spin_lock, flags);
2990 codec_dmcbus_write(DMC_REQ_CTRL,
2991 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
2992 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2993 udelay(10);
2994 }
2995 /* enable vdec1 isolation */
2996 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2997 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
2998 /* power off vdec1 memories */
2999 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
3000 /* disable vdec1 clock */
3001 vdec_clock_off();
3002 /* vdec1 power off */
3003 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3004 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
3005 } else if (core == VDEC_2) {
3006 if (has_vdec2()) {
3007 /* enable vdec2 isolation */
3008 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3009 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
3010 0x300);
3011 /* power off vdec2 memories */
3012 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
3013 /* disable vdec2 clock */
3014 vdec2_clock_off();
3015 /* vdec2 power off */
3016 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3017 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
3018 0x30);
3019 }
3020 } else if (core == VDEC_HCODEC) {
3021 if (has_hdec()) {
3022 /* enable hcodec isolation */
3023 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3024 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
3025 0x30);
3026 /* power off hcodec memories */
3027 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
3028 /* disable hcodec clock */
3029 hcodec_clock_off();
3030 /* hcodec power off */
3031 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3032 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
3033 }
3034 } else if (core == VDEC_HEVC) {
3035 if (has_hevc_vdec()) {
28e318df
NQ
3036 if (no_powerdown == 0) {
3037 /* enable hevc isolation */
3038 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
b9164398
NQ
3039 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
3040 0xc00);
3041 /* power off hevc memories */
3042 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
a6c89e96 3043
b9164398
NQ
3044 /* disable hevc clock */
3045 hevc_clock_off();
63e810c0 3046 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
a6c89e96
NQ
3047 hevc_back_clock_off();
3048
b9164398
NQ
3049 /* hevc power off */
3050 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3051 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
3052 0xc0);
28e318df
NQ
3053 } else {
3054 pr_info("!!!!!!!!not power down\n");
3055 hevc_reset_core(NULL);
3056 no_powerdown = 0;
3057 }
b9164398
NQ
3058 }
3059 }
3060 mutex_unlock(&vdec_mutex);
3061}
3062EXPORT_SYMBOL(vdec_poweroff);
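/*
 * Usage note: vdec_poweron()/vdec_poweroff() are reference counted per
 * core (power_ref_count), so calls must be balanced, e.g.:
 *
 *   vdec_poweron(VDEC_HEVC);
 *   ...use the HEVC core...
 *   vdec_poweroff(VDEC_HEVC);
 *
 * Only the first poweron and the last poweroff touch the hardware.
 */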
3063
3064bool vdec_on(enum vdec_type_e core)
3065{
3066 bool ret = false;
3067
3068 if (core == VDEC_1) {
3069 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc) == 0) &&
3070 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
3071 ret = true;
3072 } else if (core == VDEC_2) {
3073 if (has_vdec2()) {
3074 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
3075 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
3076 ret = true;
3077 }
3078 } else if (core == VDEC_HCODEC) {
3079 if (has_hdec()) {
3080 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x3) == 0) &&
3081 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
3082 ret = true;
3083 }
3084 } else if (core == VDEC_HEVC) {
3085 if (has_hevc_vdec()) {
3086 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc0) == 0) &&
3087 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
3088 ret = true;
3089 }
3090 }
3091
3092 return ret;
3093}
3094EXPORT_SYMBOL(vdec_on);
3095
3096#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
3097void vdec_poweron(enum vdec_type_e core)
3098{
3099 ulong flags;
3100
3101 spin_lock_irqsave(&lock, flags);
3102
3103 if (core == VDEC_1) {
3104 /* vdec1 soft reset */
3105 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
3106 WRITE_VREG(DOS_SW_RESET0, 0);
3107 /* enable vdec1 clock */
3108 vdec_clock_enable();
3109 /* reset DOS top registers */
3110 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
3111 } else if (core == VDEC_2) {
3112 /* vdec2 soft reset */
3113 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
3114 WRITE_VREG(DOS_SW_RESET2, 0);
3115 /* enable vdec2 clock */
3116 vdec2_clock_enable();
3117 /* reset DOS top registers */
3118 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
3119 } else if (core == VDEC_HCODEC) {
3120 /* hcodec soft reset */
3121 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
3122 WRITE_VREG(DOS_SW_RESET1, 0);
3123 /* enable hcodec clock */
3124 hcodec_clock_enable();
3125 }
3126
3127 spin_unlock_irqrestore(&lock, flags);
3128}
3129
3130void vdec_poweroff(enum vdec_type_e core)
3131{
3132 ulong flags;
3133
3134 spin_lock_irqsave(&lock, flags);
3135
3136 if (core == VDEC_1) {
3137 /* disable vdec1 clock */
3138 vdec_clock_off();
3139 } else if (core == VDEC_2) {
3140 /* disable vdec2 clock */
3141 vdec2_clock_off();
3142 } else if (core == VDEC_HCODEC) {
3143 /* disable hcodec clock */
3144 hcodec_clock_off();
3145 }
3146
3147 spin_unlock_irqrestore(&lock, flags);
3148}
3149
3150bool vdec_on(enum vdec_type_e core)
3151{
3152 bool ret = false;
3153
3154 if (core == VDEC_1) {
3155 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
3156 ret = true;
3157 } else if (core == VDEC_2) {
3158 if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
3159 ret = true;
3160 } else if (core == VDEC_HCODEC) {
3161 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
3162 ret = true;
3163 }
3164
3165 return ret;
3166}
3167#endif
3168
3169int vdec_source_changed(int format, int width, int height, int fps)
3170{
3171 /* todo: add level routines for clock adjustment per chips */
3172 int ret = -1;
3173 static int on_setting;
3174
3175 if (on_setting > 0)
3176 return ret;/*on changing clk,ignore this change*/
3177
3178 if (vdec_source_get(VDEC_1) == width * height * fps)
3179 return ret;
3180
3181
3182 on_setting = 1;
3183 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 3184 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
3185 width, height, fps, vdec_clk_get(VDEC_1));
3186 on_setting = 0;
3187 return ret;
3188
3189}
3190EXPORT_SYMBOL(vdec_source_changed);
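/*
 * Example (illustrative): a decoder switching to 1080p30 would call
 *
 *   vdec_source_changed(VFORMAT_H264, 1920, 1080, 30);
 *
 * which re-evaluates the VDEC_1 clock via
 * vdec_source_changed_for_clk_set() unless the source load is unchanged.
 */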
3191
ecd31bc4 3192void vdec_disable_DMC(struct vdec_s *vdec)
3193{
3194 /*close first, then wait for pending requests to end; timing suggestion from vlsi*/
3195 unsigned long flags;
3196 spin_lock_irqsave(&vdec_spin_lock, flags);
3197 codec_dmcbus_write(DMC_REQ_CTRL,
3198 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
3199 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3200
3201 while (!(codec_dmcbus_read(DMC_CHAN_STS)
3202 & (1 << 13)))
3203 ;
3204}
3205EXPORT_SYMBOL(vdec_disable_DMC);
3206
3207void vdec_enable_DMC(struct vdec_s *vdec)
3208{
3209 unsigned long flags;
3210 spin_lock_irqsave(&vdec_spin_lock, flags);
3211 codec_dmcbus_write(DMC_REQ_CTRL,
3212 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
3213 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3214}
3215
3216EXPORT_SYMBOL(vdec_enable_DMC);
3217
87046a60 3218void vdec_reset_core(struct vdec_s *vdec)
3219{
3220 unsigned long flags;
3221 spin_lock_irqsave(&vdec_spin_lock, flags);
3222 codec_dmcbus_write(DMC_REQ_CTRL,
3223 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
3224 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3225
3226 while (!(codec_dmcbus_read(DMC_CHAN_STS)
3227 & (1 << 13)))
3228 ;
3229 /*
3230 * 2: assist
3231 * 3: vld_reset
3232 * 4: vld_part_reset
3233 * 5: vfifo reset
3234 * 6: iqidct
3235 * 7: mc
3236 * 8: dblk
3237 * 9: pic_dc
3238 * 10: psc
3239 * 11: mcpu
3240 * 12: ccpu
3241 * 13: ddr
3242 * 14: afifo
3243 */
3244
3245 WRITE_VREG(DOS_SW_RESET0,
3246 (1<<3)|(1<<4)|(1<<5));
3247
3248 WRITE_VREG(DOS_SW_RESET0, 0);
3249
3250 spin_lock_irqsave(&vdec_spin_lock, flags);
3251 codec_dmcbus_write(DMC_REQ_CTRL,
3252 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
3253 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3254}
3255EXPORT_SYMBOL(vdec_reset_core);
3256
fe96802b
NQ
3257void hevc_reset_core(struct vdec_s *vdec)
3258{
3259 unsigned long flags;
3260 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
3261 spin_lock_irqsave(&vdec_spin_lock, flags);
3262 codec_dmcbus_write(DMC_REQ_CTRL,
3263 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
3264 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3265
3266 while (!(codec_dmcbus_read(DMC_CHAN_STS)
3267 & (1 << 4)))
3268 ;
3269
28e318df 3270 if (vdec == NULL || input_frame_based(vdec))
fe96802b
NQ
3271 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
3272
3273 /*
3274 * 2: assist
3275 * 3: parser
3276 * 4: parser_state
3277 * 8: dblk
3278 * 11:mcpu
3279 * 12:ccpu
3280 * 13:ddr
3281 * 14:iqit
3282 * 15:ipp
3283 * 17:qdct
3284 * 18:mpred
3285 * 19:sao
3286 * 24:hevc_afifo
3287 */
3288 WRITE_VREG(DOS_SW_RESET3,
3289 (1<<3)|(1<<4)|(1<<8)|(1<<11)|
3290 (1<<12)|(1<<13)|(1<<14)|(1<<15)|
3291 (1<<17)|(1<<18)|(1<<19)|(1<<24));
3292
3293 WRITE_VREG(DOS_SW_RESET3, 0);
3294
3295
3296 spin_lock_irqsave(&vdec_spin_lock, flags);
3297 codec_dmcbus_write(DMC_REQ_CTRL,
3298 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4));
3299 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3300
3301}
3302EXPORT_SYMBOL(hevc_reset_core);
3303
b9164398
NQ
3304int vdec2_source_changed(int format, int width, int height, int fps)
3305{
3306 int ret = -1;
3307 static int on_setting;
3308
3309 if (has_vdec2()) {
3310 /* todo: add level routines for clock adjustment per chips */
3311 if (on_setting != 0)
3312 return ret;/*on changing clk,ignore this change*/
3313
3314 if (vdec_source_get(VDEC_2) == width * height * fps)
3315 return ret;
3316
3317 on_setting = 1;
3318 ret = vdec_source_changed_for_clk_set(format,
3319 width, height, fps);
5b851ff9 3320 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
3321 width, height, fps, vdec_clk_get(VDEC_2));
3322 on_setting = 0;
3323 return ret;
3324 }
3325 return 0;
3326}
3327EXPORT_SYMBOL(vdec2_source_changed);
3328
3329int hevc_source_changed(int format, int width, int height, int fps)
3330{
3331 /* todo: add level routines for clock adjustment per chips */
3332 int ret = -1;
3333 static int on_setting;
3334
3335 if (on_setting != 0)
3336 return ret;/*on changing clk,ignore this change*/
3337
3338 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
3339 return ret;
3340
3341 on_setting = 1;
3342 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 3343 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
3344 width, height, fps, vdec_clk_get(VDEC_HEVC));
3345 on_setting = 0;
3346
3347 return ret;
3348}
3349EXPORT_SYMBOL(hevc_source_changed);
3350
b9164398
NQ
3351static struct am_reg am_risc[] = {
3352 {"MSP", 0x300},
3353 {"MPSR", 0x301},
3354 {"MCPU_INT_BASE", 0x302},
3355 {"MCPU_INTR_GRP", 0x303},
3356 {"MCPU_INTR_MSK", 0x304},
3357 {"MCPU_INTR_REQ", 0x305},
3358 {"MPC-P", 0x306},
3359 {"MPC-D", 0x307},
3360 {"MPC_E", 0x308},
3361 {"MPC_W", 0x309},
3362 {"CSP", 0x320},
3363 {"CPSR", 0x321},
3364 {"CCPU_INT_BASE", 0x322},
3365 {"CCPU_INTR_GRP", 0x323},
3366 {"CCPU_INTR_MSK", 0x324},
3367 {"CCPU_INTR_REQ", 0x325},
3368 {"CPC-P", 0x326},
3369 {"CPC-D", 0x327},
3370 {"CPC_E", 0x328},
3371 {"CPC_W", 0x329},
3372 {"AV_SCRATCH_0", 0x09c0},
3373 {"AV_SCRATCH_1", 0x09c1},
3374 {"AV_SCRATCH_2", 0x09c2},
3375 {"AV_SCRATCH_3", 0x09c3},
3376 {"AV_SCRATCH_4", 0x09c4},
3377 {"AV_SCRATCH_5", 0x09c5},
3378 {"AV_SCRATCH_6", 0x09c6},
3379 {"AV_SCRATCH_7", 0x09c7},
3380 {"AV_SCRATCH_8", 0x09c8},
3381 {"AV_SCRATCH_9", 0x09c9},
3382 {"AV_SCRATCH_A", 0x09ca},
3383 {"AV_SCRATCH_B", 0x09cb},
3384 {"AV_SCRATCH_C", 0x09cc},
3385 {"AV_SCRATCH_D", 0x09cd},
3386 {"AV_SCRATCH_E", 0x09ce},
3387 {"AV_SCRATCH_F", 0x09cf},
3388 {"AV_SCRATCH_G", 0x09d0},
3389 {"AV_SCRATCH_H", 0x09d1},
3390 {"AV_SCRATCH_I", 0x09d2},
3391 {"AV_SCRATCH_J", 0x09d3},
3392 {"AV_SCRATCH_K", 0x09d4},
3393 {"AV_SCRATCH_L", 0x09d5},
3394 {"AV_SCRATCH_M", 0x09d6},
3395 {"AV_SCRATCH_N", 0x09d7},
3396};
3397
3398static ssize_t amrisc_regs_show(struct class *class,
3399 struct class_attribute *attr, char *buf)
3400{
3401 char *pbuf = buf;
3402 struct am_reg *regs = am_risc;
3403 int rsize = sizeof(am_risc) / sizeof(struct am_reg);
3404 int i;
e0614bf7 3405 unsigned int val;
b9164398
NQ
3406 ssize_t ret;
3407
63e810c0 3408 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
b9164398
NQ
3409 mutex_lock(&vdec_mutex);
3410 if (!vdec_on(VDEC_1)) {
3411 mutex_unlock(&vdec_mutex);
3412 pbuf += sprintf(pbuf, "amrisc is power off\n");
3413 ret = pbuf - buf;
3414 return ret;
3415 }
63e810c0 3416 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3417 /*TODO:M6 define */
3418 /*
3419 * switch_mod_gate_by_type(MOD_VDEC, 1);
3420 */
3421 amports_switch_gate("vdec", 1);
3422 }
3423 pbuf += sprintf(pbuf, "amrisc registers show:\n");
3424 for (i = 0; i < rsize; i++) {
3425 val = READ_VREG(regs[i].offset);
3426 pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
3427 regs[i].name, regs[i].offset, val, val);
3428 }
63e810c0 3429 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
b9164398 3430 mutex_unlock(&vdec_mutex);
63e810c0 3431 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3432 /*TODO:M6 define */
3433 /*
3434 * switch_mod_gate_by_type(MOD_VDEC, 0);
3435 */
3436 amports_switch_gate("vdec", 0);
3437 }
3438 ret = pbuf - buf;
3439 return ret;
3440}
3441
3442static ssize_t dump_trace_show(struct class *class,
3443 struct class_attribute *attr, char *buf)
3444{
3445 int i;
3446 char *pbuf = buf;
3447 ssize_t ret;
3448 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
3449
3450 if (!trace_buf) {
3451 pbuf += sprintf(pbuf, "No Memory bug\n");
3452 ret = pbuf - buf;
3453 return ret;
3454 }
63e810c0 3455 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
b9164398
NQ
3456 mutex_lock(&vdec_mutex);
3457 if (!vdec_on(VDEC_1)) {
3458 mutex_unlock(&vdec_mutex);
3459 kfree(trace_buf);
3460 pbuf += sprintf(pbuf, "amrisc is power off\n");
3461 ret = pbuf - buf;
3462 return ret;
3463 }
63e810c0 3464 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3465 /*TODO:M6 define */
3466 /*
3467 * switch_mod_gate_by_type(MOD_VDEC, 1);
3468 */
3469 amports_switch_gate("vdec", 1);
3470 }
3471 pr_info("dump trace steps:%d start\n", debug_trace_num);
3472 i = 0;
3473 while (i <= debug_trace_num - 16) {
3474 trace_buf[i] = READ_VREG(MPC_E);
3475 trace_buf[i + 1] = READ_VREG(MPC_E);
3476 trace_buf[i + 2] = READ_VREG(MPC_E);
3477 trace_buf[i + 3] = READ_VREG(MPC_E);
3478 trace_buf[i + 4] = READ_VREG(MPC_E);
3479 trace_buf[i + 5] = READ_VREG(MPC_E);
3480 trace_buf[i + 6] = READ_VREG(MPC_E);
3481 trace_buf[i + 7] = READ_VREG(MPC_E);
3482 trace_buf[i + 8] = READ_VREG(MPC_E);
3483 trace_buf[i + 9] = READ_VREG(MPC_E);
3484 trace_buf[i + 10] = READ_VREG(MPC_E);
3485 trace_buf[i + 11] = READ_VREG(MPC_E);
3486 trace_buf[i + 12] = READ_VREG(MPC_E);
3487 trace_buf[i + 13] = READ_VREG(MPC_E);
3488 trace_buf[i + 14] = READ_VREG(MPC_E);
3489 trace_buf[i + 15] = READ_VREG(MPC_E);
3490 i += 16;
3491 };
3492 pr_info("dump trace steps:%d finished\n", debug_trace_num);
63e810c0 3493 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
b9164398 3494 mutex_unlock(&vdec_mutex);
63e810c0 3495 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3496 /*TODO:M6 define */
3497 /*
3498 * switch_mod_gate_by_type(MOD_VDEC, 0);
3499 */
3500 amports_switch_gate("vdec", 0);
3501 }
3502 for (i = 0; i < debug_trace_num; i++) {
3503 if (i % 4 == 0) {
3504 if (i % 16 == 0)
3505 pbuf += sprintf(pbuf, "\n");
3506 else if (i % 8 == 0)
3507 pbuf += sprintf(pbuf, " ");
3508 else /* 4 */
3509 pbuf += sprintf(pbuf, " ");
3510 }
3511 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
3512 }
3513 while (i < debug_trace_num)
3514 ;
3515 kfree(trace_buf);
3516 pbuf += sprintf(pbuf, "\n");
3517 ret = pbuf - buf;
3518 return ret;
3519}
3520
3521static ssize_t clock_level_show(struct class *class,
3522 struct class_attribute *attr, char *buf)
3523{
3524 char *pbuf = buf;
3525 size_t ret;
3526
3527 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
3528
3529 if (has_vdec2())
3530 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
3531
3532 if (has_hevc_vdec())
3533 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
3534
3535 ret = pbuf - buf;
3536 return ret;
3537}
3538
3539static ssize_t store_poweron_clock_level(struct class *class,
3540 struct class_attribute *attr,
3541 const char *buf, size_t size)
3542{
e0614bf7 3543 unsigned int val;
b9164398
NQ
3544 ssize_t ret;
3545
3546 /*ret = sscanf(buf, "%d", &val);*/
3547 ret = kstrtoint(buf, 0, &val);
3548
3549 if (ret != 0)
3550 return -EINVAL;
3551 poweron_clock_level = val;
3552 return size;
3553}
3554
3555static ssize_t show_poweron_clock_level(struct class *class,
3556 struct class_attribute *attr, char *buf)
3557{
3558 return sprintf(buf, "%d\n", poweron_clock_level);
3559}
3560
3561/*
e0614bf7
ZZ
3562 *if keep_vdec_mem == 1,
3563 *never release the vdec 64M memory,
3564 *so the next playback can start fast.
3565 */
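/*
 * Example (assuming the attribute is exposed as
 * /sys/class/vdec/keep_vdec_mem):
 *   echo 1 > /sys/class/vdec/keep_vdec_mem   (keep the buffer across plays)
 *   echo 0 > /sys/class/vdec/keep_vdec_mem   (allow it to be released)
 */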
b9164398
NQ
3566static ssize_t store_keep_vdec_mem(struct class *class,
3567 struct class_attribute *attr,
3568 const char *buf, size_t size)
3569{
e0614bf7 3570 unsigned int val;
b9164398
NQ
3571 ssize_t ret;
3572
3573 /*ret = sscanf(buf, "%d", &val);*/
3574 ret = kstrtoint(buf, 0, &val);
3575 if (ret != 0)
3576 return -EINVAL;
3577 keep_vdec_mem = val;
3578 return size;
3579}
3580
3581static ssize_t show_keep_vdec_mem(struct class *class,
3582 struct class_attribute *attr, char *buf)
3583{
3584 return sprintf(buf, "%d\n", keep_vdec_mem);
3585}
3586
a6c89e96
NQ
3587#ifdef VDEC_DEBUG_SUPPORT
3588static ssize_t store_debug(struct class *class,
3589 struct class_attribute *attr,
3590 const char *buf, size_t size)
3591{
3592 struct vdec_s *vdec;
3593 struct vdec_core_s *core = vdec_core;
3594 unsigned long flags;
3595
3596 unsigned id;
3597 unsigned val;
3598 ssize_t ret;
3599 char cbuf[32];
3600
3601 cbuf[0] = 0;
3602 ret = sscanf(buf, "%s %x %x", cbuf, &id, &val);
3603 /*pr_info(
3604 "%s(%s)=>ret %ld: %s, %x, %x\n",
3605 __func__, buf, ret, cbuf, id, val);*/
3606 if (strcmp(cbuf, "schedule") == 0) {
3607 pr_info("VDEC_DEBUG: force schedule\n");
3608 up(&core->sem);
3609 } else if (strcmp(cbuf, "power_off") == 0) {
3610 pr_info("VDEC_DEBUG: power off core %d\n", id);
3611 vdec_poweroff(id);
3612 } else if (strcmp(cbuf, "power_on") == 0) {
3613 pr_info("VDEC_DEBUG: power_on core %d\n", id);
3614 vdec_poweron(id);
3615 } else if (strcmp(cbuf, "wr") == 0) {
3616 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
3617 id, val);
3618 WRITE_VREG(id, val);
3619 } else if (strcmp(cbuf, "rd") == 0) {
3620 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
3621 id, READ_VREG(id));
3622 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
3623 pr_info(
3624 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
3625 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
3626 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
3627 }
3628
3629 flags = vdec_core_lock(vdec_core);
3630
3631 list_for_each_entry(vdec,
3632 &core->connected_vdec_list, list) {
3633 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
3634 if (((vdec->status == VDEC_STATUS_CONNECTED
3635 || vdec->status == VDEC_STATUS_ACTIVE)) &&
3636 (vdec->id == id)) {
3637 /*to add*/
3638 break;
3639 }
3640 }
3641 vdec_core_unlock(vdec_core, flags);
3642 return size;
3643}
3644
3645static ssize_t show_debug(struct class *class,
3646 struct class_attribute *attr, char *buf)
3647{
3648 char *pbuf = buf;
3649 struct vdec_s *vdec;
3650 struct vdec_core_s *core = vdec_core;
3651 unsigned long flags = vdec_core_lock(vdec_core);
1e37ecab 3652 u64 tmp;
a6c89e96
NQ
3653
3654 pbuf += sprintf(pbuf,
3655 "============== help:\n");
3656 pbuf += sprintf(pbuf,
3657 "'echo xxx > debug' usuage:\n");
3658 pbuf += sprintf(pbuf,
3659 "schedule - trigger schedule thread to run\n");
3660 pbuf += sprintf(pbuf,
3661 "power_off core_num - call vdec_poweroff(core_num)\n");
3662 pbuf += sprintf(pbuf,
3663 "power_on core_num - call vdec_poweron(core_num)\n");
3664 pbuf += sprintf(pbuf,
3665 "wr adr val - call WRITE_VREG(adr, val)\n");
3666 pbuf += sprintf(pbuf,
3667 "rd adr - call READ_VREG(adr)\n");
3668 pbuf += sprintf(pbuf,
3669 "read_hevc_clk_reg - read HHI register for hevc clk\n");
3670 pbuf += sprintf(pbuf,
3671 "===================\n");
3672
3673 pbuf += sprintf(pbuf,
3674 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
3675 list_for_each_entry(vdec,
3676 &core->connected_vdec_list, list) {
3677 enum vdec_type_e type;
976f3376
HZ
3678 if ((vdec->status == VDEC_STATUS_CONNECTED
3679 || vdec->status == VDEC_STATUS_ACTIVE)) {
a6c89e96
NQ
3680 for (type = VDEC_1; type < VDEC_MAX; type++) {
3681 if (vdec->core_mask & (1 << type)) {
3682 pbuf += sprintf(pbuf, "%s(%d):",
3683 vdec->vf_provider_name, type);
3684 pbuf += sprintf(pbuf, "\t%d",
3685 vdec->check_count[type]);
3686 pbuf += sprintf(pbuf, "\t%d",
3687 vdec->run_count[type]);
3688 pbuf += sprintf(pbuf, "\t%d",
3689 vdec->input_underrun_count[type]);
3690 pbuf += sprintf(pbuf, "\t%d",
3691 vdec->not_run_ready_count[type]);
1e37ecab
AX
3692 tmp = vdec->run_clk[type] * 100;
3693 do_div(tmp, vdec->total_clk[type]);
a6c89e96
NQ
3694 pbuf += sprintf(pbuf,
3695 "\t%d%%\n",
3696 vdec->total_clk[type] == 0 ? 0 :
1e37ecab 3697 (u32)tmp);
a6c89e96
NQ
3698 }
3699 }
976f3376 3700 }
a6c89e96
NQ
3701 }
3702
3703 vdec_core_unlock(vdec_core, flags);
3704 return pbuf - buf;
3705
3706}
3707#endif
b9164398
NQ
3708
3709/*irq num as same as .dts*/
3710/*
e0614bf7
ZZ
3711 * interrupts = <0 3 1
3712 * 0 23 1
3713 * 0 32 1
3714 * 0 43 1
3715 * 0 44 1
3716 * 0 45 1>;
3717 * interrupt-names = "vsync",
3718 * "demux",
3719 * "parser",
3720 * "mailbox_0",
3721 * "mailbox_1",
3722 * "mailbox_2";
3723 */
b9164398
NQ
3724s32 vdec_request_threaded_irq(enum vdec_irq_num num,
3725 irq_handler_t handler,
3726 irq_handler_t thread_fn,
3727 unsigned long irqflags,
3728 const char *devname, void *dev)
3729{
3730 s32 res_irq;
3731 s32 ret = 0;
3732
3733 if (num >= VDEC_IRQ_MAX) {
3734 pr_err("[%s] request irq error, irq num too big!", __func__);
3735 return -EINVAL;
3736 }
3737
3738 if (vdec_core->isr_context[num].irq < 0) {
3739 res_irq = platform_get_irq(
3740 vdec_core->vdec_core_platform_device, num);
3741 if (res_irq < 0) {
3742 pr_err("[%s] get irq error!", __func__);
3743 return -EINVAL;
3744 }
3745
3746 vdec_core->isr_context[num].irq = res_irq;
3747 vdec_core->isr_context[num].dev_isr = handler;
3748 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3749 vdec_core->isr_context[num].dev_id = dev;
3750
3751 ret = request_threaded_irq(res_irq,
3752 vdec_isr,
3753 vdec_thread_isr,
3754 (thread_fn) ? IRQF_ONESHOT : irqflags,
3755 devname,
3756 &vdec_core->isr_context[num]);
3757
3758 if (ret) {
3759 vdec_core->isr_context[num].irq = -1;
3760 vdec_core->isr_context[num].dev_isr = NULL;
3761 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3762 vdec_core->isr_context[num].dev_id = NULL;
3763
3764 pr_err("vdec irq register error for %s.\n", devname);
3765 return -EIO;
3766 }
3767 } else {
3768 vdec_core->isr_context[num].dev_isr = handler;
3769 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3770 vdec_core->isr_context[num].dev_id = dev;
3771 }
3772
3773 return ret;
3774}
3775EXPORT_SYMBOL(vdec_request_threaded_irq);
3776
3777s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
3778 const char *devname, void *dev)
3779{
5b851ff9 3780 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
b9164398
NQ
3781
3782 return vdec_request_threaded_irq(num,
3783 handler,
3784 NULL,/*no thread_fn*/
3785 IRQF_SHARED,
3786 devname,
3787 dev);
3788}
3789EXPORT_SYMBOL(vdec_request_irq);
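/*
 * Typical use from a decoder driver (sketch; the handler names and hw
 * pointer are placeholders, not symbols in this file):
 *
 *   vdec_request_threaded_irq(VDEC_IRQ_1, my_isr, my_threaded_isr,
 *           IRQF_ONESHOT, "vdec-mydec-irq", (void *)hw);
 *   ...
 *   vdec_free_irq(VDEC_IRQ_1, (void *)hw);
 *
 * Passing NULL as thread_fn (or using vdec_request_irq()) keeps all the
 * work in hard IRQ context.
 */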
3790
3791void vdec_free_irq(enum vdec_irq_num num, void *dev)
3792{
3793 if (num >= VDEC_IRQ_MAX) {
3794 pr_err("[%s] request irq error, irq num too big!", __func__);
3795 return;
3796 }
b9164398 3797 /*
e0614bf7 3798 *assume amrisc is stopped already and there is no mailbox interrupt
b9164398
NQ
3799 * when we reset pointers here.
3800 */
3801 vdec_core->isr_context[num].dev_isr = NULL;
3802 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3803 vdec_core->isr_context[num].dev_id = NULL;
5f3fbfb7 3804 synchronize_irq(vdec_core->isr_context[num].irq);
b9164398
NQ
3805}
3806EXPORT_SYMBOL(vdec_free_irq);
3807
a6c89e96
NQ
3808struct vdec_s *vdec_get_default_vdec_for_userdata(void)
3809{
3810 struct vdec_s *vdec;
3811 struct vdec_s *ret_vdec;
3812 struct vdec_core_s *core = vdec_core;
3813 unsigned long flags;
3814 int id;
3815
3816 flags = vdec_core_lock(vdec_core);
3817
3818 id = 0x10000000;
3819 ret_vdec = NULL;
3820 if (!list_empty(&core->connected_vdec_list)) {
3821 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3822 if (vdec->id < id) {
3823 id = vdec->id;
3824 ret_vdec = vdec;
3825 }
3826 }
3827 }
3828
3829 vdec_core_unlock(vdec_core, flags);
3830
3831 return ret_vdec;
3832}
3833EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
3834
9cc3c918
RZ
3835struct vdec_s *vdec_get_vdec_by_id(int vdec_id)
3836{
3837 struct vdec_s *vdec;
3838 struct vdec_s *ret_vdec;
3839 struct vdec_core_s *core = vdec_core;
3840 unsigned long flags;
3841
3842 flags = vdec_core_lock(vdec_core);
3843
3844 ret_vdec = NULL;
3845 if (!list_empty(&core->connected_vdec_list)) {
3846 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3847 if (vdec->id == vdec_id) {
3848 ret_vdec = vdec;
3849 break;
3850 }
3851 }
3852 }
3853
3854 vdec_core_unlock(vdec_core, flags);
3855
3856 return ret_vdec;
3857}
3858EXPORT_SYMBOL(vdec_get_vdec_by_id);
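/* Illustrative sketch, not built: id 0 below is a hypothetical example. */
#if 0
static void lookup_example(void)
{
	struct vdec_s *vdec = vdec_get_vdec_by_id(0);

	if (vdec)
		pr_info("vdec.%d status: %s\n",
			vdec->id, vdec_status_str(vdec));
}
#endif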
3859
3860int vdec_read_user_data(struct vdec_s *vdec,
3861 struct userdata_param_t *p_userdata_param)
3862{
3863 int ret = 0;
3864
3865 if (!vdec)
3866 vdec = vdec_get_default_vdec_for_userdata();
3867
3868 if (vdec) {
3869 if (vdec->user_data_read)
3870 ret = vdec->user_data_read(vdec, p_userdata_param);
3871 }
3872 return ret;
3873}
3874EXPORT_SYMBOL(vdec_read_user_data);
3875
3876int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
3877{
9cc3c918 3878 if (vdec) {
b78f4cd9 3879 if (vdec->wakeup_userdata_poll)
9cc3c918 3880 vdec->wakeup_userdata_poll(vdec);
b78f4cd9 3881 }
3882
3883 return 0;
3884}
3885EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
3886
3887void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
3888{
3889 if (!vdec)
3890 vdec = vdec_get_default_vdec_for_userdata();
3891
3892 if (vdec) {
3893 if (vdec->reset_userdata_fifo)
3894 vdec->reset_userdata_fifo(vdec, bInit);
3895 }
3896}
3897EXPORT_SYMBOL(vdec_reset_userdata_fifo);
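/*
 * Illustrative sketch, not built: vdec_read_user_data() and
 * vdec_reset_userdata_fifo() accept a NULL vdec and fall back to the
 * default (lowest-id) instance; vdec_wakeup_userdata_poll() does not.
 * The zeroed userdata_param_t is only a placeholder here, its fields are
 * defined elsewhere.
 */
#if 0
static void userdata_example(void)
{
	struct userdata_param_t param;
	int ret;

	memset(&param, 0, sizeof(param));
	ret = vdec_read_user_data(NULL, &param);
	/* ret is whatever the decoder's user_data_read hook returned */

	vdec_reset_userdata_fifo(NULL, 0);
}
#endif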
3898
3899static int dump_mode;
3900static ssize_t dump_risc_mem_store(struct class *class,
3901 struct class_attribute *attr,
3902 const char *buf, size_t size)/*set*/
3903{
e0614bf7 3904 unsigned int val;
3905 ssize_t ret;
3906 char dump_mode_str[4] = "PRL";
3907
3908 /*ret = sscanf(buf, "%d", &val);*/
 3909 ret = kstrtouint(buf, 0, &val);
3910
3911 if (ret != 0)
3912 return -EINVAL;
3913 dump_mode = val & 0x3;
3914 pr_info("set dump mode to %d,%c_mem\n",
3915 dump_mode, dump_mode_str[dump_mode]);
3916 return size;
3917}
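/*
 * Usage note (path assumes the "vdec" class registered below):
 *   echo 0 > /sys/class/vdec/dump_risc_mem   selects pmem ('P')
 *   echo 2 > /sys/class/vdec/dump_risc_mem   selects lmem ('L')
 *   cat /sys/class/vdec/dump_risc_mem        performs the dump
 * Values other than 0 and 2 are accepted but ignored by the read side,
 * and the dumped words go to the kernel log, not to the read buffer.
 */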
3918static u32 read_amrisc_reg(int reg)
3919{
3920 WRITE_VREG(0x31b, reg);
3921 return READ_VREG(0x31c);
3922}
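/*
 * The dumps below go through an index/data pair: read_amrisc_reg() writes
 * the target index to VREG 0x31b and reads the value back from 0x31c,
 * after the memory to walk has been selected via VREG 0x31d (0 for pmem,
 * 2 for lmem).
 */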
3923
3924static void dump_pmem(void)
3925{
3926 int i;
3927
3928 WRITE_VREG(0x301, 0x8000);
3929 WRITE_VREG(0x31d, 0);
3930 pr_info("start dump amrisc pmem of risc\n");
3931 for (i = 0; i < 0xfff; i++) {
3932 /*same as .o format*/
3933 pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
3934 }
3935}
3936
3937static void dump_lmem(void)
3938{
3939 int i;
3940
3941 WRITE_VREG(0x301, 0x8000);
3942 WRITE_VREG(0x31d, 2);
3943 pr_info("start dump amrisc lmem\n");
3944 for (i = 0; i < 0x3ff; i++) {
3945 /*same as */
3946 pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
3947 }
3948}
3949
3950static ssize_t dump_risc_mem_show(struct class *class,
3951 struct class_attribute *attr, char *buf)
3952{
3953 char *pbuf = buf;
3954 int ret;
3955
63e810c0 3956 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
3957 mutex_lock(&vdec_mutex);
3958 if (!vdec_on(VDEC_1)) {
3959 mutex_unlock(&vdec_mutex);
3960 pbuf += sprintf(pbuf, "amrisc is power off\n");
3961 ret = pbuf - buf;
3962 return ret;
3963 }
63e810c0 3964 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
3965 /*TODO:M6 define */
3966 /*
3967 * switch_mod_gate_by_type(MOD_VDEC, 1);
3968 */
3969 amports_switch_gate("vdec", 1);
3970 }
3971 /*start do**/
3972 switch (dump_mode) {
3973 case 0:
3974 dump_pmem();
3975 break;
3976 case 2:
3977 dump_lmem();
3978 break;
3979 default:
3980 break;
3981 }
3982
3983 /*done*/
63e810c0 3984 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
b9164398 3985 mutex_unlock(&vdec_mutex);
63e810c0 3986 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
3987 /*TODO:M6 define */
3988 /*
3989 * switch_mod_gate_by_type(MOD_VDEC, 0);
3990 */
3991 amports_switch_gate("vdec", 0);
3992 }
3993 return sprintf(buf, "done\n");
3994}
3995
3996static ssize_t core_show(struct class *class, struct class_attribute *attr,
3997 char *buf)
3998{
3999 struct vdec_core_s *core = vdec_core;
4000 char *pbuf = buf;
4001
4002 if (list_empty(&core->connected_vdec_list))
4003 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4004 else {
4005 struct vdec_s *vdec;
4006
4007 pbuf += sprintf(pbuf,
4008 " Core: last_sched %p, sched_mask %lx\n",
05afa03d 4009 core->last_vdec,
4010 core->sched_mask);
4011
4012 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4013 pbuf += sprintf(pbuf,
a6c89e96 4014 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
4015 vdec->id,
4016 vdec,
4017 vdec_device_name[vdec->format * 2],
b9164398 4018 vdec_status_str(vdec),
4019 vdec_type_str(vdec),
4020 vdec->active_mask);
4021 }
4022 }
4023
4024 return pbuf - buf;
4025}
4026
4027static ssize_t vdec_status_show(struct class *class,
4028 struct class_attribute *attr, char *buf)
4029{
4030 char *pbuf = buf;
4031 struct vdec_s *vdec;
4032 struct vdec_info vs;
4033 unsigned char vdec_num = 0;
4034 struct vdec_core_s *core = vdec_core;
4035 unsigned long flags = vdec_core_lock(vdec_core);
4036
4037 if (list_empty(&core->connected_vdec_list)) {
4038 pbuf += sprintf(pbuf, "No vdec.\n");
4039 goto out;
4040 }
4041
4042 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
9b670a2d 4043 if ((vdec->status == VDEC_STATUS_CONNECTED
4044 || vdec->status == VDEC_STATUS_ACTIVE)) {
4045 memset(&vs, 0, sizeof(vs));
4046 if (vdec_status(vdec, &vs)) {
4047 pbuf += sprintf(pbuf, "err.\n");
4048 goto out;
4049 }
4050 pbuf += sprintf(pbuf,
4051 "vdec channel %u statistics:\n",
4052 vdec_num);
4053 pbuf += sprintf(pbuf,
4054 "%13s : %s\n", "device name",
4055 vs.vdec_name);
4056 pbuf += sprintf(pbuf,
4057 "%13s : %u\n", "frame width",
4058 vs.frame_width);
4059 pbuf += sprintf(pbuf,
4060 "%13s : %u\n", "frame height",
4061 vs.frame_height);
4062 pbuf += sprintf(pbuf,
4063 "%13s : %u %s\n", "frame rate",
4064 vs.frame_rate, "fps");
4065 pbuf += sprintf(pbuf,
4066 "%13s : %u %s\n", "bit rate",
4067 vs.bit_rate / 1024 * 8, "kbps");
4068 pbuf += sprintf(pbuf,
4069 "%13s : %u\n", "status",
4070 vs.status);
4071 pbuf += sprintf(pbuf,
4072 "%13s : %u\n", "frame dur",
4073 vs.frame_dur);
4074 pbuf += sprintf(pbuf,
4075 "%13s : %u %s\n", "frame data",
4076 vs.frame_data / 1024, "KB");
4077 pbuf += sprintf(pbuf,
4078 "%13s : %u\n", "frame count",
4079 vs.frame_count);
4080 pbuf += sprintf(pbuf,
4081 "%13s : %u\n", "drop count",
4082 vs.drop_frame_count);
4083 pbuf += sprintf(pbuf,
4084 "%13s : %u\n", "fra err count",
4085 vs.error_frame_count);
4086 pbuf += sprintf(pbuf,
4087 "%13s : %u\n", "hw err count",
4088 vs.error_count);
4089 pbuf += sprintf(pbuf,
4090 "%13s : %llu %s\n\n", "total data",
4091 vs.total_data / 1024, "KB");
4092
4093 vdec_num++;
4094 }
4095 }
4096out:
4097 vdec_core_unlock(vdec_core, flags);
4098 return pbuf - buf;
4099}
4100
4101static ssize_t dump_vdec_blocks_show(struct class *class,
4102 struct class_attribute *attr, char *buf)
4103{
4104 struct vdec_core_s *core = vdec_core;
4105 char *pbuf = buf;
4106
4107 if (list_empty(&core->connected_vdec_list))
4108 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4109 else {
4110 struct vdec_s *vdec;
4111 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4112 pbuf += vdec_input_dump_blocks(&vdec->input,
4113 pbuf, PAGE_SIZE - (pbuf - buf));
4114 }
4115 }
4116
4117 return pbuf - buf;
4118}
4119static ssize_t dump_vdec_chunks_show(struct class *class,
4120 struct class_attribute *attr, char *buf)
4121{
4122 struct vdec_core_s *core = vdec_core;
4123 char *pbuf = buf;
4124
4125 if (list_empty(&core->connected_vdec_list))
4126 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4127 else {
4128 struct vdec_s *vdec;
4129 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4130 pbuf += vdec_input_dump_chunks(&vdec->input,
4131 pbuf, PAGE_SIZE - (pbuf - buf));
4132 }
4133 }
4134
4135 return pbuf - buf;
4136}
4137
4138static ssize_t dump_decoder_state_show(struct class *class,
4139 struct class_attribute *attr, char *buf)
4140{
4141 char *pbuf = buf;
4142 struct vdec_s *vdec;
4143 struct vdec_core_s *core = vdec_core;
4144 unsigned long flags = vdec_core_lock(vdec_core);
4145
4146 if (list_empty(&core->connected_vdec_list)) {
4147 pbuf += sprintf(pbuf, "No vdec.\n");
4148 } else {
4149 list_for_each_entry(vdec,
4150 &core->connected_vdec_list, list) {
4151 if ((vdec->status == VDEC_STATUS_CONNECTED
4152 || vdec->status == VDEC_STATUS_ACTIVE)
4153 && vdec->dump_state)
4154 vdec->dump_state(vdec);
4155 }
4156 }
4157 vdec_core_unlock(vdec_core, flags);
4158
4159 return pbuf - buf;
4160}
d481db31 4161
4162static ssize_t dump_fps_show(struct class *class,
4163 struct class_attribute *attr, char *buf)
4164{
4165 char *pbuf = buf;
4166 struct vdec_core_s *core = vdec_core;
4167 int i;
4168
4169 unsigned long flags = vdec_fps_lock(vdec_core);
4170 for (i = 0; i < MAX_INSTANCE_MUN; i++)
4171 pbuf += sprintf(pbuf, "%d ", core->decode_fps[i].fps);
4172
4173 pbuf += sprintf(pbuf, "\n");
4174 vdec_fps_unlock(vdec_core, flags);
4175
4176 return pbuf - buf;
4177}
4178
d481db31 4179
fe96802b 4180
4181static struct class_attribute vdec_class_attrs[] = {
4182 __ATTR_RO(amrisc_regs),
4183 __ATTR_RO(dump_trace),
4184 __ATTR_RO(clock_level),
4185 __ATTR(poweron_clock_level, S_IRUGO | S_IWUSR | S_IWGRP,
4186 show_poweron_clock_level, store_poweron_clock_level),
4187 __ATTR(dump_risc_mem, S_IRUGO | S_IWUSR | S_IWGRP,
4188 dump_risc_mem_show, dump_risc_mem_store),
4189 __ATTR(keep_vdec_mem, S_IRUGO | S_IWUSR | S_IWGRP,
4190 show_keep_vdec_mem, store_keep_vdec_mem),
4191 __ATTR_RO(core),
4192 __ATTR_RO(vdec_status),
4193 __ATTR_RO(dump_vdec_blocks),
4194 __ATTR_RO(dump_vdec_chunks),
d481db31 4195 __ATTR_RO(dump_decoder_state),
4196#ifdef VDEC_DEBUG_SUPPORT
4197 __ATTR(debug, S_IRUGO | S_IWUSR | S_IWGRP,
4198 show_debug, store_debug),
8458676f 4199#endif
4200#ifdef FRAME_CHECK
4201 __ATTR(dump_yuv, S_IRUGO | S_IWUSR | S_IWGRP,
4202 dump_yuv_show, dump_yuv_store),
4203 __ATTR(frame_check, S_IRUGO | S_IWUSR | S_IWGRP,
4204 frame_check_show, frame_check_store),
a6c89e96 4205#endif
97fe3d16 4206 __ATTR_RO(dump_fps),
4207 __ATTR_NULL
4208};
4209
4210static struct class vdec_class = {
4211 .name = "vdec",
4212 .class_attrs = vdec_class_attrs,
4213 };
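/*
 * Registering vdec_class exposes the attributes above under
 * /sys/class/vdec/, e.g. /sys/class/vdec/core for the scheduler view and
 * /sys/class/vdec/vdec_status for per-instance statistics.
 */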
4214
4215struct device *get_vdec_device(void)
4216{
4217 return &vdec_core->vdec_core_platform_device->dev;
4218}
4219EXPORT_SYMBOL(get_vdec_device);
4220
4221static int vdec_probe(struct platform_device *pdev)
4222{
4223 s32 i, r;
4224
4225 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
4226 sizeof(struct vdec_core_s), GFP_KERNEL);
4227 if (vdec_core == NULL) {
4228 pr_err("vdec core allocation failed.\n");
4229 return -ENOMEM;
4230 }
4231
4232 atomic_set(&vdec_core->vdec_nr, 0);
4233 sema_init(&vdec_core->sem, 1);
4234
4235 r = class_register(&vdec_class);
4236 if (r) {
 4237 pr_err("vdec class register failed.\n");
4238 return r;
4239 }
4240
4241 vdec_core->vdec_core_platform_device = pdev;
4242
4243 platform_set_drvdata(pdev, vdec_core);
4244
4245 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4246 vdec_core->isr_context[i].index = i;
4247 vdec_core->isr_context[i].irq = -1;
4248 }
4249
4250 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
4251 IRQF_ONESHOT, "vdec-0", NULL);
4252 if (r < 0) {
4253 pr_err("vdec interrupt request failed\n");
4254 return r;
4255 }
4256
4257 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
4258 IRQF_ONESHOT, "vdec-1", NULL);
4259 if (r < 0) {
4260 pr_err("vdec interrupt request failed\n");
4261 return r;
4262 }
a6c89e96 4263#if 0
df841122 4264 if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
4265 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
4266 IRQF_ONESHOT, "vdec-hevc_back", NULL);
4267 if (r < 0) {
4268 pr_err("vdec interrupt request failed\n");
4269 return r;
4270 }
4271 }
4272#endif
4273 r = of_reserved_mem_device_init(&pdev->dev);
4274 if (r == 0)
4275 pr_info("vdec_probe done\n");
4276
4277 vdec_core->cma_dev = &pdev->dev;
4278
63e810c0 4279 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) {
4280 /* default to 250MHz */
4281 vdec_clock_hi_enable();
4282 }
4283
63e810c0 4284 if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) {
4285 /* set vdec dmc request to urgent */
4286 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
4287 }
4288 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
4289 spin_lock_init(&vdec_core->lock);
05afa03d 4290 spin_lock_init(&vdec_core->canvas_lock);
97fe3d16 4291 spin_lock_init(&vdec_core->fps_lock);
fe96802b 4292 ida_init(&vdec_core->ida);
4293 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
4294 "vdec-core");
4295
 4296 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s", __WQ_LEGACY |
 4297 WQ_MEM_RECLAIM | WQ_HIGHPRI /* high priority */, "vdec-work");
 4298 /* the work queue priority is lower than the vdec-core thread. */
4299 return 0;
4300}
4301
4302static int vdec_remove(struct platform_device *pdev)
4303{
4304 int i;
4305
4306 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4307 if (vdec_core->isr_context[i].irq >= 0) {
4308 free_irq(vdec_core->isr_context[i].irq,
4309 &vdec_core->isr_context[i]);
4310 vdec_core->isr_context[i].irq = -1;
4311 vdec_core->isr_context[i].dev_isr = NULL;
4312 vdec_core->isr_context[i].dev_threaded_isr = NULL;
4313 vdec_core->isr_context[i].dev_id = NULL;
4314 }
4315 }
4316
4317 kthread_stop(vdec_core->thread);
4318
fe96802b 4319 destroy_workqueue(vdec_core->vdec_core_wq);
4320 class_unregister(&vdec_class);
4321
4322 return 0;
4323}
4324
4325static const struct of_device_id amlogic_vdec_dt_match[] = {
4326 {
4327 .compatible = "amlogic, vdec",
4328 },
4329 {},
4330};
4331
fe96802b 4332static struct mconfig vdec_configs[] = {
4333 MC_PU32("debug_trace_num", &debug_trace_num),
4334 MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
4335 MC_PU32("clk_config", &clk_config),
4336 MC_PI32("step_mode", &step_mode),
4337 MC_PI32("poweron_clock_level", &poweron_clock_level),
4338};
4339static struct mconfig_node vdec_node;
4340
4341static struct platform_driver vdec_driver = {
4342 .probe = vdec_probe,
4343 .remove = vdec_remove,
4344 .driver = {
4345 .name = "vdec",
4346 .of_match_table = amlogic_vdec_dt_match,
4347 }
4348};
4349
4350static struct codec_profile_t amvdec_input_profile = {
4351 .name = "vdec_input",
4352 .profile = "drm_framemode"
4353};
4354
4355int vdec_module_init(void)
4356{
4357 if (platform_driver_register(&vdec_driver)) {
4358 pr_info("failed to register vdec module\n");
4359 return -ENODEV;
4360 }
4361 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
4362 "vdec", vdec_configs, CONFIG_FOR_RW);
f811c57d 4363 vcodec_profile_register(&amvdec_input_profile);
4364 return 0;
4365}
4366EXPORT_SYMBOL(vdec_module_init);
4367
4368void vdec_module_exit(void)
4369{
4370 platform_driver_unregister(&vdec_driver);
4371}
4372EXPORT_SYMBOL(vdec_module_exit);
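/*
 * Illustrative sketch, not built: vdec_module_init()/vdec_module_exit()
 * are exported so an outer media module can drive them from its own
 * init/exit path (the original standalone registration is kept in the
 * #if 0 block below).  The wrapper names here are hypothetical.
 */
#if 0
static int __init my_media_init(void)
{
	return vdec_module_init();
}
module_init(my_media_init);

static void __exit my_media_exit(void)
{
	vdec_module_exit();
}
module_exit(my_media_exit);
#endif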
4373
4374#if 0
4375static int __init vdec_module_init(void)
4376{
4377 if (platform_driver_register(&vdec_driver)) {
4378 pr_info("failed to register vdec module\n");
4379 return -ENODEV;
4380 }
4381 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
4382 "vdec", vdec_configs, CONFIG_FOR_RW);
4383 return 0;
4384}
4385
4386static void __exit vdec_module_exit(void)
4387{
4388 platform_driver_unregister(&vdec_driver);
4389}
4390#endif
4391
4392static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
4393{
4394 vdec_core->cma_dev = dev;
4395
4396 return 0;
4397}
4398
4399static const struct reserved_mem_ops rmem_vdec_ops = {
4400 .device_init = vdec_mem_device_init,
4401};
4402
4403static int __init vdec_mem_setup(struct reserved_mem *rmem)
4404{
4405 rmem->ops = &rmem_vdec_ops;
4406 pr_info("vdec: reserved mem setup\n");
4407
4408 return 0;
4409}
4410
4411RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
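/*
 * Device-tree note: the driver matches compatible "amlogic, vdec" and the
 * decoder reserved memory is declared with compatible
 * "amlogic, vdec-memory"; vdec_mem_device_init() above only records the
 * device so later allocations can target that region.  A hypothetical
 * reserved-memory fragment (sizes and layout are assumptions, not taken
 * from a real dts) could look like:
 *
 *	vdec_reserved: vdec-memory {
 *		compatible = "amlogic, vdec-memory";
 *		size = <0x0 0x4000000>;
 *		reusable;
 *	};
 */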
4412/*
4413uint force_hevc_clock_cntl;
4414EXPORT_SYMBOL(force_hevc_clock_cntl);
4415
4416module_param(force_hevc_clock_cntl, uint, 0664);
4417*/
158de7c4 4418module_param(debug, uint, 0664);
4419module_param(debug_trace_num, uint, 0664);
4420module_param(hevc_max_reset_count, int, 0664);
4421module_param(clk_config, uint, 0664);
4422module_param(step_mode, int, 0664);
a6c89e96 4423module_param(debugflags, int, 0664);
05afa03d 4424module_param(parallel_decode, int, 0664);
4425module_param(fps_detection, int, 0664);
4426module_param(fps_clear, int, 0664);
fe96802b 4427
4428/*
4429*module_init(vdec_module_init);
4430*module_exit(vdec_module_exit);
4431*/
4432#define CREATE_TRACE_POINTS
4433#include "vdec_trace.h"
4434MODULE_DESCRIPTION("AMLOGIC vdec driver");
4435MODULE_LICENSE("GPL");
4436MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");