decoder: check init state before reading the vdec status. [1/1]
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_media.git] / drivers / frame_provider / decoder / utils / vdec.c
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
e0614bf7 16 */
5b851ff9 17#define DEBUG
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#include <linux/amlogic/media/vfm/vfm_ext.h>
37/*for VDEC_DEBUG_SUPPORT*/
38#include <linux/time.h>
39
40#include <linux/amlogic/media/utils/vdec_reg.h>
41#include "vdec.h"
42#include "vdec_trace.h"
43#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
44#include "vdec_profile.h"
45#endif
46#include <linux/of.h>
47#include <linux/of_fdt.h>
48#include <linux/libfdt_env.h>
49#include <linux/of_reserved_mem.h>
50#include <linux/dma-contiguous.h>
51#include <linux/cma.h>
52#include <linux/module.h>
53#include <linux/slab.h>
54#include <linux/dma-mapping.h>
55#include <linux/dma-contiguous.h>
56#include "../../../stream_input/amports/amports_priv.h"
57
58#include <linux/amlogic/media/utils/amports_config.h>
59#include "../utils/amvdec.h"
60#include "vdec_input.h"
61
62#include "../../../common/media_clock/clk/clk.h"
63#include <linux/reset.h>
fe96802b 64#include <linux/amlogic/cpu_version.h>
65#include <linux/amlogic/media/codec_mm/codec_mm.h>
66#include <linux/amlogic/media/video_sink/video_keeper.h>
67#include <linux/amlogic/media/codec_mm/configs.h>
68#include <linux/amlogic/media/frame_sync/ptsserv.h>
69#include "secprot.h"
63e810c0 70#include "../../../common/chips/decoder_cpu_ver_info.h"
71
72static DEFINE_MUTEX(vdec_mutex);
73
74#define MC_SIZE (4096 * 4)
75#define CMA_ALLOC_SIZE SZ_64M
76#define MEM_NAME "vdec_prealloc"
77static int inited_vcodec_num;
976f3376 78#define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ)
79static int poweron_clock_level;
80static int keep_vdec_mem;
81static unsigned int debug_trace_num = 16 * 20;
82static int step_mode;
83static unsigned int clk_config;
84/*
 85 * &1: set sched_priority to MAX_RT_PRIO - 1.
 86 * &2: always reload the firmware.
87 */
88static unsigned int debug;
8247f369 89
b9164398 90static int hevc_max_reset_count;
91#define MAX_INSTANCE_MUN 9
92
28e318df 93static int no_powerdown;
94static DEFINE_SPINLOCK(vdec_spin_lock);
95
96#define HEVC_TEST_LIMIT 100
97#define GXBB_REV_A_MINOR 0xA
98
99struct am_reg {
100 char *name;
101 int offset;
102};
103
104struct vdec_isr_context_s {
105 int index;
106 int irq;
107 irq_handler_t dev_isr;
108 irq_handler_t dev_threaded_isr;
109 void *dev_id;
a6c89e96 110 struct vdec_s *vdec;
111};
112
113struct vdec_core_s {
114 struct list_head connected_vdec_list;
115 spinlock_t lock;
fe96802b 116 struct ida ida;
117 atomic_t vdec_nr;
118 struct vdec_s *vfm_vdec;
119 struct vdec_s *active_vdec;
fe96802b 120 struct vdec_s *hint_fr_vdec;
121 struct platform_device *vdec_core_platform_device;
122 struct device *cma_dev;
123 struct semaphore sem;
124 struct task_struct *thread;
fe96802b 125 struct workqueue_struct *vdec_core_wq;
b9164398 126
a6c89e96 127 unsigned long sched_mask;
128 struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
129 int power_ref_count[VDEC_MAX];
158de7c4 130 void *last_vdec;
131};
132
133static struct vdec_core_s *vdec_core;
134
135static const char * const vdec_status_string[] = {
136 "VDEC_STATUS_UNINITIALIZED",
137 "VDEC_STATUS_DISCONNECTED",
138 "VDEC_STATUS_CONNECTED",
139 "VDEC_STATUS_ACTIVE"
140};
141
142static int debugflags;
143
144int vdec_get_debug_flags(void)
145{
146 return debugflags;
147}
148EXPORT_SYMBOL(vdec_get_debug_flags);
149
150unsigned char is_mult_inc(unsigned int type)
151{
152 unsigned char ret = 0;
153 if (vdec_get_debug_flags() & 0xf000)
154 ret = (vdec_get_debug_flags() & 0x1000)
155 ? 1 : 0;
156 else if (type & PORT_TYPE_DECODER_SCHED)
157 ret = 1;
158 return ret;
159}
160EXPORT_SYMBOL(is_mult_inc);
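/*
 * Editor's note (illustrative, not part of the original source):
 * is_mult_inc() decides whether a port takes the multi-instance
 * (ammvdec_*) path.  Bits 0xf000 of the debug flags act as a global
 * override: with that nibble non-zero, 0x1000 forces multi-instance
 * and anything else forces the legacy single-instance path; otherwise
 * the PORT_TYPE_DECODER_SCHED bit of the port type decides.
 */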
161
162static const bool cores_with_input[VDEC_MAX] = {
163 true, /* VDEC_1 */
164 false, /* VDEC_HCODEC */
165 false, /* VDEC_2 */
166 true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
167 false, /* VDEC_HEVC_BACK */
168};
169
170static const int cores_int[VDEC_MAX] = {
171 VDEC_IRQ_1,
172 VDEC_IRQ_2,
173 VDEC_IRQ_0,
174 VDEC_IRQ_0,
175 VDEC_IRQ_HEVC_BACK
176};
177
178unsigned long vdec_core_lock(struct vdec_core_s *core)
179{
180 unsigned long flags;
181
182 spin_lock_irqsave(&core->lock, flags);
183
184 return flags;
185}
186
187void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
188{
189 spin_unlock_irqrestore(&core->lock, flags);
190}
191
192static int get_canvas(unsigned int index, unsigned int base)
193{
194 int start;
195 int canvas_index = index * base;
a35da9f0 196 int ret;
197
198 if ((base > 4) || (base == 0))
199 return -1;
200
201 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
202 <= AMVDEC_CANVAS_MAX1) {
203 start = AMVDEC_CANVAS_START_INDEX + base * index;
204 } else {
205 canvas_index -= (AMVDEC_CANVAS_MAX1 -
206 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
207 if (canvas_index <= AMVDEC_CANVAS_MAX2)
208 start = canvas_index / base;
209 else
210 return -1;
211 }
212
213 if (base == 1) {
a35da9f0 214 ret = start;
b9164398 215 } else if (base == 2) {
a35da9f0 216 ret = ((start + 1) << 16) | ((start + 1) << 8) | start;
b9164398 217 } else if (base == 3) {
a35da9f0 218 ret = ((start + 2) << 16) | ((start + 1) << 8) | start;
b9164398 219 } else if (base == 4) {
a35da9f0 220 ret = (((start + 3) << 24) | (start + 2) << 16) |
221 ((start + 1) << 8) | start;
222 }
223
a35da9f0 224 return ret;
225}
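/*
 * Editor's note: worked example for get_canvas() (illustrative, not
 * part of the original source).  With base == 3 (one canvas per
 * Y/U/V plane) and the first canvas range not yet exhausted,
 * start = AMVDEC_CANVAS_START_INDEX + 3 * index and the function
 * returns
 *
 *   ((start + 2) << 16) | ((start + 1) << 8) | start
 *
 * i.e. three consecutive canvas indexes packed one per byte, lowest
 * byte first, in the form the decoder canvas registers expect.
 */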
226
227
fe96802b 228int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
b9164398 229{
976f3376 230 if (vdec && vdec->dec_status)
231 return vdec->dec_status(vdec, vstatus);
232
233 return -1;
234}
235EXPORT_SYMBOL(vdec_status);
236
237int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
238{
239 int r;
240
241 if (vdec->set_trickmode) {
242 r = vdec->set_trickmode(vdec, trickmode);
243
244 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
245 r = vdec->slave->set_trickmode(vdec->slave,
246 trickmode);
976f3376 247 return r;
248 }
249
250 return -1;
251}
252EXPORT_SYMBOL(vdec_set_trickmode);
253
254int vdec_set_isreset(struct vdec_s *vdec, int isreset)
255{
256 vdec->is_reset = isreset;
257 pr_info("is_reset=%d\n", isreset);
258 if (vdec->set_isreset)
259 return vdec->set_isreset(vdec, isreset);
260 return 0;
261}
262EXPORT_SYMBOL(vdec_set_isreset);
263
264int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
265{
266 vdec->dolby_meta_with_el = isdvmetawithel;
267 pr_info("isdvmetawithel=%d\n", isdvmetawithel);
268 return 0;
269}
c23e8aee 270EXPORT_SYMBOL(vdec_set_dv_metawithel);
271
272void vdec_set_no_powerdown(int flag)
273{
274 no_powerdown = flag;
275 pr_info("no_powerdown=%d\n", no_powerdown);
276 return;
277}
c23e8aee 278EXPORT_SYMBOL(vdec_set_no_powerdown);
28e318df 279
280void vdec_count_info(struct vdec_info *vs, unsigned int err,
281 unsigned int offset)
282{
283 if (err)
284 vs->error_frame_count++;
285 if (offset) {
286 if (0 == vs->frame_count) {
287 vs->offset = 0;
288 vs->samp_cnt = 0;
289 }
290 vs->frame_data = offset > vs->total_data ?
291 offset - vs->total_data : vs->total_data - offset;
292 vs->total_data = offset;
293 if (vs->samp_cnt < 96000 * 2) { /* 2s */
294 if (0 == vs->samp_cnt)
295 vs->offset = offset;
296 vs->samp_cnt += vs->frame_dur;
297 } else {
298 vs->bit_rate = (offset - vs->offset) / 2;
299 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
300 vs->samp_cnt = 0;
301 }
302 vs->frame_count++;
303 }
304 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
305 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
306 return;
307}
308EXPORT_SYMBOL(vdec_count_info);
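/*
 * Editor's note: rough numeric sketch for the bitrate window above
 * (illustrative; assumes frame_dur is expressed in 1/96000 s units as
 * elsewhere in this driver).  At 30 fps frame_dur is 3200, so samp_cnt
 * reaches 96000 * 2 after about 60 frames (~2 s); bit_rate then
 * becomes (offset - vs->offset) / 2, i.e. bytes consumed per second
 * over that window.
 */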
309int vdec_is_support_4k(void)
310{
a8d5afab 311 return !is_meson_gxl_package_805X();
312}
313EXPORT_SYMBOL(vdec_is_support_4k);
fe96802b 314
b9164398 315/*
e0614bf7 316 * clk_config:
 317 *0: default
 318 *1: do not use gp0_pll;
 319 *2: always use gp0_pll;
 320 *>=10: fixed clock of n MHz;
 321 *e.g. == 100 requests fixed 100 MHz clocks;
e0614bf7 322 */
323unsigned int get_vdec_clk_config_settings(void)
324{
325 return clk_config;
326}
327void update_vdec_clk_config_settings(unsigned int config)
328{
329 clk_config = config;
330}
331EXPORT_SYMBOL(update_vdec_clk_config_settings);
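/*
 * Editor's note: usage sketch (not from the original source).  A value
 * >= 10 requests a fixed decoder clock in MHz, so for example
 *
 *   update_vdec_clk_config_settings(100);   // fixed 100 MHz clocks
 *
 * matches the "== 100, 100M clks" case documented above.
 */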
332
333static bool hevc_workaround_needed(void)
334{
63e810c0 335 return (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) &&
336 (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
337 == GXBB_REV_A_MINOR);
338}
339
340struct device *get_codec_cma_device(void)
341{
342 return vdec_core->cma_dev;
343}
344
fe96802b 345#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
346static const char * const vdec_device_name[] = {
347 "amvdec_mpeg12", "ammvdec_mpeg12",
348 "amvdec_mpeg4", "ammvdec_mpeg4",
349 "amvdec_h264", "ammvdec_h264",
350 "amvdec_mjpeg", "ammvdec_mjpeg",
351 "amvdec_real", "ammvdec_real",
352 "amjpegdec", "ammjpegdec",
353 "amvdec_vc1", "ammvdec_vc1",
354 "amvdec_avs", "ammvdec_avs",
355 "amvdec_yuv", "ammvdec_yuv",
356 "amvdec_h264mvc", "ammvdec_h264mvc",
357 "amvdec_h264_4k2k", "ammvdec_h264_4k2k",
358 "amvdec_h265", "ammvdec_h265",
359 "amvenc_avc", "amvenc_avc",
360 "jpegenc", "jpegenc",
361 "amvdec_vp9", "ammvdec_vp9",
362 "amvdec_avs2", "ammvdec_avs2"
363};
364
365
366#else
367
368static const char * const vdec_device_name[] = {
369 "amvdec_mpeg12",
370 "amvdec_mpeg4",
371 "amvdec_h264",
372 "amvdec_mjpeg",
373 "amvdec_real",
374 "amjpegdec",
375 "amvdec_vc1",
376 "amvdec_avs",
377 "amvdec_yuv",
378 "amvdec_h264mvc",
379 "amvdec_h264_4k2k",
380 "amvdec_h265",
381 "amvenc_avc",
382 "jpegenc",
383 "amvdec_vp9",
384 "amvdec_avs2"
385};
386
387#endif
388
389#ifdef VDEC_DEBUG_SUPPORT
390static u64 get_current_clk(void)
391{
392 /*struct timespec xtime = current_kernel_time();
393 u64 usec = xtime.tv_sec * 1000000;
394 usec += xtime.tv_nsec / 1000;
395 */
396 u64 usec = sched_clock();
397 return usec;
398}
399
400static void inc_profi_count(unsigned long mask, u32 *count)
401{
402 enum vdec_type_e type;
403
404 for (type = VDEC_1; type < VDEC_MAX; type++) {
405 if (mask & (1 << type))
406 count[type]++;
407 }
408}
409
410static void update_profi_clk_run(struct vdec_s *vdec,
411 unsigned long mask, u64 clk)
412{
413 enum vdec_type_e type;
414
415 for (type = VDEC_1; type < VDEC_MAX; type++) {
416 if (mask & (1 << type)) {
417 vdec->start_run_clk[type] = clk;
418 if (vdec->profile_start_clk[type] == 0)
419 vdec->profile_start_clk[type] = clk;
420 vdec->total_clk[type] = clk
421 - vdec->profile_start_clk[type];
422 /*pr_info("set start_run_clk %ld\n",
423 vdec->start_run_clk);*/
424
425 }
426 }
427}
428
429static void update_profi_clk_stop(struct vdec_s *vdec,
430 unsigned long mask, u64 clk)
431{
432 enum vdec_type_e type;
433
434 for (type = VDEC_1; type < VDEC_MAX; type++) {
435 if (mask & (1 << type)) {
436 if (vdec->start_run_clk[type] == 0)
437 pr_info("error, start_run_clk[%d] not set\n", type);
438
439 /*pr_info("update run_clk type %d, %ld, %ld, %ld\n",
440 type,
441 clk,
442 vdec->start_run_clk[type],
443 vdec->run_clk[type]);*/
444 vdec->run_clk[type] +=
445 (clk - vdec->start_run_clk[type]);
446 }
447 }
448}
449
450#endif
451
452int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
453{
454 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
455 sizeof(struct dec_sysinfo)))
456 return -EFAULT;
457
458 return 0;
459}
460EXPORT_SYMBOL(vdec_set_decinfo);
461
 462/* construct vdec structure */
463struct vdec_s *vdec_create(struct stream_port_s *port,
464 struct vdec_s *master)
465{
466 struct vdec_s *vdec;
467 int type = VDEC_TYPE_SINGLE;
468 int id;
469 if (is_mult_inc(port->type))
470 type = (port->type & PORT_TYPE_FRAME) ?
471 VDEC_TYPE_FRAME_BLOCK :
472 VDEC_TYPE_STREAM_PARSER;
473
474 id = ida_simple_get(&vdec_core->ida,
475 0, MAX_INSTANCE_MUN, GFP_KERNEL);
476 if (id < 0) {
477 pr_info("vdec_create request id failed!ret =%d\n", id);
478 return NULL;
479 }
480 vdec = vzalloc(sizeof(struct vdec_s));
481
482 /* TBD */
483 if (vdec) {
484 vdec->magic = 0x43454456;
fe96802b 485 vdec->id = -1;
486 vdec->type = type;
487 vdec->port = port;
488 vdec->sys_info = &vdec->sys_info_store;
489
490 INIT_LIST_HEAD(&vdec->list);
491
b9164398 492 atomic_inc(&vdec_core->vdec_nr);
493 vdec->id = id;
494 vdec_input_init(&vdec->input, vdec);
495 if (master) {
496 vdec->master = master;
497 master->slave = vdec;
498 master->sched = 1;
499 }
500 }
501
5b851ff9 502 pr_debug("vdec_create instance %p, total %d\n", vdec,
503 atomic_read(&vdec_core->vdec_nr));
504
505 //trace_vdec_create(vdec); /*DEBUG_TMP*/
506
507 return vdec;
508}
509EXPORT_SYMBOL(vdec_create);
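/*
 * Editor's note: rough lifecycle sketch of how the stream/port layer
 * is expected to drive this API (illustrative only; error handling,
 * locking and the exact caller are assumptions, not part of this
 * file):
 *
 *   struct vdec_s *vdec = vdec_create(port, NULL);
 *   vdec_set_format(vdec, VFORMAT_H264);
 *   vdec_set_decinfo(vdec, user_sys_info);   // __user pointer from the ioctl
 *   vdec_init(vdec, is_4k);
 *   vdec_connect(vdec);          // hand the instance to the core scheduler
 *   ...
 *   vdec_disconnect(vdec);
 *   vdec_release(vdec);          // unregisters and destroys the instance
 */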
510
511int vdec_set_format(struct vdec_s *vdec, int format)
512{
513 vdec->format = format;
fe96802b 514 vdec->port_flag |= PORT_FLAG_VFORMAT;
b9164398 515
fe96802b 516 if (vdec->slave) {
b9164398 517 vdec->slave->format = format;
518 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
519 }
520
521 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
522
523 return 0;
524}
525EXPORT_SYMBOL(vdec_set_format);
526
527int vdec_set_pts(struct vdec_s *vdec, u32 pts)
528{
529 vdec->pts = pts;
fe96802b 530 vdec->pts64 = div64_u64((u64)pts * 100, 9);
b9164398 531 vdec->pts_valid = true;
fe96802b 532 //trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
533 return 0;
534}
535EXPORT_SYMBOL(vdec_set_pts);
536
537void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
538{
539 vdec->timestamp = timestamp;
540 vdec->timestamp_valid = true;
541}
542EXPORT_SYMBOL(vdec_set_timestamp);
543
544int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
545{
546 vdec->pts64 = pts64;
fe96802b 547 vdec->pts = (u32)div64_u64(pts64 * 9, 100);
b9164398 548 vdec->pts_valid = true;
549
550 //trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
551 return 0;
552}
553EXPORT_SYMBOL(vdec_set_pts64);
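/*
 * Editor's note (illustrative): pts appears to be kept in 90 kHz ticks
 * and pts64 in microseconds, hence the * 100 / 9 and * 9 / 100
 * conversions above.  E.g. pts = 90000 (one second of 90 kHz ticks)
 * maps to pts64 = 1000000 us, and back.
 */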
554
555int vdec_get_status(struct vdec_s *vdec)
556{
557 return vdec->status;
558}
559EXPORT_SYMBOL(vdec_get_status);
560
561void vdec_set_status(struct vdec_s *vdec, int status)
562{
fe96802b 563 //trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
564 vdec->status = status;
565}
566EXPORT_SYMBOL(vdec_set_status);
567
568void vdec_set_next_status(struct vdec_s *vdec, int status)
569{
fe96802b 570 //trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
571 vdec->next_status = status;
572}
573EXPORT_SYMBOL(vdec_set_next_status);
574
575int vdec_set_video_path(struct vdec_s *vdec, int video_path)
576{
577 vdec->frame_base_video_path = video_path;
578 return 0;
579}
580EXPORT_SYMBOL(vdec_set_video_path);
581
582int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
583{
584 vdec->vf_receiver_inst = receive_id;
585 return 0;
586}
587EXPORT_SYMBOL(vdec_set_receive_id);
588
589/* add frame data to input chain */
590int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
591{
592 return vdec_input_add_frame(&vdec->input, buf, count);
593}
594EXPORT_SYMBOL(vdec_write_vframe);
595
 596/* queue work on the vdec core workqueue (or the system workqueue as a fallback) */
597void vdec_schedule_work(struct work_struct *work)
598{
599 if (vdec_core->vdec_core_wq)
600 queue_work(vdec_core->vdec_core_wq, work);
601 else
602 schedule_work(work);
603}
604EXPORT_SYMBOL(vdec_schedule_work);
605
606static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
607{
608 if (vdec->master)
609 return vdec->master;
610 else if (vdec->slave)
611 return vdec->slave;
612 return NULL;
613}
614
615static void vdec_sync_input_read(struct vdec_s *vdec)
616{
617 if (!vdec_stream_based(vdec))
618 return;
619
620 if (vdec_dual(vdec)) {
621 u32 me, other;
622 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
623 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
624 other =
625 vdec_get_associate(vdec)->input.stream_cookie;
626 if (me > other)
627 return;
628 else if (me == other) {
629 me = READ_VREG(VLD_MEM_VIFIFO_RP);
630 other =
631 vdec_get_associate(vdec)->input.swap_rp;
632 if (me > other) {
633 WRITE_PARSER_REG(PARSER_VIDEO_RP,
634 vdec_get_associate(vdec)->
635 input.swap_rp);
636 return;
637 }
638 }
639 WRITE_PARSER_REG(PARSER_VIDEO_RP,
640 READ_VREG(VLD_MEM_VIFIFO_RP));
641 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
642 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
643 if (((me & 0x80000000) == 0) &&
644 (vdec->input.streaming_rp & 0x80000000))
645 me += 1ULL << 32;
646 other = vdec_get_associate(vdec)->input.streaming_rp;
647 if (me > other) {
648 WRITE_PARSER_REG(PARSER_VIDEO_RP,
649 vdec_get_associate(vdec)->
650 input.swap_rp);
651 return;
652 }
653
654 WRITE_PARSER_REG(PARSER_VIDEO_RP,
655 READ_VREG(HEVC_STREAM_RD_PTR));
656 }
657 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
658 WRITE_PARSER_REG(PARSER_VIDEO_RP,
659 READ_VREG(VLD_MEM_VIFIFO_RP));
660 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
661 WRITE_PARSER_REG(PARSER_VIDEO_RP,
662 READ_VREG(HEVC_STREAM_RD_PTR));
663 }
664}
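/*
 * Editor's note (illustrative): for dual (master/slave) stream input,
 * the function above only moves PARSER_VIDEO_RP up to the slower of
 * the two readers.  The wrap count (VLD) or the 32-bit wrap of
 * HEVC_SHIFT_BYTE_COUNT (HEVC) is taken into account first, so a
 * reader that has already wrapped around the buffer is never mistaken
 * for being behind one that has not.
 */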
665
666static void vdec_sync_input_write(struct vdec_s *vdec)
667{
668 if (!vdec_stream_based(vdec))
669 return;
670
671 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
672 WRITE_VREG(VLD_MEM_VIFIFO_WP,
673 READ_PARSER_REG(PARSER_VIDEO_WP));
674 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
675 WRITE_VREG(HEVC_STREAM_WR_PTR,
676 READ_PARSER_REG(PARSER_VIDEO_WP));
677 }
678}
679
b9164398 680/*
681 *get next frame from input chain
682 */
b9164398 683/*
e0614bf7 684 *THE VLD_FIFO is 512 bytes and Video buffer level
685 * empty interrupt is set to 0x80 bytes threshold
686 */
687#define VLD_PADDING_SIZE 1024
688#define HEVC_PADDING_SIZE (1024*16)
689int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
690{
fe96802b 691 struct vdec_input_s *input = &vdec->input;
692 struct vframe_chunk_s *chunk = NULL;
693 struct vframe_block_list_s *block = NULL;
694 int dummy;
695
696 /* full reset to HW input */
697 if (input->target == VDEC_INPUT_TARGET_VLD) {
698 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
699
700 /* reset VLD fifo for all vdec */
701 WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
702 WRITE_VREG(DOS_SW_RESET0, 0);
703
fe96802b 704 dummy = READ_RESET_REG(RESET0_REGISTER);
705 WRITE_VREG(POWER_CTL_VLD, 1 << 4);
706 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
707#if 0
708 /*move to driver*/
709 if (input_frame_based(input))
710 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
711
712 /*
713 * 2: assist
714 * 3: parser
715 * 4: parser_state
716 * 8: dblk
717 * 11:mcpu
718 * 12:ccpu
719 * 13:ddr
720 * 14:iqit
721 * 15:ipp
722 * 17:qdct
723 * 18:mpred
724 * 19:sao
725 * 24:hevc_afifo
726 */
727 WRITE_VREG(DOS_SW_RESET3,
728 (1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
729 (1<<17)|(1<<18)|(1<<19));
730 WRITE_VREG(DOS_SW_RESET3, 0);
731#endif
732 }
733
734 /*
e0614bf7 735 *setup HW decoder input buffer (VLD context)
736 * based on input->type and input->target
737 */
738 if (input_frame_based(input)) {
739 chunk = vdec_input_next_chunk(&vdec->input);
740
741 if (chunk == NULL) {
742 *p = NULL;
743 return -1;
744 }
745
746 block = chunk->block;
747
748 if (input->target == VDEC_INPUT_TARGET_VLD) {
749 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
750 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
751 block->size - 8);
752 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
753 round_down(block->start + chunk->offset,
fe96802b 754 VDEC_FIFO_ALIGN));
755
756 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
757 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
758
759 /* set to manual mode */
760 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
761 WRITE_VREG(VLD_MEM_VIFIFO_RP,
762 round_down(block->start + chunk->offset,
fe96802b 763 VDEC_FIFO_ALIGN));
764 dummy = chunk->offset + chunk->size +
765 VLD_PADDING_SIZE;
766 if (dummy >= block->size)
767 dummy -= block->size;
768 WRITE_VREG(VLD_MEM_VIFIFO_WP,
769 round_down(block->start + dummy,
770 VDEC_FIFO_ALIGN));
771
772 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
773 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
774
775 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
776 (0x11 << 16) | (1<<10) | (7<<3));
777
778 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
779 WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
780 WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
781 block->size);
782 WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
783 chunk->offset);
784 dummy = chunk->offset + chunk->size +
785 HEVC_PADDING_SIZE;
786 if (dummy >= block->size)
787 dummy -= block->size;
788 WRITE_VREG(HEVC_STREAM_WR_PTR,
789 round_down(block->start + dummy,
790 VDEC_FIFO_ALIGN));
791
792 /* set endian */
793 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
794 }
795
796 *p = chunk;
797 return chunk->size;
798
799 } else {
fe96802b 800 /* stream based */
801 u32 rp = 0, wp = 0, fifo_len = 0;
802 int size;
803 bool swap_valid = input->swap_valid;
804 unsigned long swap_page_phys = input->swap_page_phys;
805
806 if (vdec_dual(vdec) &&
807 ((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
808 /* keep using previous input context */
809 struct vdec_s *master = (vdec->slave) ?
810 vdec : vdec->master;
811 if (master->input.last_swap_slave) {
812 swap_valid = master->slave->input.swap_valid;
813 swap_page_phys =
814 master->slave->input.swap_page_phys;
815 } else {
816 swap_valid = master->input.swap_valid;
817 swap_page_phys = master->input.swap_page_phys;
818 }
819 }
820
821 if (swap_valid) {
b9164398 822 if (input->target == VDEC_INPUT_TARGET_VLD) {
fe96802b
NQ
823 if (vdec->format == VFORMAT_H264)
824 SET_VREG_MASK(POWER_CTL_VLD,
825 (1 << 9));
826
b9164398
NQ
827 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
828
829 /* restore read side */
830 WRITE_VREG(VLD_MEM_SWAP_ADDR,
fe96802b 831 swap_page_phys);
b9164398
NQ
832 WRITE_VREG(VLD_MEM_SWAP_CTL, 1);
833
834 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
835 ;
836 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
837
838 /* restore wrap count */
839 WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
840 input->stream_cookie);
841
842 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
843 fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);
844
845 /* enable */
846 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
847 (0x11 << 16) | (1<<10));
848
fe96802b
NQ
849 /* sync with front end */
850 vdec_sync_input_read(vdec);
851 vdec_sync_input_write(vdec);
b9164398
NQ
852
853 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
854 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
855 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
856
857 /* restore read side */
858 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
fe96802b 859 swap_page_phys);
b9164398
NQ
860 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);
861
862 while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
863 & (1<<7))
864 ;
865 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
866
867 /* restore stream offset */
868 WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
869 input->stream_cookie);
870
871 rp = READ_VREG(HEVC_STREAM_RD_PTR);
872 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
873 >> 16) & 0x7f;
874
875
876 /* enable */
877
fe96802b
NQ
878 /* sync with front end */
879 vdec_sync_input_read(vdec);
880 vdec_sync_input_write(vdec);
b9164398
NQ
881
882 wp = READ_VREG(HEVC_STREAM_WR_PTR);
fe96802b
NQ
883
884 /*pr_info("vdec: restore context\r\n");*/
b9164398
NQ
885 }
886
887 } else {
888 if (input->target == VDEC_INPUT_TARGET_VLD) {
889 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
890 input->start);
891 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
892 input->start + input->size - 8);
893 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
894 input->start);
895
896 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
897 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
898
899 /* set to manual mode */
900 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
901 WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
902 WRITE_VREG(VLD_MEM_VIFIFO_WP,
fe96802b 903 READ_PARSER_REG(PARSER_VIDEO_WP));
b9164398
NQ
904
905 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
906
907 /* enable */
908 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
909 (0x11 << 16) | (1<<10));
910
911 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
912
913 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
914 WRITE_VREG(HEVC_STREAM_START_ADDR,
915 input->start);
916 WRITE_VREG(HEVC_STREAM_END_ADDR,
917 input->start + input->size);
918 WRITE_VREG(HEVC_STREAM_RD_PTR,
919 input->start);
920 WRITE_VREG(HEVC_STREAM_WR_PTR,
fe96802b 921 READ_PARSER_REG(PARSER_VIDEO_WP));
b9164398
NQ
922
923 rp = READ_VREG(HEVC_STREAM_RD_PTR);
924 wp = READ_VREG(HEVC_STREAM_WR_PTR);
925 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
926 >> 16) & 0x7f;
927
928 /* enable */
929 }
930 }
931 *p = NULL;
932 if (wp >= rp)
933 size = wp - rp + fifo_len;
934 else
935 size = wp + input->size - rp + fifo_len;
936 if (size < 0) {
937 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
938 __func__, input->size, wp, rp, fifo_len, size);
939 size = 0;
940 }
941 return size;
942 }
943}
944EXPORT_SYMBOL(vdec_prepare_input);
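/*
 * Editor's note: numeric sketch of the stream-based size computation
 * above (illustrative only).  With input->size = 0x100000,
 * rp = 0xf0000, wp = 0x10000 and fifo_len = 0x40, wp < rp so
 *
 *   size = wp + input->size - rp + fifo_len = 0x20040
 *
 * bytes are reported as available, i.e. the wrapped distance from the
 * read to the write pointer plus what is already buffered in the HW
 * FIFO.
 */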
945
946void vdec_enable_input(struct vdec_s *vdec)
947{
948 struct vdec_input_s *input = &vdec->input;
949
950 if (vdec->status != VDEC_STATUS_ACTIVE)
951 return;
952
953 if (input->target == VDEC_INPUT_TARGET_VLD)
954 SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
955 else if (input->target == VDEC_INPUT_TARGET_HEVC) {
956 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
957 if (vdec_stream_based(vdec))
958 CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
959 else
960 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
961 SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
962 }
963}
964EXPORT_SYMBOL(vdec_enable_input);
965
fe96802b
NQ
966int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
967{
968 int r = vdec_input_set_buffer(&vdec->input, start, size);
969
970 if (r)
971 return r;
972
973 if (vdec->slave)
974 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
975
976 return r;
977}
978EXPORT_SYMBOL(vdec_set_input_buffer);
979
980/*
 981 * vdec_has_more_input() reports whether more input data may still
 982 * become available to the decoder through vdec_prepare_input.
 983 * Note: a decoder driver should call this prior to vdec_vframe_dirty
 984 * to determine whether EOS has happened for stream based
 985 * decoding when there is not sufficient data for a frame.
986 */
987bool vdec_has_more_input(struct vdec_s *vdec)
988{
989 struct vdec_input_s *input = &vdec->input;
990
991 if (!input->eos)
992 return true;
993
994 if (input_frame_based(input))
995 return vdec_input_next_input_chunk(input) != NULL;
996 else {
997 if (input->target == VDEC_INPUT_TARGET_VLD)
998 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
999 READ_PARSER_REG(PARSER_VIDEO_WP);
1000 else {
1001 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
1002 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
1003 }
1004 }
1005}
1006EXPORT_SYMBOL(vdec_has_more_input);
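/*
 * Editor's note: usage sketch for the note above (illustrative only).
 * A stream-based decoder that cannot assemble a full frame would
 * typically do something like
 *
 *   if (!vdec_has_more_input(vdec)) {
 *       // parser hit EOS and nothing is left: flush and finish
 *   } else {
 *       vdec_vframe_dirty(vdec, NULL);   // stream based: no chunk
 *       // yield and wait for more data
 *   }
 *
 * before handing the HW core back to the scheduler.
 */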
1007
1008void vdec_set_prepare_level(struct vdec_s *vdec, int level)
1009{
1010 vdec->input.prepare_level = level;
1011}
1012EXPORT_SYMBOL(vdec_set_prepare_level);
1013
b9164398
NQ
1014void vdec_set_flag(struct vdec_s *vdec, u32 flag)
1015{
1016 vdec->flag = flag;
1017}
fe96802b
NQ
1018EXPORT_SYMBOL(vdec_set_flag);
1019
1020void vdec_set_eos(struct vdec_s *vdec, bool eos)
1021{
1022 vdec->input.eos = eos;
1023
1024 if (vdec->slave)
1025 vdec->slave->input.eos = eos;
1026}
1027EXPORT_SYMBOL(vdec_set_eos);
b9164398 1028
a6c89e96
NQ
1029#ifdef VDEC_DEBUG_SUPPORT
1030void vdec_set_step_mode(void)
1031{
1032 step_mode = 0x1ff;
1033}
1034EXPORT_SYMBOL(vdec_set_step_mode);
1035#endif
1036
b9164398
NQ
1037void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1038{
1039 if (vdec && next_vdec) {
1040 vdec->sched = 0;
1041 next_vdec->sched = 1;
1042 }
1043}
fe96802b
NQ
1044EXPORT_SYMBOL(vdec_set_next_sched);
1045
1046/*
1047 * Swap Context: S0 S1 S2 S3 S4
1048 * Sample sequence: M S M M S
1049 * Master Context: S0 S0 S2 S3 S3
1050 * Slave context: NA S1 S1 S2 S4
1051 * ^
1052 * ^
1053 * ^
1054 * the tricky part
1055 * If there are back to back decoding of master or slave
1056 * then the context of the counter part should be updated
1057 * with current decoder. In this example, S1 should be
1058 * updated to S2.
1059 * This is done by swap the swap_page and related info
1060 * between two layers.
1061 */
1062static void vdec_borrow_input_context(struct vdec_s *vdec)
1063{
1064 struct page *swap_page;
1065 unsigned long swap_page_phys;
1066 struct vdec_input_s *me;
1067 struct vdec_input_s *other;
1068
1069 if (!vdec_dual(vdec))
1070 return;
1071
1072 me = &vdec->input;
1073 other = &vdec_get_associate(vdec)->input;
1074
1075 /* swap the swap_context, borrow counter part's
1076 * swap context storage and update all related info.
1077 * After vdec_vframe_dirty, vdec_save_input_context
1078 * will be called to update current vdec's
1079 * swap context
1080 */
1081 swap_page = other->swap_page;
1082 other->swap_page = me->swap_page;
1083 me->swap_page = swap_page;
1084
1085 swap_page_phys = other->swap_page_phys;
1086 other->swap_page_phys = me->swap_page_phys;
1087 me->swap_page_phys = swap_page_phys;
1088
1089 other->swap_rp = me->swap_rp;
1090 other->streaming_rp = me->streaming_rp;
1091 other->stream_cookie = me->stream_cookie;
1092 other->swap_valid = me->swap_valid;
1093}
1094
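/*
 * Editor's note: concrete walk-through of the swap-context table above
 * (illustrative only).  In the "M S M M" portion of the sample
 * sequence the third and fourth runs are back-to-back master runs, so
 * before the fourth run the master borrows the slave's swap page and
 * copies its own bookkeeping (swap_rp, streaming_rp, stream_cookie,
 * swap_valid) over to the slave.  The slave's context is thereby
 * advanced from the stale S1 to S2, which is the "tricky part" called
 * out in the diagram.
 */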
1095void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
1096{
1097 if (chunk)
1098 chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;
1099
1100 if (vdec_stream_based(vdec)) {
fe96802b
NQ
1101 vdec->input.swap_needed = true;
1102
1103 if (vdec_dual(vdec)) {
1104 vdec_get_associate(vdec)->input.dirty_count = 0;
1105 vdec->input.dirty_count++;
1106 if (vdec->input.dirty_count > 1) {
1107 vdec->input.dirty_count = 1;
1108 vdec_borrow_input_context(vdec);
1109 }
b9164398 1110 }
fe96802b
NQ
1111
1112 /* for stream based mode, we update read and write pointer
1113 * also in case decoder wants to keep working on decoding
1114 * for more frames while input front end has more data
1115 */
1116 vdec_sync_input_read(vdec);
1117 vdec_sync_input_write(vdec);
1118
1119 vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
1120 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
b9164398
NQ
1121 }
1122}
1123EXPORT_SYMBOL(vdec_vframe_dirty);
1124
fe96802b
NQ
1125bool vdec_need_more_data(struct vdec_s *vdec)
1126{
1127 if (vdec_stream_based(vdec))
1128 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1129
1130 return false;
1131}
1132EXPORT_SYMBOL(vdec_need_more_data);
1133
976f3376
HZ
1134
1135void hevc_wait_ddr(void)
1136{
1137 unsigned long flags;
1138 spin_lock_irqsave(&vdec_spin_lock, flags);
1139 codec_dmcbus_write(DMC_REQ_CTRL,
1140 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
1141 spin_unlock_irqrestore(&vdec_spin_lock, flags);
1142
1143 while (!(codec_dmcbus_read(DMC_CHAN_STS)
1144 & (1 << 4)))
1145 ;
1146}
1147
b9164398
NQ
1148void vdec_save_input_context(struct vdec_s *vdec)
1149{
fe96802b 1150 struct vdec_input_s *input = &vdec->input;
b9164398 1151
fe96802b 1152#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1153 vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
1154#endif
1155
1156 if (input->target == VDEC_INPUT_TARGET_VLD)
1157 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);
1158
1159 if (input_stream_based(input) && (input->swap_needed)) {
1160 if (input->target == VDEC_INPUT_TARGET_VLD) {
1161 WRITE_VREG(VLD_MEM_SWAP_ADDR,
fe96802b 1162 input->swap_page_phys);
b9164398
NQ
1163 WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
1164 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1165 ;
1166 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1167 vdec->input.stream_cookie =
1168 READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
fe96802b
NQ
1169 vdec->input.swap_rp =
1170 READ_VREG(VLD_MEM_VIFIFO_RP);
1171 vdec->input.total_rd_count =
1172 (u64)vdec->input.stream_cookie *
1173 vdec->input.size + vdec->input.swap_rp -
1174 READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
b9164398
NQ
1175 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1176 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
fe96802b 1177 input->swap_page_phys);
b9164398
NQ
1178 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);
1179
1180 while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
1181 ;
1182 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1183
1184 vdec->input.stream_cookie =
1185 READ_VREG(HEVC_SHIFT_BYTE_COUNT);
fe96802b
NQ
1186 vdec->input.swap_rp =
1187 READ_VREG(HEVC_STREAM_RD_PTR);
1188 if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
1189 (vdec->input.streaming_rp & 0x80000000))
1190 vdec->input.streaming_rp += 1ULL << 32;
1191 vdec->input.streaming_rp &= 0xffffffffULL << 32;
1192 vdec->input.streaming_rp |= vdec->input.stream_cookie;
1193 vdec->input.total_rd_count = vdec->input.streaming_rp;
b9164398
NQ
1194 }
1195
1196 input->swap_valid = true;
fe96802b
NQ
1197 input->swap_needed = false;
1198 /*pr_info("vdec: save context\r\n");*/
b9164398 1199
fe96802b
NQ
1200 vdec_sync_input_read(vdec);
1201
1202 if (vdec_dual(vdec)) {
1203 struct vdec_s *master = (vdec->slave) ?
1204 vdec : vdec->master;
1205 master->input.last_swap_slave = (master->slave == vdec);
1206 /* pr_info("master->input.last_swap_slave = %d\n",
1207 master->input.last_swap_slave); */
1208 }
976f3376
HZ
1209
1210 hevc_wait_ddr();
b9164398
NQ
1211 }
1212}
1213EXPORT_SYMBOL(vdec_save_input_context);
1214
1215void vdec_clean_input(struct vdec_s *vdec)
1216{
1217 struct vdec_input_s *input = &vdec->input;
1218
1219 while (!list_empty(&input->vframe_chunk_list)) {
1220 struct vframe_chunk_s *chunk =
1221 vdec_input_next_chunk(input);
87046a60 1222 if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED))
b9164398
NQ
1223 vdec_input_release_chunk(input, chunk);
1224 else
1225 break;
1226 }
1227 vdec_save_input_context(vdec);
1228}
1229EXPORT_SYMBOL(vdec_clean_input);
1230
fe96802b 1231int vdec_sync_input(struct vdec_s *vdec)
b9164398 1232{
fe96802b
NQ
1233 struct vdec_input_s *input = &vdec->input;
1234 u32 rp = 0, wp = 0, fifo_len = 0;
1235 int size;
1236
1237 vdec_sync_input_read(vdec);
1238 vdec_sync_input_write(vdec);
1239 if (input->target == VDEC_INPUT_TARGET_VLD) {
1240 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1241 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1242
1243 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1244 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1245 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1246 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1247 >> 16) & 0x7f;
1248 }
1249 if (wp >= rp)
1250 size = wp - rp + fifo_len;
1251 else
1252 size = wp + input->size - rp + fifo_len;
1253 if (size < 0) {
1254 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1255 __func__, input->size, wp, rp, fifo_len, size);
1256 size = 0;
b9164398 1257 }
fe96802b
NQ
1258 return size;
1259
1260}
1261EXPORT_SYMBOL(vdec_sync_input);
1262
1263const char *vdec_status_str(struct vdec_s *vdec)
1264{
1265 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1266 vdec_status_string[vdec->status] : "INVALID";
b9164398
NQ
1267}
1268
1269const char *vdec_type_str(struct vdec_s *vdec)
1270{
1271 switch (vdec->type) {
1272 case VDEC_TYPE_SINGLE:
1273 return "VDEC_TYPE_SINGLE";
1274 case VDEC_TYPE_STREAM_PARSER:
1275 return "VDEC_TYPE_STREAM_PARSER";
1276 case VDEC_TYPE_FRAME_BLOCK:
1277 return "VDEC_TYPE_FRAME_BLOCK";
1278 case VDEC_TYPE_FRAME_CIRCULAR:
1279 return "VDEC_TYPE_FRAME_CIRCULAR";
1280 default:
1281 return "VDEC_TYPE_INVALID";
1282 }
1283}
1284
1285const char *vdec_device_name_str(struct vdec_s *vdec)
1286{
1287 return vdec_device_name[vdec->format * 2 + 1];
1288}
fe96802b 1289EXPORT_SYMBOL(vdec_device_name_str);
b9164398
NQ
1290
1291void walk_vdec_core_list(char *s)
1292{
1293 struct vdec_s *vdec;
1294 struct vdec_core_s *core = vdec_core;
1295 unsigned long flags;
1296
1297 pr_info("%s --->\n", s);
1298
1299 flags = vdec_core_lock(vdec_core);
1300
1301 if (list_empty(&core->connected_vdec_list)) {
1302 pr_info("connected vdec list empty\n");
1303 } else {
1304 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1305 pr_info("\tvdec (%p), status = %s\n", vdec,
1306 vdec_status_str(vdec));
1307 }
1308 }
1309
1310 vdec_core_unlock(vdec_core, flags);
1311}
1312EXPORT_SYMBOL(walk_vdec_core_list);
1313
1314/* insert vdec to vdec_core for scheduling,
1315 * for dual running decoders, connect/disconnect always runs in pairs
1316 */
b9164398
NQ
1317int vdec_connect(struct vdec_s *vdec)
1318{
1319 unsigned long flags;
1320
fe96802b
NQ
1321 //trace_vdec_connect(vdec);/*DEBUG_TMP*/
1322
b9164398
NQ
1323 if (vdec->status != VDEC_STATUS_DISCONNECTED)
1324 return 0;
1325
1326 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
1327 vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1328
1329 init_completion(&vdec->inactive_done);
1330
1331 if (vdec->slave) {
1332 vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
1333 vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);
1334
1335 init_completion(&vdec->slave->inactive_done);
1336 }
1337
1338 flags = vdec_core_lock(vdec_core);
1339
1340 list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);
1341
1342 if (vdec->slave) {
1343 list_add_tail(&vdec->slave->list,
1344 &vdec_core->connected_vdec_list);
1345 }
1346
1347 vdec_core_unlock(vdec_core, flags);
1348
1349 up(&vdec_core->sem);
1350
1351 return 0;
1352}
1353EXPORT_SYMBOL(vdec_connect);
1354
1355/* remove vdec from vdec_core scheduling */
1356int vdec_disconnect(struct vdec_s *vdec)
1357{
fe96802b 1358#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1359 vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
1360#endif
fe96802b 1361 //trace_vdec_disconnect(vdec);/*DEBUG_TMP*/
b9164398
NQ
1362
1363 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1364 (vdec->status != VDEC_STATUS_ACTIVE)) {
1365 return 0;
1366 }
6da7a8e8 1367 mutex_lock(&vdec_mutex);
b9164398 1368 /*
e0614bf7 1369 *when a vdec is under the management of scheduler
b9164398
NQ
1370 * the status change will only be from vdec_core_thread
1371 */
1372 vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);
1373
1374 if (vdec->slave)
1375 vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
1376 else if (vdec->master)
1377 vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
6da7a8e8 1378 mutex_unlock(&vdec_mutex);
b9164398
NQ
1379 up(&vdec_core->sem);
1380
87046a60 1381 if(!wait_for_completion_timeout(&vdec->inactive_done,
1382 msecs_to_jiffies(2000)))
1383 goto discon_timeout;
b9164398 1384
87046a60 1385 if (vdec->slave) {
1386 if(!wait_for_completion_timeout(&vdec->slave->inactive_done,
1387 msecs_to_jiffies(2000)))
1388 goto discon_timeout;
1389 } else if (vdec->master) {
1390 if(!wait_for_completion_timeout(&vdec->master->inactive_done,
1391 msecs_to_jiffies(2000)))
1392 goto discon_timeout;
1393 }
b9164398 1394
87046a60 1395 return 0;
1396discon_timeout:
1397 pr_err("%s timeout!!! status: 0x%x\n", __func__, vdec->status);
b9164398
NQ
1398 return 0;
1399}
1400EXPORT_SYMBOL(vdec_disconnect);
1401
1402/* release vdec structure */
1403int vdec_destroy(struct vdec_s *vdec)
1404{
fe96802b
NQ
1405 //trace_vdec_destroy(vdec);/*DEBUG_TMP*/
1406
1407 vdec_input_release(&vdec->input);
b9164398 1408
fe96802b 1409#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1410 vdec_profile_flush(vdec);
1411#endif
fe96802b 1412 ida_simple_remove(&vdec_core->ida, vdec->id);
b9164398
NQ
1413 vfree(vdec);
1414
1415 atomic_dec(&vdec_core->vdec_nr);
1416
1417 return 0;
1418}
1419EXPORT_SYMBOL(vdec_destroy);
1420
1421/*
 1422 * Time sliced decoding is only supported for frame based input,
 1423 * so the legacy decoder can coexist with the time sliced decoder.
1424 */
1425static const char *get_dev_name(bool use_legacy_vdec, int format)
1426{
fe96802b 1427#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1428 if (use_legacy_vdec)
1429 return vdec_device_name[format * 2];
1430 else
1431 return vdec_device_name[format * 2 + 1];
1432#else
1433 return vdec_device_name[format];
1434#endif
1435}
1436
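/*
 * Editor's note (illustrative): with CONFIG_AMLOGIC_MEDIA_MULTI_DEC
 * the vdec_device_name table stores legacy and multi-instance names in
 * pairs, so for example (assuming VFORMAT_H264 indexes the H.264
 * pair):
 *
 *   get_dev_name(true,  VFORMAT_H264)  ->  "amvdec_h264"
 *   get_dev_name(false, VFORMAT_H264)  ->  "ammvdec_h264"
 *
 * i.e. vdec_single() instances bind the legacy driver and everything
 * else binds the ammvdec_* multi-instance driver.
 */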
b9164398 1437/*
e0614bf7 1438 *register vdec_device
b9164398
NQ
1439 * create output, vfm or create ionvideo output
1440 */
1441s32 vdec_init(struct vdec_s *vdec, int is_4k)
1442{
1443 int r = 0;
1444 struct vdec_s *p = vdec;
b9164398 1445 const char *dev_name;
fe96802b 1446 int id = PLATFORM_DEVID_AUTO;/*if have used my self*/
b9164398
NQ
1447
1448 dev_name = get_dev_name(vdec_single(vdec), vdec->format);
1449
1450 if (dev_name == NULL)
1451 return -ENODEV;
1452
1453 pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
1454 dev_name, vdec_type_str(vdec));
1455
1456 /*
e0614bf7 1457 *todo: VFM patch control should be configurable,
b9164398
NQ
1458 * for now all stream based input uses default VFM path.
1459 */
1460 if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
1461 if (vdec_core->vfm_vdec == NULL) {
5b851ff9 1462 pr_debug("vdec_init set vfm decoder %p\n", vdec);
b9164398
NQ
1463 vdec_core->vfm_vdec = vdec;
1464 } else {
1465 pr_info("vdec_init vfm path busy.\n");
1466 return -EBUSY;
1467 }
1468 }
1469
b9164398
NQ
1470 mutex_lock(&vdec_mutex);
1471 inited_vcodec_num++;
1472 mutex_unlock(&vdec_mutex);
1473
1474 vdec_input_set_type(&vdec->input, vdec->type,
1475 (vdec->format == VFORMAT_HEVC ||
a6c89e96 1476 vdec->format == VFORMAT_AVS2 ||
b9164398
NQ
1477 vdec->format == VFORMAT_VP9) ?
1478 VDEC_INPUT_TARGET_HEVC :
1479 VDEC_INPUT_TARGET_VLD);
1480
1481 p->cma_dev = vdec_core->cma_dev;
1482 p->get_canvas = get_canvas;
5f3fbfb7
HZ
1483 atomic_set(&p->inirq_flag, 0);
1484 atomic_set(&p->inirq_thread_flag, 0);
b9164398
NQ
1485 /* todo */
1486 if (!vdec_dual(vdec))
fe96802b 1487 p->use_vfm_path = vdec_stream_based(vdec);
b9164398 1488 /* vdec_dev_reg.flag = 0; */
fe96802b
NQ
1489 if (vdec->id >= 0)
1490 id = vdec->id;
1491 p->dev = platform_device_register_data(
b9164398
NQ
1492 &vdec_core->vdec_core_platform_device->dev,
1493 dev_name,
fe96802b 1494 id,
b9164398
NQ
1495 &p, sizeof(struct vdec_s *));
1496
1497 if (IS_ERR(p->dev)) {
1498 r = PTR_ERR(p->dev);
1499 pr_err("vdec: Decoder device %s register failed (%d)\n",
1500 dev_name, r);
1501
1502 mutex_lock(&vdec_mutex);
1503 inited_vcodec_num--;
1504 mutex_unlock(&vdec_mutex);
1505
fe96802b
NQ
1506 goto error;
1507 } else if (!p->dev->dev.driver) {
1508 pr_info("vdec: Decoder device %s driver probe failed.\n",
1509 dev_name);
1510 r = -ENODEV;
1511
b9164398
NQ
1512 goto error;
1513 }
1514
1515 if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
1516 r = -ENODEV;
1517 pr_err("vdec: Decoder device not handled (%s)\n", dev_name);
1518
1519 mutex_lock(&vdec_mutex);
1520 inited_vcodec_num--;
1521 mutex_unlock(&vdec_mutex);
1522
1523 goto error;
1524 }
1525
1526 if (p->use_vfm_path) {
1527 vdec->vf_receiver_inst = -1;
fe96802b 1528 vdec->vfm_map_id[0] = 0;
b9164398
NQ
1529 } else if (!vdec_dual(vdec)) {
1530 /* create IONVIDEO instance and connect decoder's
1531 * vf_provider interface to it
1532 */
1533 if (p->type != VDEC_TYPE_FRAME_BLOCK) {
1534 r = -ENODEV;
1535 pr_err("vdec: Incorrect decoder type\n");
1536
1537 mutex_lock(&vdec_mutex);
1538 inited_vcodec_num--;
1539 mutex_unlock(&vdec_mutex);
1540
1541 goto error;
1542 }
1543 if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
1544#if 1
ff4c2158
NQ
1545 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1546 &vdec->vf_receiver_inst);
b9164398
NQ
1547#else
1548 /*
1549 * temporarily just use decoder instance ID as iondriver ID
1550 * to solve OMX iondriver instance number check time sequence
1551 * only the limitation is we can NOT mix different video
1552 * decoders since same ID will be used for different decoder
1553 * formats.
1554 */
1555 vdec->vf_receiver_inst = p->dev->id;
1556 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1557 &vdec->vf_receiver_inst);
1558#endif
1559 if (r < 0) {
1560 pr_err("IonVideo frame receiver allocation failed.\n");
1561
1562 mutex_lock(&vdec_mutex);
1563 inited_vcodec_num--;
1564 mutex_unlock(&vdec_mutex);
1565
1566 goto error;
1567 }
1568
fe96802b
NQ
1569 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1570 "%s %s", vdec->vf_provider_name,
1571 vdec->vf_receiver_name);
1572 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1573 "vdec-map-%d", vdec->id);
b9164398
NQ
1574 } else if (p->frame_base_video_path ==
1575 FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
1576 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1577 "%s %s", vdec->vf_provider_name,
fe96802b 1578 "amlvideo deinterlace amvideo");
b9164398 1579 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
fe96802b 1580 "vdec-map-%d", vdec->id);
b9164398
NQ
1581 } else if (p->frame_base_video_path ==
1582 FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
1583 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1584 "%s %s", vdec->vf_provider_name,
1585 "ppmgr amlvideo.1 amvide2");
1586 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
fe96802b 1587 "vdec-map-%d", vdec->id);
6b7ee58f
NQ
1588 } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) {
1589 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
65a98643
NQ
1590 "%s %s", vdec->vf_provider_name,
1591 vdec->vf_receiver_name);
6b7ee58f
NQ
1592 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1593 "vdec-map-%d", vdec->id);
b9164398
NQ
1594 }
1595
1596 if (vfm_map_add(vdec->vfm_map_id,
1597 vdec->vfm_map_chain) < 0) {
1598 r = -ENOMEM;
1599 pr_err("Decoder pipeline map creation failed %s.\n",
1600 vdec->vfm_map_id);
1601 vdec->vfm_map_id[0] = 0;
1602
1603 mutex_lock(&vdec_mutex);
1604 inited_vcodec_num--;
1605 mutex_unlock(&vdec_mutex);
1606
1607 goto error;
1608 }
1609
5b851ff9 1610 pr_debug("vfm map %s created\n", vdec->vfm_map_id);
b9164398
NQ
1611
1612 /*
e0614bf7 1613 *assume IONVIDEO driver already have a few vframe_receiver
b9164398
NQ
1614 * registered.
1615 * 1. Call iondriver function to allocate a IONVIDEO path and
1616 * provide receiver's name and receiver op.
1617 * 2. Get decoder driver's provider name from driver instance
1618 * 3. vfm_map_add(name, "<decoder provider name>
1619 * <iondriver receiver name>"), e.g.
1620 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
1621 * 4. vf_reg_provider and vf_reg_receiver
1622 * Note: the decoder provider's op uses vdec as op_arg
1623 * the iondriver receiver's op uses iondev device as
1624 * op_arg
1625 */
1626
1627 }
1628
1629 if (!vdec_single(vdec)) {
1630 vf_reg_provider(&p->vframe_provider);
1631
1632 vf_notify_receiver(p->vf_provider_name,
1633 VFRAME_EVENT_PROVIDER_START,
1634 vdec);
fe96802b
NQ
1635
1636 if (vdec_core->hint_fr_vdec == NULL)
1637 vdec_core->hint_fr_vdec = vdec;
1638
1639 if (vdec_core->hint_fr_vdec == vdec) {
1640 if (p->sys_info->rate != 0) {
d481db31
NQ
1641 if (!vdec->is_reset)
1642 vf_notify_receiver(p->vf_provider_name,
1643 VFRAME_EVENT_PROVIDER_FR_HINT,
1644 (void *)
1645 ((unsigned long)
1646 p->sys_info->rate));
fe96802b
NQ
1647 vdec->fr_hint_state = VDEC_HINTED;
1648 } else {
1649 vdec->fr_hint_state = VDEC_NEED_HINT;
1650 }
1651 }
b9164398
NQ
1652 }
1653
28e318df 1654 p->dolby_meta_with_el = 0;
5b851ff9 1655 pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
fe96802b
NQ
1656 vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
1657 &vdec->input,
1658 vdec->sys_info->width,
1659 vdec->sys_info->height);
b9164398
NQ
1660 /* vdec is now ready to be active */
1661 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
1662
1663 return 0;
1664
1665error:
1666 return r;
1667}
1668EXPORT_SYMBOL(vdec_init);
1669
fe96802b
NQ
1670/* vdec_create/init/release/destroy are applied to both dual running decoders
1671 */
b9164398
NQ
1672void vdec_release(struct vdec_s *vdec)
1673{
fe96802b 1674 //trace_vdec_release(vdec);/*DEBUG_TMP*/
a6c89e96
NQ
1675#ifdef VDEC_DEBUG_SUPPORT
1676 if (step_mode) {
1677 pr_info("VDEC_DEBUG: in step_mode, wait release\n");
1678 while (step_mode)
1679 udelay(10);
1680 pr_info("VDEC_DEBUG: step_mode is clear\n");
1681 }
1682#endif
b9164398
NQ
1683 vdec_disconnect(vdec);
1684
fe96802b
NQ
1685 if (vdec->vframe_provider.name) {
1686 if (!vdec_single(vdec)) {
1687 if (vdec_core->hint_fr_vdec == vdec
d481db31
NQ
1688 && vdec->fr_hint_state == VDEC_HINTED
1689 && !vdec->is_reset)
fe96802b
NQ
1690 vf_notify_receiver(
1691 vdec->vf_provider_name,
1692 VFRAME_EVENT_PROVIDER_FR_END_HINT,
1693 NULL);
1694 vdec->fr_hint_state = VDEC_NO_NEED_HINT;
1695 }
b9164398 1696 vf_unreg_provider(&vdec->vframe_provider);
fe96802b 1697 }
b9164398
NQ
1698
1699 if (vdec_core->vfm_vdec == vdec)
1700 vdec_core->vfm_vdec = NULL;
1701
fe96802b
NQ
1702 if (vdec_core->hint_fr_vdec == vdec)
1703 vdec_core->hint_fr_vdec = NULL;
1704
b9164398
NQ
1705 if (vdec->vf_receiver_inst >= 0) {
1706 if (vdec->vfm_map_id[0]) {
1707 vfm_map_remove(vdec->vfm_map_id);
1708 vdec->vfm_map_id[0] = 0;
1709 }
b9164398
NQ
1710 }
1711
5f3fbfb7
HZ
1712 while ((atomic_read(&vdec->inirq_flag) > 0)
1713 || (atomic_read(&vdec->inirq_thread_flag) > 0))
1714 schedule();
1715
b9164398 1716 platform_device_unregister(vdec->dev);
a35da9f0
PY
1717 pr_debug("vdec_release instance %p, total %d\n", vdec,
1718 atomic_read(&vdec_core->vdec_nr));
b9164398
NQ
1719 vdec_destroy(vdec);
1720
1721 mutex_lock(&vdec_mutex);
1722 inited_vcodec_num--;
1723 mutex_unlock(&vdec_mutex);
fe96802b 1724
b9164398
NQ
1725}
1726EXPORT_SYMBOL(vdec_release);
1727
a6c89e96
NQ
1728/* For dual running decoders, vdec_reset is only called with master vdec.
1729 */
b9164398
NQ
1730int vdec_reset(struct vdec_s *vdec)
1731{
a6c89e96
NQ
1732 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
1733
b9164398
NQ
1734 vdec_disconnect(vdec);
1735
1736 if (vdec->vframe_provider.name)
1737 vf_unreg_provider(&vdec->vframe_provider);
1738
1739 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
1740 vf_unreg_provider(&vdec->slave->vframe_provider);
1741
1742 if (vdec->reset) {
1743 vdec->reset(vdec);
1744 if (vdec->slave)
1745 vdec->slave->reset(vdec->slave);
1746 }
158de7c4 1747 vdec->mc_loaded = 0;/*clear for reload firmware*/
b9164398
NQ
1748 vdec_input_release(&vdec->input);
1749
6b7ee58f
NQ
1750 vdec_input_init(&vdec->input, vdec);
1751
1752 vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
1753 vdec->sys_info->height);
1754
b9164398
NQ
1755 vf_reg_provider(&vdec->vframe_provider);
1756 vf_notify_receiver(vdec->vf_provider_name,
1757 VFRAME_EVENT_PROVIDER_START, vdec);
1758
1759 if (vdec->slave) {
1760 vf_reg_provider(&vdec->slave->vframe_provider);
1761 vf_notify_receiver(vdec->slave->vf_provider_name,
1762 VFRAME_EVENT_PROVIDER_START, vdec->slave);
158de7c4 1763 vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
b9164398
NQ
1764 }
1765
1766 vdec_connect(vdec);
1767
1768 return 0;
1769}
1770EXPORT_SYMBOL(vdec_reset);
1771
fe96802b
NQ
1772void vdec_free_cmabuf(void)
1773{
1774 mutex_lock(&vdec_mutex);
1775
a35da9f0 1776 /*if (inited_vcodec_num > 0) {
fe96802b
NQ
1777 mutex_unlock(&vdec_mutex);
1778 return;
a35da9f0 1779 }*/
fe96802b
NQ
1780 mutex_unlock(&vdec_mutex);
1781}
1782
a6c89e96 1783int vdec_core_request(struct vdec_s *vdec, unsigned long mask)
b9164398 1784{
a6c89e96 1785 vdec->core_mask |= mask;
b9164398 1786
a6c89e96
NQ
1787 if (vdec->slave)
1788 vdec->slave->core_mask |= mask;
1789
1790 return 0;
1791}
1792EXPORT_SYMBOL(vdec_core_request);
1793
1794int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
1795{
1796 vdec->core_mask &= ~mask;
1797
1798 if (vdec->slave)
1799 vdec->slave->core_mask &= ~mask;
1800
1801 return 0;
1802}
1803EXPORT_SYMBOL(vdec_core_release);
1804
a35da9f0 1805bool vdec_core_with_input(unsigned long mask)
a6c89e96
NQ
1806{
1807 enum vdec_type_e type;
1808
1809 for (type = VDEC_1; type < VDEC_MAX; type++) {
1810 if ((mask & (1 << type)) && cores_with_input[type])
1811 return true;
b9164398
NQ
1812 }
1813
a6c89e96
NQ
1814 return false;
1815}
1816
1817void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
1818{
1819 unsigned long i;
1820 unsigned long t = mask;
6da7a8e8 1821 mutex_lock(&vdec_mutex);
a6c89e96
NQ
1822 while (t) {
1823 i = __ffs(t);
1824 clear_bit(i, &vdec->active_mask);
1825 t &= ~(1 << i);
1826 }
1827
1828 if (vdec->active_mask == 0)
1829 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
6da7a8e8
PY
1830
1831 mutex_unlock(&vdec_mutex);
a6c89e96
NQ
1832}
1833EXPORT_SYMBOL(vdec_core_finish_run);
1834/*
1835 * find what core resources are available for vdec
1836 */
1837static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
1838 unsigned long active_mask)
1839{
1840 unsigned long mask = vdec->core_mask &
1841 ~CORE_MASK_COMBINE;
1842
1843 if (vdec->core_mask & CORE_MASK_COMBINE) {
1844 /* combined cores must be granted together */
1845 if ((mask & ~active_mask) == mask)
1846 return mask;
1847 else
1848 return 0;
1849 } else
1850 return mask & ~vdec->sched_mask & ~active_mask;
1851}
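/*
 * Editor's note: example for the scheduling mask above (illustrative;
 * the CORE_MASK_* names are assumed from vdec.h).  A decoder that
 * requested CORE_MASK_VDEC_1 | CORE_MASK_HEVC | CORE_MASK_COMBINE can
 * only be scheduled when both cores are free together, so if
 * active_mask already contains CORE_MASK_HEVC this returns 0 instead
 * of granting VDEC_1 alone.
 */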
1852
1853/*
e0614bf7 1854 *Decoder callback
b9164398
NQ
1855 * Each decoder instance uses this callback to notify status change, e.g. when
1856 * decoder finished using HW resource.
1857 * a sample callback from decoder's driver is following:
1858 *
1859 * if (hw->vdec_cb) {
1860 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1861 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
1862 * }
1863 */
1864static void vdec_callback(struct vdec_s *vdec, void *data)
1865{
1866 struct vdec_core_s *core = (struct vdec_core_s *)data;
1867
fe96802b 1868#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1869 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
1870#endif
1871
1872 up(&core->sem);
1873}
1874
1875static irqreturn_t vdec_isr(int irq, void *dev_id)
1876{
1877 struct vdec_isr_context_s *c =
1878 (struct vdec_isr_context_s *)dev_id;
976f3376 1879 struct vdec_s *vdec = vdec_core->active_vdec;
5f3fbfb7
HZ
1880 irqreturn_t ret = IRQ_HANDLED;
1881 if (vdec)
1882 atomic_set(&vdec->inirq_flag, 1);
1883 if (c->dev_isr) {
1884 ret = c->dev_isr(irq, c->dev_id);
1885 goto isr_done;
1886 }
b9164398 1887
a6c89e96
NQ
1888 if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
1889 (c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
1890 (c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
b9164398
NQ
1891#if 0
1892 pr_warn("vdec interrupt w/o a valid receiver\n");
1893#endif
5f3fbfb7 1894 goto isr_done;
b9164398
NQ
1895 }
1896
1897 if (!vdec) {
1898#if 0
1899 pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
1900 core);
1901#endif
5f3fbfb7 1902 goto isr_done;
b9164398
NQ
1903 }
1904
1905 if (!vdec->irq_handler) {
1906#if 0
1907 pr_warn("vdec instance has no irq handle.\n");
1908#endif
5f3fbfb7 1909 goto isr_done;
b9164398
NQ
1910 }
1911
5f3fbfb7
HZ
1912 ret = vdec->irq_handler(vdec, c->index);
1913isr_done:
1914 if (vdec)
1915 atomic_set(&vdec->inirq_flag, 0);
1916 return ret;
b9164398
NQ
1917}
1918
1919static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
1920{
1921 struct vdec_isr_context_s *c =
1922 (struct vdec_isr_context_s *)dev_id;
976f3376 1923 struct vdec_s *vdec = vdec_core->active_vdec;
5f3fbfb7
HZ
1924 irqreturn_t ret = IRQ_HANDLED;
1925 if (vdec)
1926 atomic_set(&vdec->inirq_thread_flag, 1);
1927 if (c->dev_threaded_isr) {
1928 ret = c->dev_threaded_isr(irq, c->dev_id);
1929 goto thread_isr_done;
1930 }
b9164398 1931 if (!vdec)
5f3fbfb7 1932 goto thread_isr_done;
b9164398
NQ
1933
1934 if (!vdec->threaded_irq_handler)
5f3fbfb7
HZ
1935 goto thread_isr_done;
1936 ret = vdec->threaded_irq_handler(vdec, c->index);
1937thread_isr_done:
1938 if (vdec)
1939 atomic_set(&vdec->inirq_thread_flag, 0);
1940 return ret;
b9164398
NQ
1941}
1942
a6c89e96 1943unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
b9164398 1944{
a6c89e96 1945 unsigned long ready_mask;
fe96802b 1946 struct vdec_input_s *input = &vdec->input;
a6c89e96
NQ
1947 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1948 (vdec->status != VDEC_STATUS_ACTIVE))
b9164398
NQ
1949 return false;
1950
1951 if (!vdec->run_ready)
1952 return false;
1953
1954 if ((vdec->slave || vdec->master) &&
1955 (vdec->sched == 0))
1956 return false;
a6c89e96
NQ
1957#ifdef VDEC_DEBUG_SUPPORT
1958 inc_profi_count(mask, vdec->check_count);
1959#endif
1960 if (vdec_core_with_input(mask)) {
1961 /* check frame based input underrun */
1962 if (input && !input->eos && input_frame_based(input)
1963 && (!vdec_input_next_chunk(input))) {
1964#ifdef VDEC_DEBUG_SUPPORT
1965 inc_profi_count(mask, vdec->input_underrun_count);
1966#endif
fe96802b 1967 return false;
a6c89e96
NQ
1968 }
1969 /* check streaming prepare level threshold if not EOS */
1970 if (input && input_stream_based(input) && !input->eos) {
1971 u32 rp, wp, level;
1972
1973 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
1974 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
1975 if (wp < rp)
1976 level = input->size + wp - rp;
1977 else
1978 level = wp - rp;
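			/*
			 * Worked example (hypothetical numbers): with a
			 * 0x1000-byte stream buffer, rp = 0xf00 and a wrapped
			 * wp = 0x100 give level = 0x1000 + 0x100 - 0xf00 =
			 * 0x200 bytes pending; without wrap it is just wp - rp.
			 */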
1979
1980 if ((level < input->prepare_level) &&
1981 (pts_get_rec_num(PTS_TYPE_VIDEO,
1982 vdec->input.total_rd_count) < 2)) {
1983 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
1984#ifdef VDEC_DEBUG_SUPPORT
1985 inc_profi_count(mask, vdec->input_underrun_count);
1986 if (step_mode & 0x200) {
1987 if ((step_mode & 0xff) == vdec->id) {
1988 step_mode |= 0xff;
1989 return mask;
1990 }
1991 }
1992#endif
1993 return false;
1994 } else if (level > input->prepare_level)
1995 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
1996 }
fe96802b
NQ
1997 }
1998
b9164398
NQ
1999 if (step_mode) {
2000 if ((step_mode & 0xff) != vdec->id)
a6c89e96
NQ
2001 return 0;
2002 step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
b9164398
NQ
2003 }
2004
a6c89e96 2005 /*step_mode &= ~0xff; does not work for id 0, so it was removed*/
b9164398 2006
fe96802b 2007#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
2008 vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
2009#endif
2010
a6c89e96
NQ
2011 ready_mask = vdec->run_ready(vdec, mask) & mask;
2012#ifdef VDEC_DEBUG_SUPPORT
2013 if (ready_mask != mask)
2014 inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
2015#endif
fe96802b 2016#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
a6c89e96 2017 if (ready_mask)
b9164398
NQ
2018 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
2019#endif
2020
a6c89e96
NQ
2021 return ready_mask;
2022}
2023
2024/* bridge on/off vdec's interrupt processing to vdec core */
2025static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
2026 bool enable)
2027{
2028 enum vdec_type_e type;
2029
2030 for (type = VDEC_1; type < VDEC_MAX; type++) {
2031 if (mask & (1 << type)) {
2032 struct vdec_isr_context_s *c =
2033 &vdec_core->isr_context[cores_int[type]];
2034 if (enable)
2035 c->vdec = vdec;
2036 else if (c->vdec == vdec)
2037 c->vdec = NULL;
2038 }
2039 }
b9164398
NQ
2040}
2041
87046a60 2042static void vdec_remove_reset(struct vdec_s *vdec)
2043{
2044 struct vdec_input_s *input = &vdec->input;
2045
2046 if (input->target == VDEC_INPUT_TARGET_VLD) {
2047 amvdec_stop();
2048 vdec_reset_core(vdec);
2049 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
2050 amhevc_stop();
2051 hevc_reset_core(vdec);
2052 }
2053 pr_info(" %s vdec %p\n", __func__, vdec);
2054}
2055
fe96802b
NQ
2056/*
2057 * Set up secure protection for each decoder instance running.
2058 * Note: The operation from REE side only resets memory access
2059 * to a default policy and even a non_secure type will still be
2060 * changed to secure type automatically when secure source is
2061 * detected inside TEE.
 2062 * Also perform need_more_data checking and set the flag if the decoder
 2063 * is not consuming data.
2064 */
a6c89e96 2065void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
fe96802b
NQ
2066{
2067 struct vdec_input_s *input = &vdec->input;
a6c89e96 2068 int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
fe96802b
NQ
2069 DMC_DEV_TYPE_NON_SECURE;
2070
a6c89e96
NQ
2071 vdec_route_interrupt(vdec, mask, true);
2072
2073 if (!vdec_core_with_input(mask))
2074 return;
2075
fe96802b 2076 if (input->target == VDEC_INPUT_TARGET_VLD)
a6c89e96 2077 tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
fe96802b 2078 else if (input->target == VDEC_INPUT_TARGET_HEVC)
a6c89e96 2079 tee_config_device_secure(DMC_DEV_ID_HEVC, secure);
fe96802b
NQ
2080
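	/*
	 * Flag handshake, as inferred from this function: every run sets
	 * VDEC_NEED_MORE_DATA_RUN and clears VDEC_NEED_MORE_DATA_DIRTY. If a
	 * stream-based decoder is scheduled again with RUN still set and DIRTY
	 * still clear (no new data consumed since the last run), the
	 * VDEC_NEED_MORE_DATA flag is raised.
	 */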
2081 if (vdec_stream_based(vdec) &&
2082 ((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
2083 (vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
2084 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
2085 }
2086
2087 vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
2088 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
2089}
2090
b9164398
NQ
 2091/* The vdec core thread manages all decoder instances in the active list. When
 2092 * a vdec is added into the active list, it can only be in one of two states:
 2093 * VDEC_STATUS_CONNECTED (the decoder does not own HW resources and is ready to run)
2094 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
2095 * Removing a decoder from active list is only performed within core thread.
2096 * Adding a decoder into active list is performed from user thread.
2097 */
2098static int vdec_core_thread(void *data)
2099{
b9164398 2100 struct vdec_core_s *core = (struct vdec_core_s *)data;
158de7c4
HZ
2101 struct vdec_s *lastvdec;
2102 struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
b9164398
NQ
2103
2104 sched_setscheduler(current, SCHED_FIFO, &param);
2105
2106 allow_signal(SIGTERM);
158de7c4 2107 lastvdec = NULL;
b9164398 2108 while (down_interruptible(&core->sem) == 0) {
a6c89e96
NQ
2109 struct vdec_s *vdec, *tmp, *worker;
2110 unsigned long sched_mask = 0;
b9164398
NQ
2111 LIST_HEAD(disconnecting_list);
2112
2113 if (kthread_should_stop())
2114 break;
6da7a8e8 2115 mutex_lock(&vdec_mutex);
b9164398 2116 /* clean up previous active vdec's input */
a6c89e96
NQ
2117 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
2118 unsigned long mask = vdec->sched_mask &
2119 (vdec->active_mask ^ vdec->sched_mask);
2120
2121 vdec_route_interrupt(vdec, mask, false);
2122
2123#ifdef VDEC_DEBUG_SUPPORT
2124 update_profi_clk_stop(vdec, mask, get_current_clk());
2125#endif
2126 /*
2127 * If decoder released some core resources (mask), then
2128 * check if these core resources are associated
2129 * with any input side and do input clean up accordingly
2130 */
2131 if (vdec_core_with_input(mask)) {
2132 struct vdec_input_s *input = &vdec->input;
2133 while (!list_empty(
2134 &input->vframe_chunk_list)) {
2135 struct vframe_chunk_s *chunk =
2136 vdec_input_next_chunk(input);
87046a60 2137 if (chunk && (chunk->flag &
2138 VFRAME_CHUNK_FLAG_CONSUMED))
a6c89e96
NQ
2139 vdec_input_release_chunk(input,
2140 chunk);
2141 else
2142 break;
2143 }
2144
2145 vdec_save_input_context(vdec);
b9164398
NQ
2146 }
2147
a6c89e96
NQ
2148 vdec->sched_mask &= ~mask;
2149 core->sched_mask &= ~mask;
b9164398
NQ
2150 }
2151
2152 /*
e0614bf7 2153 *todo:
b9164398
NQ
2154 * this is the case when the decoder is in active mode and
2155 * the system side wants to stop it. Currently we rely on
2156 * the decoder instance to go back to VDEC_STATUS_CONNECTED
2157 * from VDEC_STATUS_ACTIVE by its own. However, if for some
 2158 * reason the decoder cannot exit by itself (dead decoding
2159 * or whatever), then we may have to add another vdec API
2160 * to kill the vdec and release its HW resource and make it
2161 * become inactive again.
2162 * if ((core->active_vdec) &&
2163 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
2164 * }
2165 */
2166
b9164398
NQ
2167 /* check disconnected decoders */
2168 list_for_each_entry_safe(vdec, tmp,
2169 &core->connected_vdec_list, list) {
2170 if ((vdec->status == VDEC_STATUS_CONNECTED) &&
2171 (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
87046a60 2172 if (core->active_vdec == vdec) {
2173 vdec_remove_reset(vdec);
b9164398 2174 core->active_vdec = NULL;
87046a60 2175 }
b9164398
NQ
2176 list_move(&vdec->list, &disconnecting_list);
2177 }
2178 }
6da7a8e8 2179 mutex_unlock(&vdec_mutex);
a6c89e96
NQ
2180 /* elect next vdec to be scheduled */
2181 vdec = core->active_vdec;
2182 if (vdec) {
2183 vdec = list_entry(vdec->list.next, struct vdec_s, list);
b9164398
NQ
2184 list_for_each_entry_from(vdec,
2185 &core->connected_vdec_list, list) {
a6c89e96
NQ
2186 sched_mask = vdec_schedule_mask(vdec,
2187 core->sched_mask);
2188 if (!sched_mask)
2189 continue;
2190 sched_mask = vdec_ready_to_run(vdec,
2191 sched_mask);
2192 if (sched_mask)
b9164398
NQ
2193 break;
2194 }
2195
a6c89e96
NQ
2196 if (&vdec->list == &core->connected_vdec_list)
2197 vdec = NULL;
2198 }
2199
2200 if (!vdec) {
2201 /* search from beginning */
2202 list_for_each_entry(vdec,
2203 &core->connected_vdec_list, list) {
2204 sched_mask = vdec_schedule_mask(vdec,
2205 core->sched_mask);
2206 if (vdec == core->active_vdec) {
2207 if (!sched_mask) {
2208 vdec = NULL;
b9164398 2209 break;
a6c89e96
NQ
2210 }
2211
2212 sched_mask = vdec_ready_to_run(vdec,
2213 sched_mask);
b9164398 2214
a6c89e96 2215 if (!sched_mask) {
b9164398
NQ
2216 vdec = NULL;
2217 break;
2218 }
a6c89e96 2219 break;
b9164398 2220 }
a6c89e96
NQ
2221
2222 if (!sched_mask)
2223 continue;
2224
2225 sched_mask = vdec_ready_to_run(vdec,
2226 sched_mask);
2227 if (sched_mask)
2228 break;
b9164398
NQ
2229 }
2230
2231 if (&vdec->list == &core->connected_vdec_list)
2232 vdec = NULL;
b9164398
NQ
2233 }
2234
a6c89e96
NQ
2235 worker = vdec;
2236
2237 if (vdec) {
2238 unsigned long mask = sched_mask;
2239 unsigned long i;
2240
2241 /* setting active_mask should be atomic.
2242 * it can be modified by decoder driver callbacks.
2243 */
2244 while (sched_mask) {
2245 i = __ffs(sched_mask);
2246 set_bit(i, &vdec->active_mask);
2247 sched_mask &= ~(1 << i);
2248 }
2249
2250 /* vdec's sched_mask is only set from core thread */
2251 vdec->sched_mask |= mask;
158de7c4
HZ
2252 if (lastvdec) {
2253 if ((lastvdec != vdec) && (lastvdec->mc_type != vdec->mc_type))
2254 vdec->mc_loaded = 0;/*clear for reload firmware*/
2255 }
89e7a20f 2256 lastvdec = vdec;
158de7c4
HZ
2257 if (debug & 2)
 2258 vdec->mc_loaded = 0;/*always reload firmware*/
b9164398
NQ
2259 vdec_set_status(vdec, VDEC_STATUS_ACTIVE);
2260
a6c89e96 2261 core->sched_mask |= mask;
b9164398 2262 core->active_vdec = vdec;
fe96802b 2263#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
2264 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
2265#endif
a6c89e96
NQ
2266 vdec_prepare_run(vdec, mask);
2267#ifdef VDEC_DEBUG_SUPPORT
2268 inc_profi_count(mask, vdec->run_count);
2269 update_profi_clk_run(vdec, mask, get_current_clk());
2270#endif
2271 vdec->run(vdec, mask, vdec_callback, core);
2272
fe96802b 2273
a6c89e96
NQ
2274 /* we have some cores scheduled, keep working until
2275 * all vdecs are checked with no cores to schedule
2276 */
2277 up(&core->sem);
b9164398
NQ
2278 }
2279
2280 /* remove disconnected decoder from active list */
2281 list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
2282 list_del(&vdec->list);
2283 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
89e7a20f 2284 lastvdec = NULL;
b9164398
NQ
2285 complete(&vdec->inactive_done);
2286 }
2287
a6c89e96
NQ
2288 /* if there is no new work scheduled and nothing
 2289 * is running, sleep briefly (1~2 ms) before kicking the scheduler again
2290 */
877a2735 2291 if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
14d1803e 2292 usleep_range(1000, 2000);
b9164398
NQ
2293 up(&core->sem);
2294 }
3f4a083c 2295
b9164398
NQ
2296 }
2297
2298 return 0;
2299}
2300
2301#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
2302static bool test_hevc(u32 decomp_addr, u32 us_delay)
2303{
2304 int i;
2305
2306 /* SW_RESET IPP */
2307 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
2308 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);
2309
2310 /* initialize all canvas table */
2311 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
2312 for (i = 0; i < 32; i++)
2313 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
2314 0x1 | (i << 8) | decomp_addr);
2315 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
2316 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
2317 for (i = 0; i < 32; i++)
2318 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
2319
2320 /* Initialize mcrcc */
2321 WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
2322 WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
2323 WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
2324 WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
2325
2326 /* Decomp initialize */
2327 WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
2328 WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
2329
2330 /* Frame level initialization */
2331 WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
2332 WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
2333 WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
2334 WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));
2335
2336 WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
2337 WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);
2338
2339 /* Enable SWIMP mode */
2340 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);
2341
2342 /* Enable frame */
2343 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
2344 WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);
2345
2346 /* Send SW-command CTB info */
2347 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);
2348
2349 /* Send PU_command */
2350 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
2351 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
2352 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
2353 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);
2354
2355 udelay(us_delay);
2356
2357 WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);
2358
2359 return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
2360}
2361
865e748b
NQ
2362void vdec_power_reset(void)
2363{
2364 /* enable vdec1 isolation */
2365 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2366 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
2367 /* power off vdec1 memories */
2368 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
2369 /* vdec1 power off */
2370 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2371 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
2372
2373 if (has_vdec2()) {
2374 /* enable vdec2 isolation */
2375 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2376 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300);
2377 /* power off vdec2 memories */
2378 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
2379 /* vdec2 power off */
2380 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2381 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30);
2382 }
2383
2384 if (has_hdec()) {
2385 /* enable hcodec isolation */
2386 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2387 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30);
2388 /* power off hcodec memories */
2389 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
2390 /* hcodec power off */
2391 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2392 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
2393 }
2394
2395 if (has_hevc_vdec()) {
2396 /* enable hevc isolation */
2397 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2398 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00);
2399 /* power off hevc memories */
2400 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
2401 /* hevc power off */
2402 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2403 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0);
2404 }
2405}
2406EXPORT_SYMBOL(vdec_power_reset);
2407
b9164398
NQ
2408void vdec_poweron(enum vdec_type_e core)
2409{
2410 void *decomp_addr = NULL;
2411 dma_addr_t decomp_dma_addr;
2412 u32 decomp_addr_aligned = 0;
2413 int hevc_loop = 0;
2414
2415 if (core >= VDEC_MAX)
2416 return;
2417
2418 mutex_lock(&vdec_mutex);
2419
2420 vdec_core->power_ref_count[core]++;
2421 if (vdec_core->power_ref_count[core] > 1) {
2422 mutex_unlock(&vdec_mutex);
2423 return;
2424 }
2425
2426 if (vdec_on(core)) {
2427 mutex_unlock(&vdec_mutex);
2428 return;
2429 }
2430
2431 if (hevc_workaround_needed() &&
2432 (core == VDEC_HEVC)) {
2433 decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
2434 SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);
2435
2436 if (decomp_addr) {
2437 decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
2438 memset((u8 *)decomp_addr +
2439 (decomp_addr_aligned - decomp_dma_addr),
2440 0xff, SZ_4K);
2441 } else
2442 pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
2443 }
2444
2445 if (core == VDEC_1) {
2446 /* vdec1 power on */
2447 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2448 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~0xc);
2449 /* wait 10uS */
2450 udelay(10);
2451 /* vdec1 soft reset */
2452 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2453 WRITE_VREG(DOS_SW_RESET0, 0);
2454 /* enable vdec1 clock */
2455 /*
e0614bf7
ZZ
 2456 *Add power-on vdec clock level setting; this is only needed for the m8 chip.
 2457 * m8baby and m8m2 can adjust the vdec clock dynamically, so they
 2458 * power on with the default clock level.
2459 */
65a98643 2460 amports_switch_gate("clk_vdec_mux", 1);
b9164398
NQ
2461 vdec_clock_hi_enable();
2462 /* power up vdec memories */
2463 WRITE_VREG(DOS_MEM_PD_VDEC, 0);
2464 /* remove vdec1 isolation */
2465 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2466 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~0xC0);
2467 /* reset DOS top registers */
2468 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
63e810c0
CG
2469 if (get_cpu_major_id() >=
2470 AM_MESON_CPU_MAJOR_ID_GXBB) {
b9164398 2471 /*
e0614bf7
ZZ
2472 *enable VDEC_1 DMC request
2473 */
b9164398
NQ
2474 unsigned long flags;
2475
2476 spin_lock_irqsave(&vdec_spin_lock, flags);
2477 codec_dmcbus_write(DMC_REQ_CTRL,
2478 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
2479 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2480 }
2481 } else if (core == VDEC_2) {
2482 if (has_vdec2()) {
2483 /* vdec2 power on */
2484 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2485 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2486 ~0x30);
2487 /* wait 10uS */
2488 udelay(10);
2489 /* vdec2 soft reset */
2490 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2491 WRITE_VREG(DOS_SW_RESET2, 0);
 2492 /* enable vdec2 clock */
2493 vdec2_clock_hi_enable();
2494 /* power up vdec memories */
2495 WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
2496 /* remove vdec2 isolation */
2497 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2498 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2499 ~0x300);
2500 /* reset DOS top registers */
2501 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2502 }
2503 } else if (core == VDEC_HCODEC) {
2504 if (has_hdec()) {
2505 /* hcodec power on */
2506 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2507 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2508 ~0x3);
2509 /* wait 10uS */
2510 udelay(10);
2511 /* hcodec soft reset */
2512 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2513 WRITE_VREG(DOS_SW_RESET1, 0);
2514 /* enable hcodec clock */
2515 hcodec_clock_enable();
2516 /* power up hcodec memories */
2517 WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
2518 /* remove hcodec isolation */
2519 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2520 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2521 ~0x30);
2522 }
2523 } else if (core == VDEC_HEVC) {
2524 if (has_hevc_vdec()) {
2525 bool hevc_fixed = false;
2526
2527 while (!hevc_fixed) {
2528 /* hevc power on */
2529 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2530 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2531 ~0xc0);
2532 /* wait 10uS */
2533 udelay(10);
2534 /* hevc soft reset */
2535 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2536 WRITE_VREG(DOS_SW_RESET3, 0);
2537 /* enable hevc clock */
65a98643
NQ
2538 amports_switch_gate("clk_hevc_mux", 1);
2539 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
2540 amports_switch_gate("clk_hevcb_mux", 1);
b9164398 2541 hevc_clock_hi_enable();
118bcc65 2542 hevc_back_clock_hi_enable();
b9164398
NQ
2543 /* power up hevc memories */
2544 WRITE_VREG(DOS_MEM_PD_HEVC, 0);
2545 /* remove hevc isolation */
2546 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2547 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2548 ~0xc00);
2549
2550 if (!hevc_workaround_needed())
2551 break;
2552
2553 if (decomp_addr)
2554 hevc_fixed = test_hevc(
2555 decomp_addr_aligned, 20);
2556
2557 if (!hevc_fixed) {
2558 hevc_loop++;
2559
2560 mutex_unlock(&vdec_mutex);
2561
2562 if (hevc_loop >= HEVC_TEST_LIMIT) {
2563 pr_warn("hevc power sequence over limit\n");
2564 pr_warn("=====================================================\n");
2565 pr_warn(" This chip is identified to have HW failure.\n");
2566 pr_warn(" Please contact sqa-platform to replace the platform.\n");
2567 pr_warn("=====================================================\n");
2568
2569 panic("Force panic for chip detection !!!\n");
2570
2571 break;
2572 }
2573
2574 vdec_poweroff(VDEC_HEVC);
2575
2576 mdelay(10);
2577
2578 mutex_lock(&vdec_mutex);
2579 }
2580 }
2581
2582 if (hevc_loop > hevc_max_reset_count)
2583 hevc_max_reset_count = hevc_loop;
2584
2585 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2586 udelay(10);
2587 WRITE_VREG(DOS_SW_RESET3, 0);
2588 }
2589 }
2590
2591 if (decomp_addr)
2592 codec_mm_dma_free_coherent(MEM_NAME,
2593 SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);
2594
2595 mutex_unlock(&vdec_mutex);
2596}
2597EXPORT_SYMBOL(vdec_poweron);
2598
2599void vdec_poweroff(enum vdec_type_e core)
2600{
2601 if (core >= VDEC_MAX)
2602 return;
2603
2604 mutex_lock(&vdec_mutex);
2605
2606 vdec_core->power_ref_count[core]--;
2607 if (vdec_core->power_ref_count[core] > 0) {
2608 mutex_unlock(&vdec_mutex);
2609 return;
2610 }
2611
2612 if (core == VDEC_1) {
63e810c0
CG
2613 if (get_cpu_major_id() >=
2614 AM_MESON_CPU_MAJOR_ID_GXBB) {
b9164398
NQ
2615 /* disable VDEC_1 DMC REQ*/
2616 unsigned long flags;
2617
2618 spin_lock_irqsave(&vdec_spin_lock, flags);
2619 codec_dmcbus_write(DMC_REQ_CTRL,
2620 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
2621 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2622 udelay(10);
2623 }
2624 /* enable vdec1 isolation */
2625 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2626 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
2627 /* power off vdec1 memories */
2628 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
2629 /* disable vdec1 clock */
2630 vdec_clock_off();
2631 /* vdec1 power off */
2632 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2633 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
2634 } else if (core == VDEC_2) {
2635 if (has_vdec2()) {
2636 /* enable vdec2 isolation */
2637 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2638 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2639 0x300);
2640 /* power off vdec2 memories */
2641 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
2642 /* disable vdec2 clock */
2643 vdec2_clock_off();
2644 /* vdec2 power off */
2645 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2646 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
2647 0x30);
2648 }
2649 } else if (core == VDEC_HCODEC) {
2650 if (has_hdec()) {
2651 /* enable hcodec isolation */
2652 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2653 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2654 0x30);
2655 /* power off hcodec memories */
2656 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
2657 /* disable hcodec clock */
2658 hcodec_clock_off();
2659 /* hcodec power off */
2660 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2661 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
2662 }
2663 } else if (core == VDEC_HEVC) {
2664 if (has_hevc_vdec()) {
28e318df
NQ
2665 if (no_powerdown == 0) {
2666 /* enable hevc isolation */
2667 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
b9164398
NQ
2668 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2669 0xc00);
2670 /* power off hevc memories */
2671 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
a6c89e96 2672
b9164398
NQ
2673 /* disable hevc clock */
2674 hevc_clock_off();
63e810c0 2675 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
a6c89e96
NQ
2676 hevc_back_clock_off();
2677
b9164398
NQ
2678 /* hevc power off */
2679 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2680 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
2681 0xc0);
28e318df
NQ
2682 } else {
2683 pr_info("!!!!!!!!not power down\n");
2684 hevc_reset_core(NULL);
2685 no_powerdown = 0;
2686 }
b9164398
NQ
2687 }
2688 }
2689 mutex_unlock(&vdec_mutex);
2690}
2691EXPORT_SYMBOL(vdec_poweroff);
2692
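/*
 * Minimal usage sketch (hypothetical caller, not part of this file); the
 * per-core power handling is reference counted, so calls must be paired:
 *
 *	vdec_poweron(VDEC_1);
 *	...load firmware and run the decoder...
 *	vdec_poweroff(VDEC_1);
 *
 * Only the first poweron and the last poweroff of a core actually touch
 * the AO_RTI_GEN_PWR_* / DOS_* registers.
 */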
2693bool vdec_on(enum vdec_type_e core)
2694{
2695 bool ret = false;
2696
2697 if (core == VDEC_1) {
2698 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc) == 0) &&
2699 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
2700 ret = true;
2701 } else if (core == VDEC_2) {
2702 if (has_vdec2()) {
2703 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
2704 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
2705 ret = true;
2706 }
2707 } else if (core == VDEC_HCODEC) {
2708 if (has_hdec()) {
2709 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x3) == 0) &&
2710 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
2711 ret = true;
2712 }
2713 } else if (core == VDEC_HEVC) {
2714 if (has_hevc_vdec()) {
2715 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc0) == 0) &&
2716 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
2717 ret = true;
2718 }
2719 }
2720
2721 return ret;
2722}
2723EXPORT_SYMBOL(vdec_on);
2724
2725#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
2726void vdec_poweron(enum vdec_type_e core)
2727{
2728 ulong flags;
2729
2730 spin_lock_irqsave(&lock, flags);
2731
2732 if (core == VDEC_1) {
2733 /* vdec1 soft reset */
2734 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2735 WRITE_VREG(DOS_SW_RESET0, 0);
2736 /* enable vdec1 clock */
2737 vdec_clock_enable();
2738 /* reset DOS top registers */
2739 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
2740 } else if (core == VDEC_2) {
2741 /* vdec2 soft reset */
2742 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2743 WRITE_VREG(DOS_SW_RESET2, 0);
2744 /* enable vdec2 clock */
2745 vdec2_clock_enable();
2746 /* reset DOS top registers */
2747 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2748 } else if (core == VDEC_HCODEC) {
2749 /* hcodec soft reset */
2750 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2751 WRITE_VREG(DOS_SW_RESET1, 0);
2752 /* enable hcodec clock */
2753 hcodec_clock_enable();
2754 }
2755
2756 spin_unlock_irqrestore(&lock, flags);
2757}
2758
2759void vdec_poweroff(enum vdec_type_e core)
2760{
2761 ulong flags;
2762
2763 spin_lock_irqsave(&lock, flags);
2764
2765 if (core == VDEC_1) {
2766 /* disable vdec1 clock */
2767 vdec_clock_off();
2768 } else if (core == VDEC_2) {
2769 /* disable vdec2 clock */
2770 vdec2_clock_off();
2771 } else if (core == VDEC_HCODEC) {
2772 /* disable hcodec clock */
2773 hcodec_clock_off();
2774 }
2775
2776 spin_unlock_irqrestore(&lock, flags);
2777}
2778
2779bool vdec_on(enum vdec_type_e core)
2780{
2781 bool ret = false;
2782
2783 if (core == VDEC_1) {
2784 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
2785 ret = true;
2786 } else if (core == VDEC_2) {
2787 if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
2788 ret = true;
2789 } else if (core == VDEC_HCODEC) {
2790 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
2791 ret = true;
2792 }
2793
2794 return ret;
2795}
2796#endif
2797
2798int vdec_source_changed(int format, int width, int height, int fps)
2799{
2800 /* todo: add level routines for clock adjustment per chips */
2801 int ret = -1;
2802 static int on_setting;
2803
2804 if (on_setting > 0)
2805 return ret;/*on changing clk,ignore this change*/
2806
2807 if (vdec_source_get(VDEC_1) == width * height * fps)
2808 return ret;
2809
2810
2811 on_setting = 1;
2812 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 2813 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
2814 width, height, fps, vdec_clk_get(VDEC_1));
2815 on_setting = 0;
2816 return ret;
2817
2818}
2819EXPORT_SYMBOL(vdec_source_changed);
2820
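/*
 * Illustrative call (hypothetical values): a decoder that has just parsed a
 * 4K60 sequence header could report
 *
 *	vdec_source_changed(VFORMAT_H264, 3840, 2160, 60);
 *
 * so the VDEC_1 clock can be raised to match width * height * fps.
 */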
87046a60 2821void vdec_reset_core(struct vdec_s *vdec)
2822{
2823 unsigned long flags;
2824 spin_lock_irqsave(&vdec_spin_lock, flags);
2825 codec_dmcbus_write(DMC_REQ_CTRL,
2826 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
2827 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2828
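	/* wait for the VDEC DMC channel to go idle before asserting reset */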
2829 while (!(codec_dmcbus_read(DMC_CHAN_STS)
2830 & (1 << 13)))
2831 ;
2832 /*
2833 * 2: assist
2834 * 3: vld_reset
2835 * 4: vld_part_reset
2836 * 5: vfifo reset
2837 * 6: iqidct
2838 * 7: mc
2839 * 8: dblk
2840 * 9: pic_dc
2841 * 10: psc
2842 * 11: mcpu
2843 * 12: ccpu
2844 * 13: ddr
2845 * 14: afifo
2846 */
2847
2848 WRITE_VREG(DOS_SW_RESET0,
2849 (1<<3)|(1<<4)|(1<<5));
2850
2851 WRITE_VREG(DOS_SW_RESET0, 0);
2852
2853 spin_lock_irqsave(&vdec_spin_lock, flags);
2854 codec_dmcbus_write(DMC_REQ_CTRL,
2855 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
2856 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2857}
2858EXPORT_SYMBOL(vdec_reset_core);
2859
fe96802b
NQ
2860void hevc_reset_core(struct vdec_s *vdec)
2861{
2862 unsigned long flags;
2863 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
2864 spin_lock_irqsave(&vdec_spin_lock, flags);
2865 codec_dmcbus_write(DMC_REQ_CTRL,
2866 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
2867 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2868
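	/* wait for the HEVC DMC channel to go idle before asserting reset */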
2869 while (!(codec_dmcbus_read(DMC_CHAN_STS)
2870 & (1 << 4)))
2871 ;
2872
28e318df 2873 if (vdec == NULL || input_frame_based(vdec))
fe96802b
NQ
2874 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
2875
2876 /*
2877 * 2: assist
2878 * 3: parser
2879 * 4: parser_state
2880 * 8: dblk
2881 * 11:mcpu
2882 * 12:ccpu
2883 * 13:ddr
2884 * 14:iqit
2885 * 15:ipp
2886 * 17:qdct
2887 * 18:mpred
2888 * 19:sao
2889 * 24:hevc_afifo
2890 */
2891 WRITE_VREG(DOS_SW_RESET3,
2892 (1<<3)|(1<<4)|(1<<8)|(1<<11)|
2893 (1<<12)|(1<<13)|(1<<14)|(1<<15)|
2894 (1<<17)|(1<<18)|(1<<19)|(1<<24));
2895
2896 WRITE_VREG(DOS_SW_RESET3, 0);
2897
2898
2899 spin_lock_irqsave(&vdec_spin_lock, flags);
2900 codec_dmcbus_write(DMC_REQ_CTRL,
2901 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4));
2902 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2903
2904}
2905EXPORT_SYMBOL(hevc_reset_core);
2906
b9164398
NQ
2907int vdec2_source_changed(int format, int width, int height, int fps)
2908{
2909 int ret = -1;
2910 static int on_setting;
2911
2912 if (has_vdec2()) {
2913 /* todo: add level routines for clock adjustment per chips */
2914 if (on_setting != 0)
2915 return ret;/*on changing clk,ignore this change*/
2916
2917 if (vdec_source_get(VDEC_2) == width * height * fps)
2918 return ret;
2919
2920 on_setting = 1;
2921 ret = vdec_source_changed_for_clk_set(format,
2922 width, height, fps);
5b851ff9 2923 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
2924 width, height, fps, vdec_clk_get(VDEC_2));
2925 on_setting = 0;
2926 return ret;
2927 }
2928 return 0;
2929}
2930EXPORT_SYMBOL(vdec2_source_changed);
2931
2932int hevc_source_changed(int format, int width, int height, int fps)
2933{
2934 /* todo: add level routines for clock adjustment per chips */
2935 int ret = -1;
2936 static int on_setting;
2937
2938 if (on_setting != 0)
2939 return ret;/*on changing clk,ignore this change*/
2940
2941 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
2942 return ret;
2943
2944 on_setting = 1;
2945 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 2946 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
2947 width, height, fps, vdec_clk_get(VDEC_HEVC));
2948 on_setting = 0;
2949
2950 return ret;
2951}
2952EXPORT_SYMBOL(hevc_source_changed);
2953
b9164398
NQ
2954static struct am_reg am_risc[] = {
2955 {"MSP", 0x300},
2956 {"MPSR", 0x301},
2957 {"MCPU_INT_BASE", 0x302},
2958 {"MCPU_INTR_GRP", 0x303},
2959 {"MCPU_INTR_MSK", 0x304},
2960 {"MCPU_INTR_REQ", 0x305},
2961 {"MPC-P", 0x306},
2962 {"MPC-D", 0x307},
2963 {"MPC_E", 0x308},
2964 {"MPC_W", 0x309},
2965 {"CSP", 0x320},
2966 {"CPSR", 0x321},
2967 {"CCPU_INT_BASE", 0x322},
2968 {"CCPU_INTR_GRP", 0x323},
2969 {"CCPU_INTR_MSK", 0x324},
2970 {"CCPU_INTR_REQ", 0x325},
2971 {"CPC-P", 0x326},
2972 {"CPC-D", 0x327},
2973 {"CPC_E", 0x328},
2974 {"CPC_W", 0x329},
2975 {"AV_SCRATCH_0", 0x09c0},
2976 {"AV_SCRATCH_1", 0x09c1},
2977 {"AV_SCRATCH_2", 0x09c2},
2978 {"AV_SCRATCH_3", 0x09c3},
2979 {"AV_SCRATCH_4", 0x09c4},
2980 {"AV_SCRATCH_5", 0x09c5},
2981 {"AV_SCRATCH_6", 0x09c6},
2982 {"AV_SCRATCH_7", 0x09c7},
2983 {"AV_SCRATCH_8", 0x09c8},
2984 {"AV_SCRATCH_9", 0x09c9},
2985 {"AV_SCRATCH_A", 0x09ca},
2986 {"AV_SCRATCH_B", 0x09cb},
2987 {"AV_SCRATCH_C", 0x09cc},
2988 {"AV_SCRATCH_D", 0x09cd},
2989 {"AV_SCRATCH_E", 0x09ce},
2990 {"AV_SCRATCH_F", 0x09cf},
2991 {"AV_SCRATCH_G", 0x09d0},
2992 {"AV_SCRATCH_H", 0x09d1},
2993 {"AV_SCRATCH_I", 0x09d2},
2994 {"AV_SCRATCH_J", 0x09d3},
2995 {"AV_SCRATCH_K", 0x09d4},
2996 {"AV_SCRATCH_L", 0x09d5},
2997 {"AV_SCRATCH_M", 0x09d6},
2998 {"AV_SCRATCH_N", 0x09d7},
2999};
3000
3001static ssize_t amrisc_regs_show(struct class *class,
3002 struct class_attribute *attr, char *buf)
3003{
3004 char *pbuf = buf;
3005 struct am_reg *regs = am_risc;
3006 int rsize = sizeof(am_risc) / sizeof(struct am_reg);
3007 int i;
e0614bf7 3008 unsigned int val;
b9164398
NQ
3009 ssize_t ret;
3010
63e810c0 3011 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
b9164398
NQ
3012 mutex_lock(&vdec_mutex);
3013 if (!vdec_on(VDEC_1)) {
3014 mutex_unlock(&vdec_mutex);
 3015 pbuf += sprintf(pbuf, "amrisc is powered off\n");
3016 ret = pbuf - buf;
3017 return ret;
3018 }
63e810c0 3019 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3020 /*TODO:M6 define */
3021 /*
3022 * switch_mod_gate_by_type(MOD_VDEC, 1);
3023 */
3024 amports_switch_gate("vdec", 1);
3025 }
3026 pbuf += sprintf(pbuf, "amrisc registers show:\n");
3027 for (i = 0; i < rsize; i++) {
3028 val = READ_VREG(regs[i].offset);
3029 pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
3030 regs[i].name, regs[i].offset, val, val);
3031 }
63e810c0 3032 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
b9164398 3033 mutex_unlock(&vdec_mutex);
63e810c0 3034 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3035 /*TODO:M6 define */
3036 /*
3037 * switch_mod_gate_by_type(MOD_VDEC, 0);
3038 */
3039 amports_switch_gate("vdec", 0);
3040 }
3041 ret = pbuf - buf;
3042 return ret;
3043}
3044
3045static ssize_t dump_trace_show(struct class *class,
3046 struct class_attribute *attr, char *buf)
3047{
3048 int i;
3049 char *pbuf = buf;
3050 ssize_t ret;
3051 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
3052
3053 if (!trace_buf) {
 3054 pbuf += sprintf(pbuf, "No memory for trace buffer\n");
3055 ret = pbuf - buf;
3056 return ret;
3057 }
63e810c0 3058 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
b9164398
NQ
3059 mutex_lock(&vdec_mutex);
3060 if (!vdec_on(VDEC_1)) {
3061 mutex_unlock(&vdec_mutex);
3062 kfree(trace_buf);
 3063 pbuf += sprintf(pbuf, "amrisc is powered off\n");
3064 ret = pbuf - buf;
3065 return ret;
3066 }
63e810c0 3067 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3068 /*TODO:M6 define */
3069 /*
3070 * switch_mod_gate_by_type(MOD_VDEC, 1);
3071 */
3072 amports_switch_gate("vdec", 1);
3073 }
3074 pr_info("dump trace steps:%d start\n", debug_trace_num);
3075 i = 0;
3076 while (i <= debug_trace_num - 16) {
3077 trace_buf[i] = READ_VREG(MPC_E);
3078 trace_buf[i + 1] = READ_VREG(MPC_E);
3079 trace_buf[i + 2] = READ_VREG(MPC_E);
3080 trace_buf[i + 3] = READ_VREG(MPC_E);
3081 trace_buf[i + 4] = READ_VREG(MPC_E);
3082 trace_buf[i + 5] = READ_VREG(MPC_E);
3083 trace_buf[i + 6] = READ_VREG(MPC_E);
3084 trace_buf[i + 7] = READ_VREG(MPC_E);
3085 trace_buf[i + 8] = READ_VREG(MPC_E);
3086 trace_buf[i + 9] = READ_VREG(MPC_E);
3087 trace_buf[i + 10] = READ_VREG(MPC_E);
3088 trace_buf[i + 11] = READ_VREG(MPC_E);
3089 trace_buf[i + 12] = READ_VREG(MPC_E);
3090 trace_buf[i + 13] = READ_VREG(MPC_E);
3091 trace_buf[i + 14] = READ_VREG(MPC_E);
3092 trace_buf[i + 15] = READ_VREG(MPC_E);
3093 i += 16;
 3094 }
3095 pr_info("dump trace steps:%d finished\n", debug_trace_num);
63e810c0 3096 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
b9164398 3097 mutex_unlock(&vdec_mutex);
63e810c0 3098 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3099 /*TODO:M6 define */
3100 /*
3101 * switch_mod_gate_by_type(MOD_VDEC, 0);
3102 */
3103 amports_switch_gate("vdec", 0);
3104 }
3105 for (i = 0; i < debug_trace_num; i++) {
3106 if (i % 4 == 0) {
3107 if (i % 16 == 0)
3108 pbuf += sprintf(pbuf, "\n");
3109 else if (i % 8 == 0)
3110 pbuf += sprintf(pbuf, " ");
3111 else /* 4 */
3112 pbuf += sprintf(pbuf, " ");
3113 }
3114 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
3115 }
3116 while (i < debug_trace_num)
3117 ;
3118 kfree(trace_buf);
3119 pbuf += sprintf(pbuf, "\n");
3120 ret = pbuf - buf;
3121 return ret;
3122}
3123
3124static ssize_t clock_level_show(struct class *class,
3125 struct class_attribute *attr, char *buf)
3126{
3127 char *pbuf = buf;
 3128 ssize_t ret;
3129
3130 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
3131
3132 if (has_vdec2())
3133 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
3134
3135 if (has_hevc_vdec())
3136 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
3137
3138 ret = pbuf - buf;
3139 return ret;
3140}
3141
3142static ssize_t store_poweron_clock_level(struct class *class,
3143 struct class_attribute *attr,
3144 const char *buf, size_t size)
3145{
e0614bf7 3146 unsigned int val;
b9164398
NQ
3147 ssize_t ret;
3148
3149 /*ret = sscanf(buf, "%d", &val);*/
3150 ret = kstrtoint(buf, 0, &val);
3151
3152 if (ret != 0)
3153 return -EINVAL;
3154 poweron_clock_level = val;
3155 return size;
3156}
3157
3158static ssize_t show_poweron_clock_level(struct class *class,
3159 struct class_attribute *attr, char *buf)
3160{
3161 return sprintf(buf, "%d\n", poweron_clock_level);
3162}
3163
3164/*
e0614bf7
ZZ
 3165 *if keep_vdec_mem == 1,
 3166 *never release the preallocated vdec
 3167 *64M memory, so playback can start faster.
3168 */
b9164398
NQ
3169static ssize_t store_keep_vdec_mem(struct class *class,
3170 struct class_attribute *attr,
3171 const char *buf, size_t size)
3172{
e0614bf7 3173 unsigned int val;
b9164398
NQ
3174 ssize_t ret;
3175
3176 /*ret = sscanf(buf, "%d", &val);*/
3177 ret = kstrtoint(buf, 0, &val);
3178 if (ret != 0)
3179 return -EINVAL;
3180 keep_vdec_mem = val;
3181 return size;
3182}
3183
3184static ssize_t show_keep_vdec_mem(struct class *class,
3185 struct class_attribute *attr, char *buf)
3186{
3187 return sprintf(buf, "%d\n", keep_vdec_mem);
3188}
3189
a6c89e96
NQ
3190#ifdef VDEC_DEBUG_SUPPORT
3191static ssize_t store_debug(struct class *class,
3192 struct class_attribute *attr,
3193 const char *buf, size_t size)
3194{
3195 struct vdec_s *vdec;
3196 struct vdec_core_s *core = vdec_core;
3197 unsigned long flags;
3198
3199 unsigned id;
3200 unsigned val;
3201 ssize_t ret;
3202 char cbuf[32];
3203
3204 cbuf[0] = 0;
3205 ret = sscanf(buf, "%s %x %x", cbuf, &id, &val);
3206 /*pr_info(
3207 "%s(%s)=>ret %ld: %s, %x, %x\n",
3208 __func__, buf, ret, cbuf, id, val);*/
3209 if (strcmp(cbuf, "schedule") == 0) {
3210 pr_info("VDEC_DEBUG: force schedule\n");
3211 up(&core->sem);
3212 } else if (strcmp(cbuf, "power_off") == 0) {
3213 pr_info("VDEC_DEBUG: power off core %d\n", id);
3214 vdec_poweroff(id);
3215 } else if (strcmp(cbuf, "power_on") == 0) {
3216 pr_info("VDEC_DEBUG: power_on core %d\n", id);
3217 vdec_poweron(id);
3218 } else if (strcmp(cbuf, "wr") == 0) {
3219 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
3220 id, val);
3221 WRITE_VREG(id, val);
3222 } else if (strcmp(cbuf, "rd") == 0) {
3223 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
3224 id, READ_VREG(id));
3225 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
3226 pr_info(
3227 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
3228 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
3229 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
3230 }
3231
3232 flags = vdec_core_lock(vdec_core);
3233
3234 list_for_each_entry(vdec,
3235 &core->connected_vdec_list, list) {
3236 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
3237 if (((vdec->status == VDEC_STATUS_CONNECTED
3238 || vdec->status == VDEC_STATUS_ACTIVE)) &&
3239 (vdec->id == id)) {
3240 /*to add*/
3241 break;
3242 }
3243 }
3244 vdec_core_unlock(vdec_core, flags);
3245 return size;
3246}
3247
3248static ssize_t show_debug(struct class *class,
3249 struct class_attribute *attr, char *buf)
3250{
3251 char *pbuf = buf;
3252 struct vdec_s *vdec;
3253 struct vdec_core_s *core = vdec_core;
3254 unsigned long flags = vdec_core_lock(vdec_core);
1e37ecab 3255 u64 tmp;
a6c89e96
NQ
3256
3257 pbuf += sprintf(pbuf,
3258 "============== help:\n");
3259 pbuf += sprintf(pbuf,
3260 "'echo xxx > debug' usuage:\n");
3261 pbuf += sprintf(pbuf,
3262 "schedule - trigger schedule thread to run\n");
3263 pbuf += sprintf(pbuf,
3264 "power_off core_num - call vdec_poweroff(core_num)\n");
3265 pbuf += sprintf(pbuf,
3266 "power_on core_num - call vdec_poweron(core_num)\n");
3267 pbuf += sprintf(pbuf,
3268 "wr adr val - call WRITE_VREG(adr, val)\n");
3269 pbuf += sprintf(pbuf,
3270 "rd adr - call READ_VREG(adr)\n");
3271 pbuf += sprintf(pbuf,
3272 "read_hevc_clk_reg - read HHI register for hevc clk\n");
3273 pbuf += sprintf(pbuf,
3274 "===================\n");
3275
3276 pbuf += sprintf(pbuf,
3277 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
3278 list_for_each_entry(vdec,
3279 &core->connected_vdec_list, list) {
3280 enum vdec_type_e type;
976f3376
HZ
3281 if ((vdec->status == VDEC_STATUS_CONNECTED
3282 || vdec->status == VDEC_STATUS_ACTIVE)) {
a6c89e96
NQ
3283 for (type = VDEC_1; type < VDEC_MAX; type++) {
3284 if (vdec->core_mask & (1 << type)) {
3285 pbuf += sprintf(pbuf, "%s(%d):",
3286 vdec->vf_provider_name, type);
3287 pbuf += sprintf(pbuf, "\t%d",
3288 vdec->check_count[type]);
3289 pbuf += sprintf(pbuf, "\t%d",
3290 vdec->run_count[type]);
3291 pbuf += sprintf(pbuf, "\t%d",
3292 vdec->input_underrun_count[type]);
3293 pbuf += sprintf(pbuf, "\t%d",
3294 vdec->not_run_ready_count[type]);
1e37ecab
AX
3295 tmp = vdec->run_clk[type] * 100;
3296 do_div(tmp, vdec->total_clk[type]);
a6c89e96
NQ
3297 pbuf += sprintf(pbuf,
3298 "\t%d%%\n",
3299 vdec->total_clk[type] == 0 ? 0 :
1e37ecab 3300 (u32)tmp);
a6c89e96
NQ
3301 }
3302 }
976f3376 3303 }
a6c89e96
NQ
3304 }
3305
3306 vdec_core_unlock(vdec_core, flags);
3307 return pbuf - buf;
3308
3309}
3310#endif
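/*
 * Example usage from the shell (assuming the class node is exposed as
 * /sys/class/vdec/debug and VDEC_DEBUG_SUPPORT is enabled):
 *
 *	echo "rd 0x301" > /sys/class/vdec/debug		# READ_VREG(0x301)
 *	echo "power_on 0" > /sys/class/vdec/debug	# vdec_poweron(core 0)
 *	cat /sys/class/vdec/debug			# per-core statistics
 */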
b9164398
NQ
3311
 3312/*irq numbers are the same as in the .dts*/
3313/*
e0614bf7
ZZ
3314 * interrupts = <0 3 1
3315 * 0 23 1
3316 * 0 32 1
3317 * 0 43 1
3318 * 0 44 1
3319 * 0 45 1>;
3320 * interrupt-names = "vsync",
3321 * "demux",
3322 * "parser",
3323 * "mailbox_0",
3324 * "mailbox_1",
3325 * "mailbox_2";
3326 */
b9164398
NQ
3327s32 vdec_request_threaded_irq(enum vdec_irq_num num,
3328 irq_handler_t handler,
3329 irq_handler_t thread_fn,
3330 unsigned long irqflags,
3331 const char *devname, void *dev)
3332{
3333 s32 res_irq;
3334 s32 ret = 0;
3335
3336 if (num >= VDEC_IRQ_MAX) {
3337 pr_err("[%s] request irq error, irq num too big!", __func__);
3338 return -EINVAL;
3339 }
3340
3341 if (vdec_core->isr_context[num].irq < 0) {
3342 res_irq = platform_get_irq(
3343 vdec_core->vdec_core_platform_device, num);
3344 if (res_irq < 0) {
3345 pr_err("[%s] get irq error!", __func__);
3346 return -EINVAL;
3347 }
3348
3349 vdec_core->isr_context[num].irq = res_irq;
3350 vdec_core->isr_context[num].dev_isr = handler;
3351 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3352 vdec_core->isr_context[num].dev_id = dev;
3353
3354 ret = request_threaded_irq(res_irq,
3355 vdec_isr,
3356 vdec_thread_isr,
3357 (thread_fn) ? IRQF_ONESHOT : irqflags,
3358 devname,
3359 &vdec_core->isr_context[num]);
3360
3361 if (ret) {
3362 vdec_core->isr_context[num].irq = -1;
3363 vdec_core->isr_context[num].dev_isr = NULL;
3364 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3365 vdec_core->isr_context[num].dev_id = NULL;
3366
3367 pr_err("vdec irq register error for %s.\n", devname);
3368 return -EIO;
3369 }
3370 } else {
3371 vdec_core->isr_context[num].dev_isr = handler;
3372 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3373 vdec_core->isr_context[num].dev_id = dev;
3374 }
3375
3376 return ret;
3377}
3378EXPORT_SYMBOL(vdec_request_threaded_irq);
3379
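/*
 * Typical use from a decoder driver (sketch, names are hypothetical):
 *
 *	ret = vdec_request_threaded_irq(VDEC_IRQ_1, my_isr, my_thread_fn,
 *			IRQF_ONESHOT, "my-vdec", (void *)hw);
 *
 * Passing a thread_fn forces IRQF_ONESHOT; if the irq line was requested
 * earlier, only the handler/thread_fn/dev_id fields are updated in place.
 */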
3380s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
3381 const char *devname, void *dev)
3382{
5b851ff9 3383 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
b9164398
NQ
3384
3385 return vdec_request_threaded_irq(num,
3386 handler,
3387 NULL,/*no thread_fn*/
3388 IRQF_SHARED,
3389 devname,
3390 dev);
3391}
3392EXPORT_SYMBOL(vdec_request_irq);
3393
3394void vdec_free_irq(enum vdec_irq_num num, void *dev)
3395{
3396 if (num >= VDEC_IRQ_MAX) {
3397 pr_err("[%s] request irq error, irq num too big!", __func__);
3398 return;
3399 }
b9164398 3400 /*
e0614bf7 3401 *assume amrisc is stopped already and there is no mailbox interrupt
b9164398
NQ
3402 * when we reset pointers here.
3403 */
3404 vdec_core->isr_context[num].dev_isr = NULL;
3405 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3406 vdec_core->isr_context[num].dev_id = NULL;
5f3fbfb7 3407 synchronize_irq(vdec_core->isr_context[num].irq);
b9164398
NQ
3408}
3409EXPORT_SYMBOL(vdec_free_irq);
3410
a6c89e96
NQ
3411struct vdec_s *vdec_get_default_vdec_for_userdata(void)
3412{
3413 struct vdec_s *vdec;
3414 struct vdec_s *ret_vdec;
3415 struct vdec_core_s *core = vdec_core;
3416 unsigned long flags;
3417 int id;
3418
3419 flags = vdec_core_lock(vdec_core);
3420
3421 id = 0x10000000;
3422 ret_vdec = NULL;
3423 if (!list_empty(&core->connected_vdec_list)) {
3424 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3425 if (vdec->id < id) {
3426 id = vdec->id;
3427 ret_vdec = vdec;
3428 }
3429 }
3430 }
3431
3432 vdec_core_unlock(vdec_core, flags);
3433
3434 return ret_vdec;
3435}
3436EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
3437
3438int vdec_read_user_data(struct vdec_s *vdec,
3439 struct userdata_param_t *p_userdata_param)
3440{
3441 int ret = 0;
3442
3443 if (!vdec)
3444 vdec = vdec_get_default_vdec_for_userdata();
3445
3446 if (vdec) {
3447 if (vdec->user_data_read)
3448 ret = vdec->user_data_read(vdec, p_userdata_param);
3449 }
3450 return ret;
3451}
3452EXPORT_SYMBOL(vdec_read_user_data);
3453
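/*
 * Sketch (hypothetical caller): passing a NULL vdec falls back to the
 * connected instance with the lowest id, so a userdata consumer that does
 * not track instances can simply do
 *
 *	struct userdata_param_t param = {0};
 *
 *	vdec_read_user_data(NULL, &param);
 */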
3454int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
3455{
b78f4cd9
PY
3456 if (vdec && vdec == vdec_get_default_vdec_for_userdata()) {
3457 if (vdec->wakeup_userdata_poll)
3458 vdec->wakeup_userdata_poll();
3459 }
a6c89e96
NQ
3460
3461 return 0;
3462}
3463EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
3464
3465void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
3466{
3467 if (!vdec)
3468 vdec = vdec_get_default_vdec_for_userdata();
3469
3470 if (vdec) {
3471 if (vdec->reset_userdata_fifo)
3472 vdec->reset_userdata_fifo(vdec, bInit);
3473 }
3474}
3475EXPORT_SYMBOL(vdec_reset_userdata_fifo);
3476
b9164398
NQ
3477static int dump_mode;
3478static ssize_t dump_risc_mem_store(struct class *class,
3479 struct class_attribute *attr,
3480 const char *buf, size_t size)/*set*/
3481{
e0614bf7 3482 unsigned int val;
b9164398
NQ
3483 ssize_t ret;
3484 char dump_mode_str[4] = "PRL";
3485
3486 /*ret = sscanf(buf, "%d", &val);*/
3487 ret = kstrtoint(buf, 0, &val);
3488
3489 if (ret != 0)
3490 return -EINVAL;
3491 dump_mode = val & 0x3;
3492 pr_info("set dump mode to %d,%c_mem\n",
3493 dump_mode, dump_mode_str[dump_mode]);
3494 return size;
3495}
3496static u32 read_amrisc_reg(int reg)
3497{
3498 WRITE_VREG(0x31b, reg);
3499 return READ_VREG(0x31c);
3500}
3501
3502static void dump_pmem(void)
3503{
3504 int i;
3505
3506 WRITE_VREG(0x301, 0x8000);
3507 WRITE_VREG(0x31d, 0);
3508 pr_info("start dump amrisc pmem of risc\n");
3509 for (i = 0; i < 0xfff; i++) {
3510 /*same as .o format*/
3511 pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
3512 }
3513}
3514
3515static void dump_lmem(void)
3516{
3517 int i;
3518
3519 WRITE_VREG(0x301, 0x8000);
3520 WRITE_VREG(0x31d, 2);
3521 pr_info("start dump amrisc lmem\n");
3522 for (i = 0; i < 0x3ff; i++) {
3523 /*same as */
3524 pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
3525 }
3526}
3527
3528static ssize_t dump_risc_mem_show(struct class *class,
3529 struct class_attribute *attr, char *buf)
3530{
3531 char *pbuf = buf;
3532 int ret;
3533
63e810c0 3534 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
b9164398
NQ
3535 mutex_lock(&vdec_mutex);
3536 if (!vdec_on(VDEC_1)) {
3537 mutex_unlock(&vdec_mutex);
 3538 pbuf += sprintf(pbuf, "amrisc is powered off\n");
3539 ret = pbuf - buf;
3540 return ret;
3541 }
63e810c0 3542 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3543 /*TODO:M6 define */
3544 /*
3545 * switch_mod_gate_by_type(MOD_VDEC, 1);
3546 */
3547 amports_switch_gate("vdec", 1);
3548 }
3549 /*start do**/
3550 switch (dump_mode) {
3551 case 0:
3552 dump_pmem();
3553 break;
3554 case 2:
3555 dump_lmem();
3556 break;
3557 default:
3558 break;
3559 }
3560
3561 /*done*/
63e810c0 3562 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
b9164398 3563 mutex_unlock(&vdec_mutex);
63e810c0 3564 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
b9164398
NQ
3565 /*TODO:M6 define */
3566 /*
3567 * switch_mod_gate_by_type(MOD_VDEC, 0);
3568 */
3569 amports_switch_gate("vdec", 0);
3570 }
3571 return sprintf(buf, "done\n");
3572}
3573
3574static ssize_t core_show(struct class *class, struct class_attribute *attr,
3575 char *buf)
3576{
3577 struct vdec_core_s *core = vdec_core;
3578 char *pbuf = buf;
3579
3580 if (list_empty(&core->connected_vdec_list))
3581 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3582 else {
3583 struct vdec_s *vdec;
3584
a6c89e96
NQ
3585 pbuf += sprintf(pbuf,
3586 " Core: last_sched %p, sched_mask %lx\n",
3587 core->active_vdec,
3588 core->sched_mask);
3589
b9164398
NQ
3590 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3591 pbuf += sprintf(pbuf,
a6c89e96 3592 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
fe96802b
NQ
3593 vdec->id,
3594 vdec,
3595 vdec_device_name[vdec->format * 2],
b9164398 3596 vdec_status_str(vdec),
a6c89e96
NQ
3597 vdec_type_str(vdec),
3598 vdec->active_mask);
b9164398
NQ
3599 }
3600 }
3601
3602 return pbuf - buf;
3603}
3604
fe96802b
NQ
3605static ssize_t vdec_status_show(struct class *class,
3606 struct class_attribute *attr, char *buf)
3607{
3608 char *pbuf = buf;
3609 struct vdec_s *vdec;
3610 struct vdec_info vs;
3611 unsigned char vdec_num = 0;
3612 struct vdec_core_s *core = vdec_core;
3613 unsigned long flags = vdec_core_lock(vdec_core);
3614
3615 if (list_empty(&core->connected_vdec_list)) {
3616 pbuf += sprintf(pbuf, "No vdec.\n");
3617 goto out;
3618 }
3619
3620 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3621 if (VDEC_STATUS_CONNECTED == vdec->status) {
3622 memset(&vs, 0, sizeof(vs));
3623 if (vdec_status(vdec, &vs)) {
3624 pbuf += sprintf(pbuf, "err.\n");
3625 goto out;
3626 }
3627 pbuf += sprintf(pbuf,
3628 "vdec channel %u statistics:\n",
3629 vdec_num);
3630 pbuf += sprintf(pbuf,
3631 "%13s : %s\n", "device name",
3632 vs.vdec_name);
3633 pbuf += sprintf(pbuf,
3634 "%13s : %u\n", "frame width",
3635 vs.frame_width);
3636 pbuf += sprintf(pbuf,
3637 "%13s : %u\n", "frame height",
3638 vs.frame_height);
3639 pbuf += sprintf(pbuf,
3640 "%13s : %u %s\n", "frame rate",
3641 vs.frame_rate, "fps");
3642 pbuf += sprintf(pbuf,
3643 "%13s : %u %s\n", "bit rate",
3644 vs.bit_rate / 1024 * 8, "kbps");
3645 pbuf += sprintf(pbuf,
3646 "%13s : %u\n", "status",
3647 vs.status);
3648 pbuf += sprintf(pbuf,
3649 "%13s : %u\n", "frame dur",
3650 vs.frame_dur);
3651 pbuf += sprintf(pbuf,
3652 "%13s : %u %s\n", "frame data",
3653 vs.frame_data / 1024, "KB");
3654 pbuf += sprintf(pbuf,
3655 "%13s : %u\n", "frame count",
3656 vs.frame_count);
3657 pbuf += sprintf(pbuf,
3658 "%13s : %u\n", "drop count",
3659 vs.drop_frame_count);
3660 pbuf += sprintf(pbuf,
3661 "%13s : %u\n", "fra err count",
3662 vs.error_frame_count);
3663 pbuf += sprintf(pbuf,
3664 "%13s : %u\n", "hw err count",
3665 vs.error_count);
3666 pbuf += sprintf(pbuf,
3667 "%13s : %llu %s\n\n", "total data",
3668 vs.total_data / 1024, "KB");
3669
3670 vdec_num++;
3671 }
3672 }
3673out:
3674 vdec_core_unlock(vdec_core, flags);
3675 return pbuf - buf;
3676}
3677
3678static ssize_t dump_vdec_blocks_show(struct class *class,
3679 struct class_attribute *attr, char *buf)
3680{
3681 struct vdec_core_s *core = vdec_core;
3682 char *pbuf = buf;
3683
3684 if (list_empty(&core->connected_vdec_list))
3685 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3686 else {
3687 struct vdec_s *vdec;
3688 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3689 pbuf += vdec_input_dump_blocks(&vdec->input,
3690 pbuf, PAGE_SIZE - (pbuf - buf));
3691 }
3692 }
3693
3694 return pbuf - buf;
3695}
3696static ssize_t dump_vdec_chunks_show(struct class *class,
3697 struct class_attribute *attr, char *buf)
3698{
3699 struct vdec_core_s *core = vdec_core;
3700 char *pbuf = buf;
3701
3702 if (list_empty(&core->connected_vdec_list))
3703 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3704 else {
3705 struct vdec_s *vdec;
3706 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3707 pbuf += vdec_input_dump_chunks(&vdec->input,
3708 pbuf, PAGE_SIZE - (pbuf - buf));
3709 }
3710 }
3711
3712 return pbuf - buf;
3713}
3714
fe96802b
NQ
3715static ssize_t dump_decoder_state_show(struct class *class,
3716 struct class_attribute *attr, char *buf)
3717{
3718 char *pbuf = buf;
3719 struct vdec_s *vdec;
3720 struct vdec_core_s *core = vdec_core;
3721 unsigned long flags = vdec_core_lock(vdec_core);
3722
3723 if (list_empty(&core->connected_vdec_list)) {
3724 pbuf += sprintf(pbuf, "No vdec.\n");
3725 } else {
3726 list_for_each_entry(vdec,
3727 &core->connected_vdec_list, list) {
3728 if ((vdec->status == VDEC_STATUS_CONNECTED
3729 || vdec->status == VDEC_STATUS_ACTIVE)
3730 && vdec->dump_state)
3731 vdec->dump_state(vdec);
3732 }
3733 }
3734 vdec_core_unlock(vdec_core, flags);
3735
3736 return pbuf - buf;
3737}
d481db31 3738
d481db31 3739
fe96802b 3740
b9164398
NQ
3741static struct class_attribute vdec_class_attrs[] = {
3742 __ATTR_RO(amrisc_regs),
3743 __ATTR_RO(dump_trace),
3744 __ATTR_RO(clock_level),
3745 __ATTR(poweron_clock_level, S_IRUGO | S_IWUSR | S_IWGRP,
3746 show_poweron_clock_level, store_poweron_clock_level),
3747 __ATTR(dump_risc_mem, S_IRUGO | S_IWUSR | S_IWGRP,
3748 dump_risc_mem_show, dump_risc_mem_store),
3749 __ATTR(keep_vdec_mem, S_IRUGO | S_IWUSR | S_IWGRP,
3750 show_keep_vdec_mem, store_keep_vdec_mem),
3751 __ATTR_RO(core),
fe96802b
NQ
3752 __ATTR_RO(vdec_status),
3753 __ATTR_RO(dump_vdec_blocks),
3754 __ATTR_RO(dump_vdec_chunks),
d481db31 3755 __ATTR_RO(dump_decoder_state),
a6c89e96
NQ
3756#ifdef VDEC_DEBUG_SUPPORT
3757 __ATTR(debug, S_IRUGO | S_IWUSR | S_IWGRP,
3758 show_debug, store_debug),
3759#endif
b9164398
NQ
3760 __ATTR_NULL
3761};
3762
3763static struct class vdec_class = {
3764 .name = "vdec",
3765 .class_attrs = vdec_class_attrs,
3766 };
3767
b9164398
NQ
3768struct device *get_vdec_device(void)
3769{
3770 return &vdec_core->vdec_core_platform_device->dev;
3771}
3772EXPORT_SYMBOL(get_vdec_device);
3773
3774static int vdec_probe(struct platform_device *pdev)
3775{
3776 s32 i, r;
3777
3778 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
3779 sizeof(struct vdec_core_s), GFP_KERNEL);
3780 if (vdec_core == NULL) {
3781 pr_err("vdec core allocation failed.\n");
3782 return -ENOMEM;
3783 }
3784
3785 atomic_set(&vdec_core->vdec_nr, 0);
3786 sema_init(&vdec_core->sem, 1);
3787
3788 r = class_register(&vdec_class);
3789 if (r) {
3790 pr_info("vdec class create fail.\n");
3791 return r;
3792 }
3793
3794 vdec_core->vdec_core_platform_device = pdev;
3795
3796 platform_set_drvdata(pdev, vdec_core);
3797
3798 for (i = 0; i < VDEC_IRQ_MAX; i++) {
3799 vdec_core->isr_context[i].index = i;
3800 vdec_core->isr_context[i].irq = -1;
3801 }
3802
a6c89e96
NQ
3803 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
3804 IRQF_ONESHOT, "vdec-0", NULL);
3805 if (r < 0) {
3806 pr_err("vdec interrupt request failed\n");
3807 return r;
3808 }
3809
b9164398
NQ
3810 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
3811 IRQF_ONESHOT, "vdec-1", NULL);
3812 if (r < 0) {
3813 pr_err("vdec interrupt request failed\n");
3814 return r;
3815 }
a6c89e96 3816#if 0
df841122 3817 if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
a6c89e96
NQ
3818 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
3819 IRQF_ONESHOT, "vdec-hevc_back", NULL);
3820 if (r < 0) {
3821 pr_err("vdec interrupt request failed\n");
3822 return r;
3823 }
3824 }
3825#endif
b9164398
NQ
3826 r = of_reserved_mem_device_init(&pdev->dev);
3827 if (r == 0)
3828 pr_info("vdec_probe done\n");
3829
3830 vdec_core->cma_dev = &pdev->dev;
3831
63e810c0 3832 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) {
b9164398
NQ
3833 /* default to 250MHz */
3834 vdec_clock_hi_enable();
3835 }
3836
63e810c0 3837 if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) {
b9164398
NQ
3838 /* set vdec dmc request to urgent */
3839 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
3840 }
b9164398
NQ
3841 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
3842 spin_lock_init(&vdec_core->lock);
fe96802b 3843 ida_init(&vdec_core->ida);
b9164398
NQ
3844 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
3845 "vdec-core");
3846
158de7c4
HZ
 3847 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s", __WQ_LEGACY |
 3848 WQ_MEM_RECLAIM | WQ_HIGHPRI/*high priority*/, "vdec-work");
3849 /*work queue priority lower than vdec-core.*/
b9164398
NQ
3850 return 0;
3851}
3852
3853static int vdec_remove(struct platform_device *pdev)
3854{
3855 int i;
3856
3857 for (i = 0; i < VDEC_IRQ_MAX; i++) {
3858 if (vdec_core->isr_context[i].irq >= 0) {
3859 free_irq(vdec_core->isr_context[i].irq,
3860 &vdec_core->isr_context[i]);
3861 vdec_core->isr_context[i].irq = -1;
3862 vdec_core->isr_context[i].dev_isr = NULL;
3863 vdec_core->isr_context[i].dev_threaded_isr = NULL;
3864 vdec_core->isr_context[i].dev_id = NULL;
3865 }
3866 }
3867
3868 kthread_stop(vdec_core->thread);
3869
fe96802b 3870 destroy_workqueue(vdec_core->vdec_core_wq);
b9164398
NQ
3871 class_unregister(&vdec_class);
3872
3873 return 0;
3874}
3875
3876static const struct of_device_id amlogic_vdec_dt_match[] = {
3877 {
3878 .compatible = "amlogic, vdec",
3879 },
3880 {},
3881};
3882
fe96802b 3883static struct mconfig vdec_configs[] = {
fe96802b
NQ
3884 MC_PU32("debug_trace_num", &debug_trace_num),
3885 MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
3886 MC_PU32("clk_config", &clk_config),
3887 MC_PI32("step_mode", &step_mode),
3888 MC_PI32("poweron_clock_level", &poweron_clock_level),
3889};
3890static struct mconfig_node vdec_node;
3891
b9164398
NQ
3892static struct platform_driver vdec_driver = {
3893 .probe = vdec_probe,
3894 .remove = vdec_remove,
3895 .driver = {
3896 .name = "vdec",
3897 .of_match_table = amlogic_vdec_dt_match,
3898 }
3899};
3900
3901int vdec_module_init(void)
3902{
3903 if (platform_driver_register(&vdec_driver)) {
3904 pr_info("failed to register vdec module\n");
3905 return -ENODEV;
3906 }
fe96802b
NQ
3907 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
3908 "vdec", vdec_configs, CONFIG_FOR_RW);
b9164398
NQ
3909 return 0;
3910}
3911EXPORT_SYMBOL(vdec_module_init);
3912
3913void vdec_module_exit(void)
3914{
3915 platform_driver_unregister(&vdec_driver);
3916}
3917EXPORT_SYMBOL(vdec_module_exit);
3918
3919#if 0
3920static int __init vdec_module_init(void)
3921{
3922 if (platform_driver_register(&vdec_driver)) {
3923 pr_info("failed to register vdec module\n");
3924 return -ENODEV;
3925 }
fe96802b
NQ
3926 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
3927 "vdec", vdec_configs, CONFIG_FOR_RW);
b9164398
NQ
3928 return 0;
3929}
3930
3931static void __exit vdec_module_exit(void)
3932{
3933 platform_driver_unregister(&vdec_driver);
3934}
3935#endif
3936
3937static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
3938{
b9164398
NQ
3939 vdec_core->cma_dev = dev;
3940
3941 return 0;
3942}
3943
3944static const struct reserved_mem_ops rmem_vdec_ops = {
3945 .device_init = vdec_mem_device_init,
3946};
3947
3948static int __init vdec_mem_setup(struct reserved_mem *rmem)
3949{
3950 rmem->ops = &rmem_vdec_ops;
3951 pr_info("vdec: reserved mem setup\n");
3952
3953 return 0;
3954}
3955
3956RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
a6c89e96
NQ
3957/*
3958uint force_hevc_clock_cntl;
3959EXPORT_SYMBOL(force_hevc_clock_cntl);
3960
3961module_param(force_hevc_clock_cntl, uint, 0664);
3962*/
158de7c4 3963module_param(debug, uint, 0664);
b9164398
NQ
3964module_param(debug_trace_num, uint, 0664);
3965module_param(hevc_max_reset_count, int, 0664);
3966module_param(clk_config, uint, 0664);
3967module_param(step_mode, int, 0664);
a6c89e96 3968module_param(debugflags, int, 0664);
fe96802b 3969
b9164398
NQ
3970/*
3971*module_init(vdec_module_init);
3972*module_exit(vdec_module_exit);
3973*/
fe96802b
NQ
3974#define CREATE_TRACE_POINTS
3975#include "vdec_trace.h"
b9164398
NQ
3976MODULE_DESCRIPTION("AMLOGIC vdec driver");
3977MODULE_LICENSE("GPL");
3978MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");