decoder: fixed the issue that an h264 4K source could not be played back.
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_media.git] / drivers / frame_provider / decoder / utils / vdec.c
b9164398
NQ
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16*/
5b851ff9 17#define DEBUG
b9164398
NQ
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#include <linux/amlogic/media/vfm/vfm_ext.h>
37
38#include <linux/amlogic/media/utils/vdec_reg.h>
39#include "vdec.h"
fe96802b
NQ
40#include "vdec_trace.h"
41#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
42#include "vdec_profile.h"
43#endif
44#include <linux/of.h>
45#include <linux/of_fdt.h>
46#include <linux/libfdt_env.h>
47#include <linux/of_reserved_mem.h>
48#include <linux/dma-contiguous.h>
49#include <linux/cma.h>
50#include <linux/module.h>
51#include <linux/slab.h>
52#include <linux/dma-mapping.h>
53#include <linux/dma-contiguous.h>
54#include "../../../stream_input/amports/amports_priv.h"
55
56#include <linux/amlogic/media/utils/amports_config.h>
57#include "../utils/amvdec.h"
58#include "vdec_input.h"
59
60#include "../../../common/media_clock/clk/clk.h"
61#include <linux/reset.h>
fe96802b 62#include <linux/amlogic/cpu_version.h>
b9164398
NQ
63#include <linux/amlogic/media/codec_mm/codec_mm.h>
64#include <linux/amlogic/media/video_sink/video_keeper.h>
fe96802b
NQ
65#include <linux/amlogic/media/codec_mm/configs.h>
66#include <linux/amlogic/media/frame_sync/ptsserv.h>
67#include "secprot.h"
b9164398
NQ
68
69static DEFINE_MUTEX(vdec_mutex);
70
71#define MC_SIZE (4096 * 4)
72#define CMA_ALLOC_SIZE SZ_64M
73#define MEM_NAME "vdec_prealloc"
74static int inited_vcodec_num;
75static int poweron_clock_level;
76static int keep_vdec_mem;
77static unsigned int debug_trace_num = 16 * 20;
78static int step_mode;
79static unsigned int clk_config;
8247f369 80
3f4a083c
HZ
81/*
82 * debug & 1: set sched_priority to MAX_RT_PRIO - 1.
83 * debug & 2: always reload the firmware.
84 */
85static unsigned int debug;
86
b9164398 87static int hevc_max_reset_count;
fe96802b
NQ
88#define MAX_INSTANCE_MUN 9
89
28e318df 90static int no_powerdown;
b9164398
NQ
91static DEFINE_SPINLOCK(vdec_spin_lock);
92
93#define HEVC_TEST_LIMIT 100
94#define GXBB_REV_A_MINOR 0xA
95
96struct am_reg {
97 char *name;
98 int offset;
99};
100
101struct vdec_isr_context_s {
102 int index;
103 int irq;
104 irq_handler_t dev_isr;
105 irq_handler_t dev_threaded_isr;
106 void *dev_id;
107};
108
109struct vdec_core_s {
110 struct list_head connected_vdec_list;
111 spinlock_t lock;
fe96802b 112 struct ida ida;
b9164398
NQ
113 atomic_t vdec_nr;
114 struct vdec_s *vfm_vdec;
115 struct vdec_s *active_vdec;
fe96802b 116 struct vdec_s *hint_fr_vdec;
b9164398
NQ
117 struct platform_device *vdec_core_platform_device;
118 struct device *cma_dev;
b9164398
NQ
119 struct semaphore sem;
120 struct task_struct *thread;
fe96802b 121 struct workqueue_struct *vdec_core_wq;
b9164398
NQ
122
123 struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
124 int power_ref_count[VDEC_MAX];
3f4a083c 125 void *last_vdec;
b9164398
NQ
126};
127
128static struct vdec_core_s *vdec_core;
129
fe96802b
NQ
130static const char * const vdec_status_string[] = {
131 "VDEC_STATUS_UNINITIALIZED",
132 "VDEC_STATUS_DISCONNECTED",
133 "VDEC_STATUS_CONNECTED",
134 "VDEC_STATUS_ACTIVE"
135};
136
137static int debugflags;
138
139int vdec_get_debug_flags(void)
140{
141 return debugflags;
142}
143EXPORT_SYMBOL(vdec_get_debug_flags);
144
145unsigned char is_mult_inc(unsigned int type)
146{
147 unsigned char ret = 0;
148 if (vdec_get_debug_flags() & 0xf000)
149 ret = (vdec_get_debug_flags() & 0x1000)
150 ? 1 : 0;
151 else if (type & PORT_TYPE_DECODER_SCHED)
152 ret = 1;
153 return ret;
154}
155EXPORT_SYMBOL(is_mult_inc);
156
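A couple of concrete cases may help when reading is_mult_inc(); these are illustrative only and follow directly from the code above:

/*
 * Illustration: debugflags == 0x1000 forces multi-instance (returns 1)
 * for every port; debugflags == 0x2000 forces single-instance (0);
 * debugflags == 0 leaves the decision to the PORT_TYPE_DECODER_SCHED
 * bit of the port type.
 */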
b9164398
NQ
157unsigned long vdec_core_lock(struct vdec_core_s *core)
158{
159 unsigned long flags;
160
161 spin_lock_irqsave(&core->lock, flags);
162
163 return flags;
164}
165
166void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
167{
168 spin_unlock_irqrestore(&core->lock, flags);
169}
170
171static int get_canvas(unsigned int index, unsigned int base)
172{
173 int start;
174 int canvas_index = index * base;
175
176 if ((base > 4) || (base == 0))
177 return -1;
178
179 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
180 <= AMVDEC_CANVAS_MAX1) {
181 start = AMVDEC_CANVAS_START_INDEX + base * index;
182 } else {
183 canvas_index -= (AMVDEC_CANVAS_MAX1 -
184 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
185 if (canvas_index <= AMVDEC_CANVAS_MAX2)
186 start = canvas_index / base;
187 else
188 return -1;
189 }
190
191 if (base == 1) {
192 return start;
193 } else if (base == 2) {
194 return ((start + 1) << 16) | ((start + 1) << 8) | start;
195 } else if (base == 3) {
196 return ((start + 2) << 16) | ((start + 1) << 8) | start;
197 } else if (base == 4) {
198 return (((start + 3) << 24) | (start + 2) << 16) |
199 ((start + 1) << 8) | start;
200 }
201
202 return -1;
203}
204
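A worked example of the canvas packing above, illustrative only and assuming AMVDEC_CANVAS_START_INDEX is 0x78 (as defined in amvdec.h) and that the indices stay below AMVDEC_CANVAS_MAX1:

/*
 * get_canvas(0, 3) picks three consecutive canvases 0x78/0x79/0x7a and
 * packs them low byte first:
 *     ((0x78 + 2) << 16) | ((0x78 + 1) << 8) | 0x78 == 0x007a7978
 * get_canvas(1, 3) then starts at 0x7b and yields 0x007d7c7b, and so on.
 */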
205
fe96802b 206int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
b9164398
NQ
207{
208 if (vdec->dec_status)
209 return vdec->dec_status(vdec, vstatus);
210
211 return -1;
212}
213EXPORT_SYMBOL(vdec_status);
214
215int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
216{
 217 int r = -1;
218
219 if (vdec->set_trickmode) {
220 r = vdec->set_trickmode(vdec, trickmode);
221
222 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
223 r = vdec->slave->set_trickmode(vdec->slave,
224 trickmode);
225 }
226
 227 return r;
228}
229EXPORT_SYMBOL(vdec_set_trickmode);
230
d481db31
NQ
231int vdec_set_isreset(struct vdec_s *vdec, int isreset)
232{
233 vdec->is_reset = isreset;
234 pr_info("is_reset=%d\n", isreset);
235 if (vdec->set_isreset)
236 return vdec->set_isreset(vdec, isreset);
237 return 0;
238}
239EXPORT_SYMBOL(vdec_set_isreset);
240
28e318df
NQ
241int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
242{
243 vdec->dolby_meta_with_el = isdvmetawithel;
244 pr_info("isdvmetawithel=%d\n", isdvmetawithel);
245 return 0;
246}
c23e8aee 247EXPORT_SYMBOL(vdec_set_dv_metawithel);
28e318df
NQ
248
249void vdec_set_no_powerdown(int flag)
250{
251 no_powerdown = flag;
252 pr_info("no_powerdown=%d\n", no_powerdown);
253 return;
254}
c23e8aee 255EXPORT_SYMBOL(vdec_set_no_powerdown);
28e318df 256
fe96802b
NQ
257void vdec_count_info(struct vdec_info *vs, unsigned int err,
258 unsigned int offset)
259{
260 if (err)
261 vs->error_frame_count++;
262 if (offset) {
263 if (0 == vs->frame_count) {
264 vs->offset = 0;
265 vs->samp_cnt = 0;
266 }
267 vs->frame_data = offset > vs->total_data ?
268 offset - vs->total_data : vs->total_data - offset;
269 vs->total_data = offset;
270 if (vs->samp_cnt < 96000 * 2) { /* 2s */
271 if (0 == vs->samp_cnt)
272 vs->offset = offset;
273 vs->samp_cnt += vs->frame_dur;
274 } else {
275 vs->bit_rate = (offset - vs->offset) / 2;
276 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
277 vs->samp_cnt = 0;
278 }
279 vs->frame_count++;
280 }
281 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
282 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
283 return;
284}
285EXPORT_SYMBOL(vdec_count_info);
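A rough worked example of the bitrate estimate in vdec_count_info(), assuming frame_dur is in 90 kHz ticks as the 96000 * 2 (~2 s) window suggests:

/*
 * Example: a 30 fps stream has frame_dur == 3000 ticks, so samp_cnt
 * crosses 96000 * 2 after about 64 frames (~2 s). If the stream offset
 * advanced by 500000 bytes over that window, then
 *     bit_rate = 500000 / 2 = 250000
 * i.e. an approximate bytes-per-second figure for the last ~2 seconds.
 */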
c23e8aee
HZ
286int vdec_is_support_4k(void)
287{
288 //return !is_meson_gxl_package_805X();
289 return 1;
290}
291EXPORT_SYMBOL(vdec_is_support_4k);
fe96802b 292
b9164398
NQ
293/*
294 * clk_config:
295 * 0: default;
296 * 1: do not use gp0_pll;
297 * 2: always use gp0_pll;
298 * >= 10: fixed clock of n MHz,
299 *        e.g. 100 means a 100 MHz clock.
300 */
301unsigned int get_vdec_clk_config_settings(void)
302{
303 return clk_config;
304}
305void update_vdec_clk_config_settings(unsigned int config)
306{
307 clk_config = config;
308}
309EXPORT_SYMBOL(update_vdec_clk_config_settings);
310
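A minimal usage sketch of the pair above, illustrative only and based on the value convention in the comment block:

	update_vdec_clk_config_settings(100);	/* request a fixed 100 MHz clock */
	pr_info("vdec clk_config: %u\n", get_vdec_clk_config_settings());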
311static bool hevc_workaround_needed(void)
312{
313 return (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) &&
314 (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
315 == GXBB_REV_A_MINOR);
316}
317
318struct device *get_codec_cma_device(void)
319{
320 return vdec_core->cma_dev;
321}
322
fe96802b 323#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
324static const char * const vdec_device_name[] = {
325 "amvdec_mpeg12", "ammvdec_mpeg12",
326 "amvdec_mpeg4", "ammvdec_mpeg4",
327 "amvdec_h264", "ammvdec_h264",
328 "amvdec_mjpeg", "ammvdec_mjpeg",
329 "amvdec_real", "ammvdec_real",
330 "amjpegdec", "ammjpegdec",
331 "amvdec_vc1", "ammvdec_vc1",
332 "amvdec_avs", "ammvdec_avs",
333 "amvdec_yuv", "ammvdec_yuv",
334 "amvdec_h264mvc", "ammvdec_h264mvc",
335 "amvdec_h264_4k2k", "ammvdec_h264_4k2k",
336 "amvdec_h265", "ammvdec_h265",
337 "amvenc_avc", "amvenc_avc",
338 "jpegenc", "jpegenc",
339 "amvdec_vp9", "ammvdec_vp9"
340};
341
b9164398
NQ
342
343#else
344
345static const char * const vdec_device_name[] = {
346 "amvdec_mpeg12",
347 "amvdec_mpeg4",
348 "amvdec_h264",
349 "amvdec_mjpeg",
350 "amvdec_real",
351 "amjpegdec",
352 "amvdec_vc1",
353 "amvdec_avs",
354 "amvdec_yuv",
355 "amvdec_h264mvc",
356 "amvdec_h264_4k2k",
357 "amvdec_h265",
358 "amvenc_avc",
359 "jpegenc",
360 "amvdec_vp9"
361};
362
b9164398
NQ
363#endif
364
365int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
366{
367 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
368 sizeof(struct dec_sysinfo)))
369 return -EFAULT;
370
371 return 0;
372}
373EXPORT_SYMBOL(vdec_set_decinfo);
374
 375/* construct vdec structure */
376struct vdec_s *vdec_create(struct stream_port_s *port,
377 struct vdec_s *master)
378{
379 struct vdec_s *vdec;
380 int type = VDEC_TYPE_SINGLE;
fe96802b
NQ
381 int id;
382 if (is_mult_inc(port->type))
b9164398
NQ
383 type = (port->type & PORT_TYPE_FRAME) ?
384 VDEC_TYPE_FRAME_BLOCK :
385 VDEC_TYPE_STREAM_PARSER;
386
fe96802b
NQ
387 id = ida_simple_get(&vdec_core->ida,
388 0, MAX_INSTANCE_MUN, GFP_KERNEL);
389 if (id < 0) {
390 pr_info("vdec_create request id failed!ret =%d\n", id);
391 return NULL;
392 }
b9164398
NQ
393 vdec = vzalloc(sizeof(struct vdec_s));
394
395 /* TBD */
396 if (vdec) {
397 vdec->magic = 0x43454456;
fe96802b 398 vdec->id = -1;
b9164398
NQ
399 vdec->type = type;
400 vdec->port = port;
401 vdec->sys_info = &vdec->sys_info_store;
402
403 INIT_LIST_HEAD(&vdec->list);
404
b9164398 405 atomic_inc(&vdec_core->vdec_nr);
fe96802b
NQ
406 vdec->id = id;
407 vdec_input_init(&vdec->input, vdec);
b9164398
NQ
408 if (master) {
409 vdec->master = master;
410 master->slave = vdec;
411 master->sched = 1;
412 }
413 }
414
5b851ff9 415 pr_debug("vdec_create instance %p, total %d\n", vdec,
b9164398
NQ
416 atomic_read(&vdec_core->vdec_nr));
417
fe96802b
NQ
418 //trace_vdec_create(vdec); /*DEBUG_TMP*/
419
b9164398
NQ
420 return vdec;
421}
422EXPORT_SYMBOL(vdec_create);
423
424int vdec_set_format(struct vdec_s *vdec, int format)
425{
426 vdec->format = format;
fe96802b 427 vdec->port_flag |= PORT_FLAG_VFORMAT;
b9164398 428
fe96802b 429 if (vdec->slave) {
b9164398 430 vdec->slave->format = format;
fe96802b
NQ
431 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
432 }
433
434 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
b9164398
NQ
435
436 return 0;
437}
438EXPORT_SYMBOL(vdec_set_format);
439
440int vdec_set_pts(struct vdec_s *vdec, u32 pts)
441{
442 vdec->pts = pts;
fe96802b 443 vdec->pts64 = div64_u64((u64)pts * 100, 9);
b9164398 444 vdec->pts_valid = true;
fe96802b 445 //trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
b9164398
NQ
446 return 0;
447}
448EXPORT_SYMBOL(vdec_set_pts);
449
450int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
451{
452 vdec->pts64 = pts64;
fe96802b 453 vdec->pts = (u32)div64_u64(pts64 * 9, 100);
b9164398 454 vdec->pts_valid = true;
fe96802b
NQ
455
456 //trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
b9164398
NQ
457 return 0;
458}
459EXPORT_SYMBOL(vdec_set_pts64);
460
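The two setters above convert between the 90 kHz PTS clock and microseconds; a quick numeric check, illustrative only:

/*
 * pts == 90000 ticks (one second at 90 kHz) gives
 *     pts64 = div64_u64((u64)90000 * 100, 9) == 1000000 (us),
 * and the reverse path div64_u64(1000000 * 9, 100) == 90000 restores
 * the original tick count.
 */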
461void vdec_set_status(struct vdec_s *vdec, int status)
462{
fe96802b 463 //trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
b9164398
NQ
464 vdec->status = status;
465}
466EXPORT_SYMBOL(vdec_set_status);
467
468void vdec_set_next_status(struct vdec_s *vdec, int status)
469{
fe96802b 470 //trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
b9164398
NQ
471 vdec->next_status = status;
472}
473EXPORT_SYMBOL(vdec_set_next_status);
474
475int vdec_set_video_path(struct vdec_s *vdec, int video_path)
476{
477 vdec->frame_base_video_path = video_path;
478 return 0;
479}
480EXPORT_SYMBOL(vdec_set_video_path);
481
fe96802b
NQ
482int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
483{
484 vdec->vf_receiver_inst = receive_id;
485 return 0;
486}
487EXPORT_SYMBOL(vdec_set_receive_id);
488
b9164398
NQ
489/* add frame data to input chain */
490int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
491{
492 return vdec_input_add_frame(&vdec->input, buf, count);
493}
494EXPORT_SYMBOL(vdec_write_vframe);
495
fe96802b
NQ
496/* queue work on the vdec core workqueue, or the system workqueue as fallback */
497void vdec_schedule_work(struct work_struct *work)
498{
499 if (vdec_core->vdec_core_wq)
500 queue_work(vdec_core->vdec_core_wq, work);
501 else
502 schedule_work(work);
503}
504EXPORT_SYMBOL(vdec_schedule_work);
505
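A minimal usage sketch for vdec_schedule_work(); hw and notify_work_func are hypothetical decoder-side names, only the helper itself comes from this file:

	/* hw->notify_work is a struct work_struct in a decoder context */
	INIT_WORK(&hw->notify_work, notify_work_func);	/* once, at init */
	vdec_schedule_work(&hw->notify_work);		/* e.g. from an ISR */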
506static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
507{
508 if (vdec->master)
509 return vdec->master;
510 else if (vdec->slave)
511 return vdec->slave;
512 return NULL;
513}
514
515static void vdec_sync_input_read(struct vdec_s *vdec)
516{
517 if (!vdec_stream_based(vdec))
518 return;
519
520 if (vdec_dual(vdec)) {
521 u32 me, other;
522 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
523 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
524 other =
525 vdec_get_associate(vdec)->input.stream_cookie;
526 if (me > other)
527 return;
528 else if (me == other) {
529 me = READ_VREG(VLD_MEM_VIFIFO_RP);
530 other =
531 vdec_get_associate(vdec)->input.swap_rp;
532 if (me > other) {
533 WRITE_PARSER_REG(PARSER_VIDEO_RP,
534 vdec_get_associate(vdec)->
535 input.swap_rp);
536 return;
537 }
538 }
539 WRITE_PARSER_REG(PARSER_VIDEO_RP,
540 READ_VREG(VLD_MEM_VIFIFO_RP));
541 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
542 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
543 if (((me & 0x80000000) == 0) &&
544 (vdec->input.streaming_rp & 0x80000000))
545 me += 1ULL << 32;
546 other = vdec_get_associate(vdec)->input.streaming_rp;
547 if (me > other) {
548 WRITE_PARSER_REG(PARSER_VIDEO_RP,
549 vdec_get_associate(vdec)->
550 input.swap_rp);
551 return;
552 }
553
554 WRITE_PARSER_REG(PARSER_VIDEO_RP,
555 READ_VREG(HEVC_STREAM_RD_PTR));
556 }
557 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
558 WRITE_PARSER_REG(PARSER_VIDEO_RP,
559 READ_VREG(VLD_MEM_VIFIFO_RP));
560 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
561 WRITE_PARSER_REG(PARSER_VIDEO_RP,
562 READ_VREG(HEVC_STREAM_RD_PTR));
563 }
564}
565
566static void vdec_sync_input_write(struct vdec_s *vdec)
567{
568 if (!vdec_stream_based(vdec))
569 return;
570
571 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
572 WRITE_VREG(VLD_MEM_VIFIFO_WP,
573 READ_PARSER_REG(PARSER_VIDEO_WP));
574 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
575 WRITE_VREG(HEVC_STREAM_WR_PTR,
576 READ_PARSER_REG(PARSER_VIDEO_WP));
577 }
578}
579
b9164398
NQ
580/*
581 * get next frame from input chain
582 */
583/*
584 * The VLD FIFO is 512 bytes and the video buffer level
585 * empty interrupt is set to a 0x80 byte threshold
586 */
587#define VLD_PADDING_SIZE 1024
588#define HEVC_PADDING_SIZE (1024*16)
b9164398
NQ
589int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
590{
fe96802b 591 struct vdec_input_s *input = &vdec->input;
b9164398
NQ
592 struct vframe_chunk_s *chunk = NULL;
593 struct vframe_block_list_s *block = NULL;
594 int dummy;
595
596 /* full reset to HW input */
597 if (input->target == VDEC_INPUT_TARGET_VLD) {
598 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
599
600 /* reset VLD fifo for all vdec */
601 WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
602 WRITE_VREG(DOS_SW_RESET0, 0);
603
fe96802b 604 dummy = READ_RESET_REG(RESET0_REGISTER);
b9164398
NQ
605 WRITE_VREG(POWER_CTL_VLD, 1 << 4);
606 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
607#if 0
608 /*move to driver*/
609 if (input_frame_based(input))
610 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
611
612 /*
613 * 2: assist
614 * 3: parser
615 * 4: parser_state
616 * 8: dblk
617 * 11:mcpu
618 * 12:ccpu
619 * 13:ddr
620 * 14:iqit
621 * 15:ipp
622 * 17:qdct
623 * 18:mpred
624 * 19:sao
625 * 24:hevc_afifo
626 */
627 WRITE_VREG(DOS_SW_RESET3,
628 (1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
629 (1<<17)|(1<<18)|(1<<19));
630 WRITE_VREG(DOS_SW_RESET3, 0);
631#endif
632 }
633
634 /*
635 *setup HW decoder input buffer (VLD context)
636 * based on input->type and input->target
637 */
638 if (input_frame_based(input)) {
639 chunk = vdec_input_next_chunk(&vdec->input);
640
641 if (chunk == NULL) {
642 *p = NULL;
643 return -1;
644 }
645
646 block = chunk->block;
647
648 if (input->target == VDEC_INPUT_TARGET_VLD) {
649 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
650 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
651 block->size - 8);
652 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
653 round_down(block->start + chunk->offset,
fe96802b 654 VDEC_FIFO_ALIGN));
b9164398
NQ
655
656 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
657 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
658
659 /* set to manual mode */
660 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
661 WRITE_VREG(VLD_MEM_VIFIFO_RP,
662 round_down(block->start + chunk->offset,
fe96802b 663 VDEC_FIFO_ALIGN));
b9164398
NQ
664 dummy = chunk->offset + chunk->size +
665 VLD_PADDING_SIZE;
666 if (dummy >= block->size)
667 dummy -= block->size;
668 WRITE_VREG(VLD_MEM_VIFIFO_WP,
fe96802b
NQ
669 round_down(block->start + dummy,
670 VDEC_FIFO_ALIGN));
b9164398
NQ
671
672 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
673 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
674
675 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
676 (0x11 << 16) | (1<<10) | (7<<3));
677
678 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
679 WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
680 WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
681 block->size);
682 WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
683 chunk->offset);
684 dummy = chunk->offset + chunk->size +
685 HEVC_PADDING_SIZE;
686 if (dummy >= block->size)
687 dummy -= block->size;
688 WRITE_VREG(HEVC_STREAM_WR_PTR,
fe96802b
NQ
689 round_down(block->start + dummy,
690 VDEC_FIFO_ALIGN));
b9164398
NQ
691
692 /* set endian */
693 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
694 }
695
696 *p = chunk;
697 return chunk->size;
698
699 } else {
fe96802b 700 /* stream based */
b9164398
NQ
701 u32 rp = 0, wp = 0, fifo_len = 0;
702 int size;
fe96802b
NQ
703 bool swap_valid = input->swap_valid;
704 unsigned long swap_page_phys = input->swap_page_phys;
705
706 if (vdec_dual(vdec) &&
707 ((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
708 /* keep using previous input context */
709 struct vdec_s *master = (vdec->slave) ?
710 vdec : vdec->master;
711 if (master->input.last_swap_slave) {
712 swap_valid = master->slave->input.swap_valid;
713 swap_page_phys =
714 master->slave->input.swap_page_phys;
715 } else {
716 swap_valid = master->input.swap_valid;
717 swap_page_phys = master->input.swap_page_phys;
718 }
719 }
720
721 if (swap_valid) {
b9164398 722 if (input->target == VDEC_INPUT_TARGET_VLD) {
fe96802b
NQ
723 if (vdec->format == VFORMAT_H264)
724 SET_VREG_MASK(POWER_CTL_VLD,
725 (1 << 9));
726
b9164398
NQ
727 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
728
729 /* restore read side */
730 WRITE_VREG(VLD_MEM_SWAP_ADDR,
fe96802b 731 swap_page_phys);
b9164398
NQ
732 WRITE_VREG(VLD_MEM_SWAP_CTL, 1);
733
734 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
735 ;
736 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
737
738 /* restore wrap count */
739 WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
740 input->stream_cookie);
741
742 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
743 fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);
744
745 /* enable */
746 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
747 (0x11 << 16) | (1<<10));
748
fe96802b
NQ
749 /* sync with front end */
750 vdec_sync_input_read(vdec);
751 vdec_sync_input_write(vdec);
b9164398
NQ
752
753 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
754 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
755 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
756
757 /* restore read side */
758 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
fe96802b 759 swap_page_phys);
b9164398
NQ
760 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);
761
762 while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
763 & (1<<7))
764 ;
765 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
766
767 /* restore stream offset */
768 WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
769 input->stream_cookie);
770
771 rp = READ_VREG(HEVC_STREAM_RD_PTR);
772 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
773 >> 16) & 0x7f;
774
775
776 /* enable */
777
fe96802b
NQ
778 /* sync with front end */
779 vdec_sync_input_read(vdec);
780 vdec_sync_input_write(vdec);
b9164398
NQ
781
782 wp = READ_VREG(HEVC_STREAM_WR_PTR);
fe96802b
NQ
783
784 /*pr_info("vdec: restore context\r\n");*/
b9164398
NQ
785 }
786
787 } else {
788 if (input->target == VDEC_INPUT_TARGET_VLD) {
789 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
790 input->start);
791 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
792 input->start + input->size - 8);
793 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
794 input->start);
795
796 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
797 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
798
799 /* set to manual mode */
800 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
801 WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
802 WRITE_VREG(VLD_MEM_VIFIFO_WP,
fe96802b 803 READ_PARSER_REG(PARSER_VIDEO_WP));
b9164398
NQ
804
805 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
806
807 /* enable */
808 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
809 (0x11 << 16) | (1<<10));
810
811 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
812
813 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
814 WRITE_VREG(HEVC_STREAM_START_ADDR,
815 input->start);
816 WRITE_VREG(HEVC_STREAM_END_ADDR,
817 input->start + input->size);
818 WRITE_VREG(HEVC_STREAM_RD_PTR,
819 input->start);
820 WRITE_VREG(HEVC_STREAM_WR_PTR,
fe96802b 821 READ_PARSER_REG(PARSER_VIDEO_WP));
b9164398
NQ
822
823 rp = READ_VREG(HEVC_STREAM_RD_PTR);
824 wp = READ_VREG(HEVC_STREAM_WR_PTR);
825 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
826 >> 16) & 0x7f;
827
828 /* enable */
829 }
830 }
831 *p = NULL;
832 if (wp >= rp)
833 size = wp - rp + fifo_len;
834 else
835 size = wp + input->size - rp + fifo_len;
836 if (size < 0) {
837 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
838 __func__, input->size, wp, rp, fifo_len, size);
839 size = 0;
840 }
841 return size;
842 }
843}
844EXPORT_SYMBOL(vdec_prepare_input);
845
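For the stream based branch of vdec_prepare_input(), the return value is the wrapped distance from read to write pointer plus what already sits in the HW FIFO; a numeric illustration with made-up values:

/*
 * input->size == 0x100000, rp == 0xF0000, wp == 0x08000, fifo_len == 0x40:
 * wp < rp, so the data wraps and
 *     size = wp + input->size - rp + fifo_len
 *          = 0x08000 + 0x100000 - 0xF0000 + 0x40 = 0x18040 bytes
 * are reported as available to the decoder.
 */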
846void vdec_enable_input(struct vdec_s *vdec)
847{
848 struct vdec_input_s *input = &vdec->input;
849
850 if (vdec->status != VDEC_STATUS_ACTIVE)
851 return;
852
853 if (input->target == VDEC_INPUT_TARGET_VLD)
854 SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
855 else if (input->target == VDEC_INPUT_TARGET_HEVC) {
856 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
857 if (vdec_stream_based(vdec))
858 CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
859 else
860 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
861 SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
862 }
863}
864EXPORT_SYMBOL(vdec_enable_input);
865
fe96802b
NQ
866int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
867{
868 int r = vdec_input_set_buffer(&vdec->input, start, size);
869
870 if (r)
871 return r;
872
873 if (vdec->slave)
874 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
875
876 return r;
877}
878EXPORT_SYMBOL(vdec_set_input_buffer);
879
880/*
881 * vdec_has_more_input returns whether more input may still be
882 * consumed by the decoder through vdec_prepare_input.
883 * Note: the decoder driver should call this prior to vdec_vframe_dirty
884 * to determine whether EOS has happened for stream based decoding
885 * when there is not sufficient data for a frame.
886 */
887bool vdec_has_more_input(struct vdec_s *vdec)
888{
889 struct vdec_input_s *input = &vdec->input;
890
891 if (!input->eos)
892 return true;
893
894 if (input_frame_based(input))
895 return vdec_input_next_input_chunk(input) != NULL;
896 else {
897 if (input->target == VDEC_INPUT_TARGET_VLD)
898 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
899 READ_PARSER_REG(PARSER_VIDEO_WP);
900 else {
901 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
902 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
903 }
904 }
905}
906EXPORT_SYMBOL(vdec_has_more_input);
907
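A sketch of the calling order the comment above describes; enough_data_for_one_frame(), handle_stream_eos() and hw are hypothetical decoder-side names, only the vdec_* calls come from this file:

	if (!enough_data_for_one_frame(hw)) {
		if (vdec_has_more_input(vdec))
			return;			/* more data may still arrive */
		handle_stream_eos(hw);		/* no more input: drain/EOS path */
	}
	vdec_vframe_dirty(vdec, NULL);		/* stream based: no frame chunk */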
908void vdec_set_prepare_level(struct vdec_s *vdec, int level)
909{
910 vdec->input.prepare_level = level;
911}
912EXPORT_SYMBOL(vdec_set_prepare_level);
913
b9164398
NQ
914void vdec_set_flag(struct vdec_s *vdec, u32 flag)
915{
916 vdec->flag = flag;
917}
fe96802b
NQ
918EXPORT_SYMBOL(vdec_set_flag);
919
920void vdec_set_eos(struct vdec_s *vdec, bool eos)
921{
922 vdec->input.eos = eos;
923
924 if (vdec->slave)
925 vdec->slave->input.eos = eos;
926}
927EXPORT_SYMBOL(vdec_set_eos);
b9164398
NQ
928
929void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
930{
931 if (vdec && next_vdec) {
932 vdec->sched = 0;
933 next_vdec->sched = 1;
934 }
935}
fe96802b
NQ
936EXPORT_SYMBOL(vdec_set_next_sched);
937
938/*
939 * Swap Context: S0 S1 S2 S3 S4
940 * Sample sequence: M S M M S
941 * Master Context: S0 S0 S2 S3 S3
942 * Slave context: NA S1 S1 S2 S4
943 * ^
944 * ^
945 * ^
 946 * the tricky part.
 947 * If there are back-to-back decodes of the master or the slave,
 948 * then the context of the counterpart should be updated with the
 949 * current decoder's. In this example, S1 should be
 950 * updated to S2.
 951 * This is done by swapping the swap_page and related info
 952 * between the two layers.
 953 */
954static void vdec_borrow_input_context(struct vdec_s *vdec)
955{
956 struct page *swap_page;
957 unsigned long swap_page_phys;
958 struct vdec_input_s *me;
959 struct vdec_input_s *other;
960
961 if (!vdec_dual(vdec))
962 return;
963
964 me = &vdec->input;
965 other = &vdec_get_associate(vdec)->input;
966
967 /* swap the swap_context, borrow counter part's
968 * swap context storage and update all related info.
969 * After vdec_vframe_dirty, vdec_save_input_context
970 * will be called to update current vdec's
971 * swap context
972 */
973 swap_page = other->swap_page;
974 other->swap_page = me->swap_page;
975 me->swap_page = swap_page;
976
977 swap_page_phys = other->swap_page_phys;
978 other->swap_page_phys = me->swap_page_phys;
979 me->swap_page_phys = swap_page_phys;
980
981 other->swap_rp = me->swap_rp;
982 other->streaming_rp = me->streaming_rp;
983 other->stream_cookie = me->stream_cookie;
984 other->swap_valid = me->swap_valid;
985}
986
b9164398
NQ
987void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
988{
989 if (chunk)
990 chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;
991
992 if (vdec_stream_based(vdec)) {
fe96802b
NQ
993 vdec->input.swap_needed = true;
994
995 if (vdec_dual(vdec)) {
996 vdec_get_associate(vdec)->input.dirty_count = 0;
997 vdec->input.dirty_count++;
998 if (vdec->input.dirty_count > 1) {
999 vdec->input.dirty_count = 1;
1000 vdec_borrow_input_context(vdec);
1001 }
b9164398 1002 }
fe96802b
NQ
1003
1004 /* for stream based mode, we update read and write pointer
1005 * also in case decoder wants to keep working on decoding
1006 * for more frames while input front end has more data
1007 */
1008 vdec_sync_input_read(vdec);
1009 vdec_sync_input_write(vdec);
1010
1011 vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
1012 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
b9164398
NQ
1013 }
1014}
1015EXPORT_SYMBOL(vdec_vframe_dirty);
1016
fe96802b
NQ
1017bool vdec_need_more_data(struct vdec_s *vdec)
1018{
1019 if (vdec_stream_based(vdec))
1020 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1021
1022 return false;
1023}
1024EXPORT_SYMBOL(vdec_need_more_data);
1025
b9164398
NQ
1026void vdec_save_input_context(struct vdec_s *vdec)
1027{
fe96802b 1028 struct vdec_input_s *input = &vdec->input;
b9164398 1029
fe96802b 1030#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1031 vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
1032#endif
1033
1034 if (input->target == VDEC_INPUT_TARGET_VLD)
1035 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);
1036
1037 if (input_stream_based(input) && (input->swap_needed)) {
1038 if (input->target == VDEC_INPUT_TARGET_VLD) {
1039 WRITE_VREG(VLD_MEM_SWAP_ADDR,
fe96802b 1040 input->swap_page_phys);
b9164398
NQ
1041 WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
1042 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1043 ;
1044 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1045 vdec->input.stream_cookie =
1046 READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
fe96802b
NQ
1047 vdec->input.swap_rp =
1048 READ_VREG(VLD_MEM_VIFIFO_RP);
1049 vdec->input.total_rd_count =
1050 (u64)vdec->input.stream_cookie *
1051 vdec->input.size + vdec->input.swap_rp -
1052 READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
b9164398
NQ
1053 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1054 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
fe96802b 1055 input->swap_page_phys);
b9164398
NQ
1056 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);
1057
1058 while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
1059 ;
1060 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1061
1062 vdec->input.stream_cookie =
1063 READ_VREG(HEVC_SHIFT_BYTE_COUNT);
fe96802b
NQ
1064 vdec->input.swap_rp =
1065 READ_VREG(HEVC_STREAM_RD_PTR);
1066 if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
1067 (vdec->input.streaming_rp & 0x80000000))
1068 vdec->input.streaming_rp += 1ULL << 32;
1069 vdec->input.streaming_rp &= 0xffffffffULL << 32;
1070 vdec->input.streaming_rp |= vdec->input.stream_cookie;
1071 vdec->input.total_rd_count = vdec->input.streaming_rp;
b9164398
NQ
1072 }
1073
1074 input->swap_valid = true;
fe96802b
NQ
1075 input->swap_needed = false;
1076 /*pr_info("vdec: save context\r\n");*/
b9164398 1077
fe96802b
NQ
1078 vdec_sync_input_read(vdec);
1079
1080 if (vdec_dual(vdec)) {
1081 struct vdec_s *master = (vdec->slave) ?
1082 vdec : vdec->master;
1083 master->input.last_swap_slave = (master->slave == vdec);
1084 /* pr_info("master->input.last_swap_slave = %d\n",
1085 master->input.last_swap_slave); */
1086 }
b9164398
NQ
1087 }
1088}
1089EXPORT_SYMBOL(vdec_save_input_context);
1090
1091void vdec_clean_input(struct vdec_s *vdec)
1092{
1093 struct vdec_input_s *input = &vdec->input;
1094
1095 while (!list_empty(&input->vframe_chunk_list)) {
1096 struct vframe_chunk_s *chunk =
1097 vdec_input_next_chunk(input);
1098 if (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED)
1099 vdec_input_release_chunk(input, chunk);
1100 else
1101 break;
1102 }
1103 vdec_save_input_context(vdec);
1104}
1105EXPORT_SYMBOL(vdec_clean_input);
1106
fe96802b 1107int vdec_sync_input(struct vdec_s *vdec)
b9164398 1108{
fe96802b
NQ
1109 struct vdec_input_s *input = &vdec->input;
1110 u32 rp = 0, wp = 0, fifo_len = 0;
1111 int size;
1112
1113 vdec_sync_input_read(vdec);
1114 vdec_sync_input_write(vdec);
1115 if (input->target == VDEC_INPUT_TARGET_VLD) {
1116 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1117 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1118
1119 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1120 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1121 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1122 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1123 >> 16) & 0x7f;
1124 }
1125 if (wp >= rp)
1126 size = wp - rp + fifo_len;
1127 else
1128 size = wp + input->size - rp + fifo_len;
1129 if (size < 0) {
1130 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1131 __func__, input->size, wp, rp, fifo_len, size);
1132 size = 0;
b9164398 1133 }
fe96802b
NQ
1134 return size;
1135
1136}
1137EXPORT_SYMBOL(vdec_sync_input);
1138
1139const char *vdec_status_str(struct vdec_s *vdec)
1140{
1141 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1142 vdec_status_string[vdec->status] : "INVALID";
b9164398
NQ
1143}
1144
1145const char *vdec_type_str(struct vdec_s *vdec)
1146{
1147 switch (vdec->type) {
1148 case VDEC_TYPE_SINGLE:
1149 return "VDEC_TYPE_SINGLE";
1150 case VDEC_TYPE_STREAM_PARSER:
1151 return "VDEC_TYPE_STREAM_PARSER";
1152 case VDEC_TYPE_FRAME_BLOCK:
1153 return "VDEC_TYPE_FRAME_BLOCK";
1154 case VDEC_TYPE_FRAME_CIRCULAR:
1155 return "VDEC_TYPE_FRAME_CIRCULAR";
1156 default:
1157 return "VDEC_TYPE_INVALID";
1158 }
1159}
1160
1161const char *vdec_device_name_str(struct vdec_s *vdec)
1162{
1163 return vdec_device_name[vdec->format * 2 + 1];
1164}
fe96802b 1165EXPORT_SYMBOL(vdec_device_name_str);
b9164398
NQ
1166
1167void walk_vdec_core_list(char *s)
1168{
1169 struct vdec_s *vdec;
1170 struct vdec_core_s *core = vdec_core;
1171 unsigned long flags;
1172
1173 pr_info("%s --->\n", s);
1174
1175 flags = vdec_core_lock(vdec_core);
1176
1177 if (list_empty(&core->connected_vdec_list)) {
1178 pr_info("connected vdec list empty\n");
1179 } else {
1180 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1181 pr_info("\tvdec (%p), status = %s\n", vdec,
1182 vdec_status_str(vdec));
1183 }
1184 }
1185
1186 vdec_core_unlock(vdec_core, flags);
1187}
1188EXPORT_SYMBOL(walk_vdec_core_list);
1189
fe96802b
NQ
1190/* insert vdec to vdec_core for scheduling,
1191 * for dual running decoders, connect/disconnect always runs in pairs
1192 */
b9164398
NQ
1193int vdec_connect(struct vdec_s *vdec)
1194{
1195 unsigned long flags;
1196
fe96802b
NQ
1197 //trace_vdec_connect(vdec);/*DEBUG_TMP*/
1198
b9164398
NQ
1199 if (vdec->status != VDEC_STATUS_DISCONNECTED)
1200 return 0;
1201
1202 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
1203 vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1204
1205 init_completion(&vdec->inactive_done);
1206
1207 if (vdec->slave) {
1208 vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
1209 vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);
1210
1211 init_completion(&vdec->slave->inactive_done);
1212 }
1213
1214 flags = vdec_core_lock(vdec_core);
1215
1216 list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);
1217
1218 if (vdec->slave) {
1219 list_add_tail(&vdec->slave->list,
1220 &vdec_core->connected_vdec_list);
1221 }
1222
1223 vdec_core_unlock(vdec_core, flags);
1224
1225 up(&vdec_core->sem);
1226
1227 return 0;
1228}
1229EXPORT_SYMBOL(vdec_connect);
1230
1231/* remove vdec from vdec_core scheduling */
1232int vdec_disconnect(struct vdec_s *vdec)
1233{
fe96802b 1234#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1235 vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
1236#endif
fe96802b 1237 //trace_vdec_disconnect(vdec);/*DEBUG_TMP*/
b9164398
NQ
1238
1239 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1240 (vdec->status != VDEC_STATUS_ACTIVE)) {
1241 return 0;
1242 }
1243
1244 /*
1245 *when a vdec is under the management of scheduler
1246 * the status change will only be from vdec_core_thread
1247 */
1248 vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);
1249
1250 if (vdec->slave)
1251 vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
1252 else if (vdec->master)
1253 vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
1254
1255 up(&vdec_core->sem);
1256
1257 wait_for_completion(&vdec->inactive_done);
1258
1259 if (vdec->slave)
1260 wait_for_completion(&vdec->slave->inactive_done);
1261 else if (vdec->master)
1262 wait_for_completion(&vdec->master->inactive_done);
1263
1264 return 0;
1265}
1266EXPORT_SYMBOL(vdec_disconnect);
1267
1268/* release vdec structure */
1269int vdec_destroy(struct vdec_s *vdec)
1270{
fe96802b
NQ
1271 //trace_vdec_destroy(vdec);/*DEBUG_TMP*/
1272
1273 vdec_input_release(&vdec->input);
b9164398 1274
fe96802b 1275#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1276 vdec_profile_flush(vdec);
1277#endif
fe96802b 1278 ida_simple_remove(&vdec_core->ida, vdec->id);
b9164398
NQ
1279 vfree(vdec);
1280
1281 atomic_dec(&vdec_core->vdec_nr);
1282
1283 return 0;
1284}
1285EXPORT_SYMBOL(vdec_destroy);
1286
 1287/*
 1288 * Only frame based input supports time sliced decoding,
 1289 * so a legacy decoder can coexist with a time sliced decoder.
 1290 */
1291static const char *get_dev_name(bool use_legacy_vdec, int format)
1292{
fe96802b 1293#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1294 if (use_legacy_vdec)
1295 return vdec_device_name[format * 2];
1296 else
1297 return vdec_device_name[format * 2 + 1];
1298#else
1299 return vdec_device_name[format];
1300#endif
1301}
1302
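For instance, and assuming the enum vformat_e ordering matches the tables above (which is what the format * 2 indexing relies on):

/*
 * For the H.264 entry, get_dev_name(true, format) selects the legacy
 * single-instance driver "amvdec_h264", while get_dev_name(false, format)
 * selects the multi-instance driver "ammvdec_h264".
 */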
b9164398
NQ
 1303/*
 1304 * register vdec_device
 1305 * create output: either a vfm path or an ionvideo output
 1306 */
1307s32 vdec_init(struct vdec_s *vdec, int is_4k)
1308{
1309 int r = 0;
1310 struct vdec_s *p = vdec;
b9164398 1311 const char *dev_name;
fe96802b 1312 int id = PLATFORM_DEVID_AUTO;/*if have used my self*/
b9164398
NQ
1313
1314 dev_name = get_dev_name(vdec_single(vdec), vdec->format);
1315
1316 if (dev_name == NULL)
1317 return -ENODEV;
1318
1319 pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
1320 dev_name, vdec_type_str(vdec));
1321
1322 /*
 1323 *todo: VFM path control should be configurable,
1324 * for now all stream based input uses default VFM path.
1325 */
1326 if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
1327 if (vdec_core->vfm_vdec == NULL) {
5b851ff9 1328 pr_debug("vdec_init set vfm decoder %p\n", vdec);
b9164398
NQ
1329 vdec_core->vfm_vdec = vdec;
1330 } else {
1331 pr_info("vdec_init vfm path busy.\n");
1332 return -EBUSY;
1333 }
1334 }
1335
b9164398
NQ
1336 mutex_lock(&vdec_mutex);
1337 inited_vcodec_num++;
1338 mutex_unlock(&vdec_mutex);
1339
1340 vdec_input_set_type(&vdec->input, vdec->type,
1341 (vdec->format == VFORMAT_HEVC ||
1342 vdec->format == VFORMAT_VP9) ?
1343 VDEC_INPUT_TARGET_HEVC :
1344 VDEC_INPUT_TARGET_VLD);
1345
1346 p->cma_dev = vdec_core->cma_dev;
1347 p->get_canvas = get_canvas;
1348 /* todo */
1349 if (!vdec_dual(vdec))
fe96802b 1350 p->use_vfm_path = vdec_stream_based(vdec);
b9164398 1351 /* vdec_dev_reg.flag = 0; */
fe96802b
NQ
1352 if (vdec->id >= 0)
1353 id = vdec->id;
1354 p->dev = platform_device_register_data(
b9164398
NQ
1355 &vdec_core->vdec_core_platform_device->dev,
1356 dev_name,
fe96802b 1357 id,
b9164398
NQ
1358 &p, sizeof(struct vdec_s *));
1359
1360 if (IS_ERR(p->dev)) {
1361 r = PTR_ERR(p->dev);
1362 pr_err("vdec: Decoder device %s register failed (%d)\n",
1363 dev_name, r);
1364
1365 mutex_lock(&vdec_mutex);
1366 inited_vcodec_num--;
1367 mutex_unlock(&vdec_mutex);
1368
fe96802b
NQ
1369 goto error;
1370 } else if (!p->dev->dev.driver) {
1371 pr_info("vdec: Decoder device %s driver probe failed.\n",
1372 dev_name);
1373 r = -ENODEV;
1374
b9164398
NQ
1375 goto error;
1376 }
1377
1378 if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
1379 r = -ENODEV;
1380 pr_err("vdec: Decoder device not handled (%s)\n", dev_name);
1381
1382 mutex_lock(&vdec_mutex);
1383 inited_vcodec_num--;
1384 mutex_unlock(&vdec_mutex);
1385
1386 goto error;
1387 }
1388
1389 if (p->use_vfm_path) {
1390 vdec->vf_receiver_inst = -1;
fe96802b 1391 vdec->vfm_map_id[0] = 0;
b9164398
NQ
1392 } else if (!vdec_dual(vdec)) {
1393 /* create IONVIDEO instance and connect decoder's
1394 * vf_provider interface to it
1395 */
1396 if (p->type != VDEC_TYPE_FRAME_BLOCK) {
1397 r = -ENODEV;
1398 pr_err("vdec: Incorrect decoder type\n");
1399
1400 mutex_lock(&vdec_mutex);
1401 inited_vcodec_num--;
1402 mutex_unlock(&vdec_mutex);
1403
1404 goto error;
1405 }
1406 if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
1407#if 1
ff4c2158
NQ
1408 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1409 &vdec->vf_receiver_inst);
b9164398
NQ
1410#else
1411 /*
1412 * temporarily just use decoder instance ID as iondriver ID
1413 * to solve OMX iondriver instance number check time sequence
1414 * only the limitation is we can NOT mix different video
1415 * decoders since same ID will be used for different decoder
1416 * formats.
1417 */
1418 vdec->vf_receiver_inst = p->dev->id;
1419 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1420 &vdec->vf_receiver_inst);
1421#endif
1422 if (r < 0) {
1423 pr_err("IonVideo frame receiver allocation failed.\n");
1424
1425 mutex_lock(&vdec_mutex);
1426 inited_vcodec_num--;
1427 mutex_unlock(&vdec_mutex);
1428
1429 goto error;
1430 }
1431
fe96802b
NQ
1432 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1433 "%s %s", vdec->vf_provider_name,
1434 vdec->vf_receiver_name);
1435 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1436 "vdec-map-%d", vdec->id);
b9164398
NQ
1437 } else if (p->frame_base_video_path ==
1438 FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
1439 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1440 "%s %s", vdec->vf_provider_name,
fe96802b 1441 "amlvideo deinterlace amvideo");
b9164398 1442 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
fe96802b 1443 "vdec-map-%d", vdec->id);
b9164398
NQ
1444 } else if (p->frame_base_video_path ==
1445 FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
1446 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1447 "%s %s", vdec->vf_provider_name,
1448 "ppmgr amlvideo.1 amvide2");
1449 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
fe96802b 1450 "vdec-map-%d", vdec->id);
b9164398
NQ
1451 }
1452
1453 if (vfm_map_add(vdec->vfm_map_id,
1454 vdec->vfm_map_chain) < 0) {
1455 r = -ENOMEM;
1456 pr_err("Decoder pipeline map creation failed %s.\n",
1457 vdec->vfm_map_id);
1458 vdec->vfm_map_id[0] = 0;
1459
1460 mutex_lock(&vdec_mutex);
1461 inited_vcodec_num--;
1462 mutex_unlock(&vdec_mutex);
1463
1464 goto error;
1465 }
1466
5b851ff9 1467 pr_debug("vfm map %s created\n", vdec->vfm_map_id);
b9164398
NQ
1468
1469 /*
 1470 *assume the IONVIDEO driver already has a few vframe_receivers
 1471 * registered.
1472 * 1. Call iondriver function to allocate a IONVIDEO path and
1473 * provide receiver's name and receiver op.
1474 * 2. Get decoder driver's provider name from driver instance
1475 * 3. vfm_map_add(name, "<decoder provider name>
1476 * <iondriver receiver name>"), e.g.
1477 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
1478 * 4. vf_reg_provider and vf_reg_receiver
1479 * Note: the decoder provider's op uses vdec as op_arg
1480 * the iondriver receiver's op uses iondev device as
1481 * op_arg
1482 */
1483
1484 }
1485
1486 if (!vdec_single(vdec)) {
1487 vf_reg_provider(&p->vframe_provider);
1488
1489 vf_notify_receiver(p->vf_provider_name,
1490 VFRAME_EVENT_PROVIDER_START,
1491 vdec);
fe96802b
NQ
1492
1493 if (vdec_core->hint_fr_vdec == NULL)
1494 vdec_core->hint_fr_vdec = vdec;
1495
1496 if (vdec_core->hint_fr_vdec == vdec) {
1497 if (p->sys_info->rate != 0) {
d481db31
NQ
1498 if (!vdec->is_reset)
1499 vf_notify_receiver(p->vf_provider_name,
1500 VFRAME_EVENT_PROVIDER_FR_HINT,
1501 (void *)
1502 ((unsigned long)
1503 p->sys_info->rate));
fe96802b
NQ
1504 vdec->fr_hint_state = VDEC_HINTED;
1505 } else {
1506 vdec->fr_hint_state = VDEC_NEED_HINT;
1507 }
1508 }
b9164398
NQ
1509 }
1510
28e318df 1511 p->dolby_meta_with_el = 0;
5b851ff9 1512 pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
fe96802b
NQ
1513 vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
1514 &vdec->input,
1515 vdec->sys_info->width,
1516 vdec->sys_info->height);
b9164398
NQ
1517 /* vdec is now ready to be active */
1518 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
1519
1520 return 0;
1521
1522error:
1523 return r;
1524}
1525EXPORT_SYMBOL(vdec_init);
1526
fe96802b
NQ
1527/* vdec_create/init/release/destroy are applied to both dual running decoders
1528 */
b9164398
NQ
1529void vdec_release(struct vdec_s *vdec)
1530{
fe96802b
NQ
1531 //trace_vdec_release(vdec);/*DEBUG_TMP*/
1532
b9164398
NQ
1533 vdec_disconnect(vdec);
1534
fe96802b
NQ
1535 if (vdec->vframe_provider.name) {
1536 if (!vdec_single(vdec)) {
1537 if (vdec_core->hint_fr_vdec == vdec
d481db31
NQ
1538 && vdec->fr_hint_state == VDEC_HINTED
1539 && !vdec->is_reset)
fe96802b
NQ
1540 vf_notify_receiver(
1541 vdec->vf_provider_name,
1542 VFRAME_EVENT_PROVIDER_FR_END_HINT,
1543 NULL);
1544 vdec->fr_hint_state = VDEC_NO_NEED_HINT;
1545 }
b9164398 1546 vf_unreg_provider(&vdec->vframe_provider);
fe96802b 1547 }
b9164398
NQ
1548
1549 if (vdec_core->vfm_vdec == vdec)
1550 vdec_core->vfm_vdec = NULL;
1551
fe96802b
NQ
1552 if (vdec_core->hint_fr_vdec == vdec)
1553 vdec_core->hint_fr_vdec = NULL;
1554
b9164398
NQ
1555 if (vdec->vf_receiver_inst >= 0) {
1556 if (vdec->vfm_map_id[0]) {
1557 vfm_map_remove(vdec->vfm_map_id);
1558 vdec->vfm_map_id[0] = 0;
1559 }
b9164398
NQ
1560 }
1561
1562 platform_device_unregister(vdec->dev);
b9164398
NQ
1563 vdec_destroy(vdec);
1564
1565 mutex_lock(&vdec_mutex);
1566 inited_vcodec_num--;
1567 mutex_unlock(&vdec_mutex);
fe96802b 1568
5b851ff9 1569 pr_debug("vdec_release instance %p, total %d\n", vdec,
fe96802b 1570 atomic_read(&vdec_core->vdec_nr));
b9164398
NQ
1571}
1572EXPORT_SYMBOL(vdec_release);
1573
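Putting the pieces together, a port driver would typically walk the lifecycle below; this is a sketch with error handling omitted and user_sysinfo/is_4k as placeholder names, only the vdec_* calls come from this file:

	vdec = vdec_create(port, NULL);		/* allocate an instance */
	vdec_set_format(vdec, VFORMAT_H264);
	vdec_set_decinfo(vdec, user_sysinfo);	/* copies from user space */
	if (vdec_init(vdec, is_4k) == 0)
		vdec_connect(vdec);		/* scheduled by the core thread */
	/* ... decoding runs ... */
	vdec_release(vdec);			/* disconnect and destroy */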
fe96802b
NQ
1574/* For dual running decoders, vdec_reset is only called with master vdec.
1575 */
b9164398
NQ
1576int vdec_reset(struct vdec_s *vdec)
1577{
fe96802b
NQ
1578 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
1579
b9164398
NQ
1580 vdec_disconnect(vdec);
1581
1582 if (vdec->vframe_provider.name)
1583 vf_unreg_provider(&vdec->vframe_provider);
1584
1585 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
1586 vf_unreg_provider(&vdec->slave->vframe_provider);
1587
1588 if (vdec->reset) {
1589 vdec->reset(vdec);
1590 if (vdec->slave)
1591 vdec->slave->reset(vdec->slave);
1592 }
3f4a083c 1593 vdec->mc_loaded = 0;/*clear for reload firmware.*/
b9164398
NQ
1594 vdec_input_release(&vdec->input);
1595
1596 vf_reg_provider(&vdec->vframe_provider);
1597 vf_notify_receiver(vdec->vf_provider_name,
1598 VFRAME_EVENT_PROVIDER_START, vdec);
1599
1600 if (vdec->slave) {
1601 vf_reg_provider(&vdec->slave->vframe_provider);
1602 vf_notify_receiver(vdec->slave->vf_provider_name,
1603 VFRAME_EVENT_PROVIDER_START, vdec->slave);
3f4a083c 1604 vdec->slave->mc_loaded = 0;/*clear for reload firmware.*/
b9164398
NQ
1605 }
1606
1607 vdec_connect(vdec);
1608
1609 return 0;
1610}
1611EXPORT_SYMBOL(vdec_reset);
1612
fe96802b
NQ
1613void vdec_free_cmabuf(void)
1614{
1615 mutex_lock(&vdec_mutex);
1616
1617 if (inited_vcodec_num > 0) {
1618 mutex_unlock(&vdec_mutex);
1619 return;
1620 }
1621 mutex_unlock(&vdec_mutex);
1622}
1623
b9164398
NQ
1624static struct vdec_s *active_vdec(struct vdec_core_s *core)
1625{
1626 struct vdec_s *vdec;
1627 struct list_head *p;
1628
1629 list_for_each(p, &core->connected_vdec_list) {
1630 vdec = list_entry(p, struct vdec_s, list);
1631 if (vdec->status == VDEC_STATUS_ACTIVE)
1632 return vdec;
1633 }
1634
1635 return NULL;
1636}
1637
1638/*
1639*Decoder callback
1640 * Each decoder instance uses this callback to notify status change, e.g. when
1641 * decoder finished using HW resource.
 1642 * a sample callback from a decoder's driver is as follows:
1643 *
1644 * if (hw->vdec_cb) {
1645 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1646 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
1647 * }
1648 */
1649static void vdec_callback(struct vdec_s *vdec, void *data)
1650{
1651 struct vdec_core_s *core = (struct vdec_core_s *)data;
1652
fe96802b 1653#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1654 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
1655#endif
1656
1657 up(&core->sem);
1658}
1659
1660static irqreturn_t vdec_isr(int irq, void *dev_id)
1661{
1662 struct vdec_isr_context_s *c =
1663 (struct vdec_isr_context_s *)dev_id;
1664 struct vdec_s *vdec = vdec_core->active_vdec;
1665
1666 if (c->dev_isr)
1667 return c->dev_isr(irq, c->dev_id);
1668
1669 if (c != &vdec_core->isr_context[VDEC_IRQ_1]) {
1670#if 0
1671 pr_warn("vdec interrupt w/o a valid receiver\n");
1672#endif
1673 return IRQ_HANDLED;
1674 }
1675
1676 if (!vdec) {
1677#if 0
1678 pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
1679 core);
1680#endif
1681 return IRQ_HANDLED;
1682 }
1683
1684 if (!vdec->irq_handler) {
1685#if 0
1686 pr_warn("vdec instance has no irq handle.\n");
1687#endif
1688 return IRQ_HANDLED;
1689 }
1690
1691 return vdec->irq_handler(vdec);
1692}
1693
1694static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
1695{
1696 struct vdec_isr_context_s *c =
1697 (struct vdec_isr_context_s *)dev_id;
1698 struct vdec_s *vdec = vdec_core->active_vdec;
1699
1700 if (c->dev_threaded_isr)
1701 return c->dev_threaded_isr(irq, c->dev_id);
1702
1703 if (!vdec)
1704 return IRQ_HANDLED;
1705
1706 if (!vdec->threaded_irq_handler)
1707 return IRQ_HANDLED;
1708
1709 return vdec->threaded_irq_handler(vdec);
1710}
1711
1712static inline bool vdec_ready_to_run(struct vdec_s *vdec)
1713{
1714 bool r;
fe96802b 1715 struct vdec_input_s *input = &vdec->input;
b9164398
NQ
1716
1717 if (vdec->status != VDEC_STATUS_CONNECTED)
1718 return false;
1719
1720 if (!vdec->run_ready)
1721 return false;
1722
1723 if ((vdec->slave || vdec->master) &&
1724 (vdec->sched == 0))
1725 return false;
77a00b7e 1726
d481db31 1727 /* check frame based input underrun */
77a00b7e
NQ
1728 if (input && !input->eos && input_frame_based(input)
1729 && (!vdec_input_next_chunk(input)))
d481db31 1730 return false;
77a00b7e 1731
fe96802b
NQ
1732 /* check streaming prepare level threshold if not EOS */
1733 if (input && input_stream_based(input) && !input->eos) {
1734 u32 rp, wp, level;
1735
1736 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
1737 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
1738 if (wp < rp)
1739 level = input->size + wp - rp;
1740 else
1741 level = wp - rp;
1742
1743 if ((level < input->prepare_level) &&
1744 (pts_get_rec_num(PTS_TYPE_VIDEO,
1745 vdec->input.total_rd_count) < 2)) {
1746 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
1747 return false;
28e318df
NQ
1748 } else if (level > input->prepare_level)
1749 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
fe96802b
NQ
1750 }
1751
b9164398
NQ
1752 if (step_mode) {
1753 if ((step_mode & 0xff) != vdec->id)
1754 return false;
1755 }
1756
1757 step_mode &= ~0xff;
1758
fe96802b 1759#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1760 vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
1761#endif
1762
1763 r = vdec->run_ready(vdec);
1764
fe96802b 1765#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1766 if (r)
1767 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
1768#endif
1769
1770 return r;
1771}
1772
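To make the prepare_level gate in vdec_ready_to_run() concrete, an illustration with made-up numbers:

/*
 * The parser has written wp - rp == 0x800 bytes while
 * input->prepare_level is 0x1000 and fewer than two video PTS records
 * cover total_rd_count: the instance is skipped this round and
 * VDEC_NEED_MORE_DATA is raised. Once the level climbs past 0x1000 the
 * flag is cleared again and the instance becomes schedulable.
 */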
fe96802b
NQ
1773/*
1774 * Set up secure protection for each decoder instance running.
1775 * Note: The operation from REE side only resets memory access
1776 * to a default policy and even a non_secure type will still be
1777 * changed to secure type automatically when secure source is
1778 * detected inside TEE.
 1779 * Perform need_more_data checking and set the flag if the decoder
 1780 * is not consuming data.
1781 */
1782static inline void vdec_prepare_run(struct vdec_s *vdec)
1783{
1784 struct vdec_input_s *input = &vdec->input;
1785 int type = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
1786 DMC_DEV_TYPE_NON_SECURE;
1787
1788 if (input->target == VDEC_INPUT_TARGET_VLD)
1789 tee_config_device_secure(DMC_DEV_ID_VDEC, type);
1790 else if (input->target == VDEC_INPUT_TARGET_HEVC)
1791 tee_config_device_secure(DMC_DEV_ID_HEVC, type);
1792
1793 if (vdec_stream_based(vdec) &&
1794 ((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
1795 (vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
1796 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
1797 }
1798
1799 vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
1800 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
1801}
1802
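The RUN/DIRTY bookkeeping above works together with vdec_vframe_dirty() and vdec_need_more_data(); a summary of the flag flow as implemented in this file:

/*
 * - vdec_prepare_run() sets VDEC_NEED_MORE_DATA_RUN and clears
 *   VDEC_NEED_MORE_DATA_DIRTY before every run.
 * - vdec_vframe_dirty() sets DIRTY (and clears VDEC_NEED_MORE_DATA)
 *   when the decoder actually consumed input.
 * - If a new run starts with RUN still set and DIRTY still clear,
 *   nothing was consumed since the previous run, so VDEC_NEED_MORE_DATA
 *   is raised and vdec_need_more_data() reports it to the caller.
 */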
b9164398
NQ
1803/* The vdec core thread manages all decoder instances in the active list. When
 1804 * a vdec is added into the active list, it can only be in one of two states:
 1805 * VDEC_STATUS_CONNECTED (the decoder does not own HW resources and is ready to run)
 1806 * VDEC_STATUS_ACTIVE (the decoder owns HW resources and is running).
 1807 * Removing a decoder from the active list is only performed within the core thread.
 1808 * Adding a decoder into the active list is performed from a user thread.
 1809 */
1810static int vdec_core_thread(void *data)
1811{
b9164398 1812 struct vdec_core_s *core = (struct vdec_core_s *)data;
3f4a083c
HZ
1813 struct vdec_s *lastvdec;
1814 struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
b9164398
NQ
1815
1816 sched_setscheduler(current, SCHED_FIFO, &param);
1817
1818 allow_signal(SIGTERM);
3f4a083c 1819 lastvdec = NULL;
b9164398
NQ
1820 while (down_interruptible(&core->sem) == 0) {
1821 struct vdec_s *vdec, *tmp;
1822 LIST_HEAD(disconnecting_list);
1823
1824 if (kthread_should_stop())
1825 break;
1826
1827 /* clean up previous active vdec's input */
1828 if ((core->active_vdec) &&
1829 (core->active_vdec->status == VDEC_STATUS_CONNECTED)) {
1830 struct vdec_input_s *input = &core->active_vdec->input;
1831
1832 while (!list_empty(&input->vframe_chunk_list)) {
1833 struct vframe_chunk_s *chunk =
1834 vdec_input_next_chunk(input);
1835 if (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED)
1836 vdec_input_release_chunk(input, chunk);
1837 else
1838 break;
1839 }
1840
1841 vdec_save_input_context(core->active_vdec);
1842 }
1843
1844 /*
1845 *todo:
1846 * this is the case when the decoder is in active mode and
1847 * the system side wants to stop it. Currently we rely on
1848 * the decoder instance to go back to VDEC_STATUS_CONNECTED
1849 * from VDEC_STATUS_ACTIVE by its own. However, if for some
1850 * reason the decoder can not exist by itself (dead decoding
1851 * or whatever), then we may have to add another vdec API
1852 * to kill the vdec and release its HW resource and make it
1853 * become inactive again.
1854 * if ((core->active_vdec) &&
1855 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
1856 * }
1857 */
1858
fe96802b 1859 mutex_lock(&vdec_mutex);
b9164398
NQ
1860
1861 /* check disconnected decoders */
1862 list_for_each_entry_safe(vdec, tmp,
1863 &core->connected_vdec_list, list) {
1864 if ((vdec->status == VDEC_STATUS_CONNECTED) &&
1865 (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
1866 if (core->active_vdec == vdec)
1867 core->active_vdec = NULL;
1868 list_move(&vdec->list, &disconnecting_list);
1869 }
1870 }
1871
1872 /* activate next decoder instance if there is none */
1873 vdec = active_vdec(core);
1874
1875 if (!vdec) {
1876 /*
1877 *round-robin decoder scheduling
 1878 * start from the decoder after the previous active
 1879 * decoder instance; if there is none, start from the beginning
1880 */
1881 if (core->active_vdec)
1882 vdec = list_entry(
1883 core->active_vdec->list.next,
1884 struct vdec_s, list);
1885 else
1886 vdec = list_entry(
1887 core->connected_vdec_list.next,
1888 struct vdec_s, list);
1889
1890 list_for_each_entry_from(vdec,
1891 &core->connected_vdec_list, list) {
1892 if (vdec_ready_to_run(vdec))
1893 break;
1894 }
1895
1896 if ((&vdec->list == &core->connected_vdec_list) &&
1897 (core->active_vdec)) {
1898 /* search from beginning */
1899 list_for_each_entry(vdec,
1900 &core->connected_vdec_list, list) {
1901 if (vdec_ready_to_run(vdec))
1902 break;
1903
1904 if (vdec == core->active_vdec) {
1905 vdec = NULL;
1906 break;
1907 }
1908 }
1909 }
1910
1911 if (&vdec->list == &core->connected_vdec_list)
1912 vdec = NULL;
1913
1914 core->active_vdec = NULL;
1915 }
1916
fe96802b 1917 mutex_unlock(&vdec_mutex);
b9164398
NQ
1918
1919 /* start the vdec instance */
1920 if ((vdec) && (vdec->status != VDEC_STATUS_ACTIVE)) {
3f4a083c
HZ
1921 if (lastvdec != vdec)
1922 vdec->mc_loaded = 0;/*clear for reload firmware.*/
1923 if (debug & 2)
 1924 vdec->mc_loaded = 0;/*always reload firmware.*/
b9164398
NQ
1925 vdec_set_status(vdec, VDEC_STATUS_ACTIVE);
1926
 1927 /* activate the decoder instance to run */
1928 core->active_vdec = vdec;
fe96802b 1929#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
1930 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
1931#endif
fe96802b
NQ
1932 vdec_prepare_run(vdec);
1933
b9164398 1934 vdec->run(vdec, vdec_callback, core);
3f4a083c 1935 lastvdec = vdec;
b9164398
NQ
1936 }
1937
1938 /* remove disconnected decoder from active list */
1939 list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
1940 list_del(&vdec->list);
1941 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
1942 complete(&vdec->inactive_done);
1943 }
1944
1945 if (!core->active_vdec) {
1946 msleep(20);
1947 up(&core->sem);
1948 }
3f4a083c 1949
1950 }
1951
1952 return 0;
1953}
1954
1955#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
1956static bool test_hevc(u32 decomp_addr, u32 us_delay)
1957{
1958 int i;
1959
1960 /* SW_RESET IPP */
1961 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
1962 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);
1963
1964 /* initialize all canvas table */
1965 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
1966 for (i = 0; i < 32; i++)
1967 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
1968 0x1 | (i << 8) | decomp_addr);
1969 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
1970 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
1971 for (i = 0; i < 32; i++)
1972 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
1973
1974 /* Initialize mcrcc */
1975 WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
1976 WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
1977 WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
1978 WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
1979
1980 /* Decomp initialize */
1981 WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
1982 WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
1983
1984 /* Frame level initialization */
1985 WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
1986 WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
1987 WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
1988 WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));
1989
1990 WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
1991 WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);
1992
1993 /* Enable SWIMP mode */
1994 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);
1995
1996 /* Enable frame */
1997 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
1998 WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);
1999
2000 /* Send SW-command CTB info */
2001 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);
2002
2003 /* Send PU_command */
2004 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
2005 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
2006 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
2007 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);
2008
2009 udelay(us_delay);
2010
2011 WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);
2012
2013 return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
2014}
2015
2016void vdec_poweron(enum vdec_type_e core)
2017{
2018 void *decomp_addr = NULL;
2019 dma_addr_t decomp_dma_addr;
2020 u32 decomp_addr_aligned = 0;
2021 int hevc_loop = 0;
2022
2023 if (core >= VDEC_MAX)
2024 return;
2025
2026 mutex_lock(&vdec_mutex);
2027
2028 vdec_core->power_ref_count[core]++;
2029 if (vdec_core->power_ref_count[core] > 1) {
2030 mutex_unlock(&vdec_mutex);
2031 return;
2032 }
2033
2034 if (vdec_on(core)) {
2035 mutex_unlock(&vdec_mutex);
2036 return;
2037 }
2038
2039 if (hevc_workaround_needed() &&
2040 (core == VDEC_HEVC)) {
2041 decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
2042 SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);
2043
2044 if (decomp_addr) {
2045 decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
2046 memset((u8 *)decomp_addr +
2047 (decomp_addr_aligned - decomp_dma_addr),
2048 0xff, SZ_4K);
2049 } else
2050 pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
2051 }
2052
2053 if (core == VDEC_1) {
2054 /* vdec1 power on */
2055 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2056 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~0xc);
2057 /* wait 10uS */
2058 udelay(10);
2059 /* vdec1 soft reset */
2060 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2061 WRITE_VREG(DOS_SW_RESET0, 0);
2062 /* enable vdec1 clock */
2063 /*
 2064	 * add power-on vdec clock level setting, only for the m8 chip;
 2065	 * m8baby and m8m2 can adjust the vdec clock dynamically, so
 2066	 * power on with the default clock level
2067 */
2068 vdec_clock_hi_enable();
2069 /* power up vdec memories */
2070 WRITE_VREG(DOS_MEM_PD_VDEC, 0);
2071 /* remove vdec1 isolation */
2072 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2073 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~0xC0);
2074 /* reset DOS top registers */
2075 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
2076 if (get_cpu_type() >=
2077 MESON_CPU_MAJOR_ID_GXBB) {
2078 /*
2079 *enable VDEC_1 DMC request
2080 */
2081 unsigned long flags;
2082
2083 spin_lock_irqsave(&vdec_spin_lock, flags);
2084 codec_dmcbus_write(DMC_REQ_CTRL,
2085 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
2086 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2087 }
2088 } else if (core == VDEC_2) {
2089 if (has_vdec2()) {
2090 /* vdec2 power on */
2091 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2092 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2093 ~0x30);
2094 /* wait 10uS */
2095 udelay(10);
2096 /* vdec2 soft reset */
2097 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2098 WRITE_VREG(DOS_SW_RESET2, 0);
 2099	 /* enable vdec2 clock */
2100 vdec2_clock_hi_enable();
2101 /* power up vdec memories */
2102 WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
2103 /* remove vdec2 isolation */
2104 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2105 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2106 ~0x300);
2107 /* reset DOS top registers */
2108 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2109 }
2110 } else if (core == VDEC_HCODEC) {
2111 if (has_hdec()) {
2112 /* hcodec power on */
2113 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2114 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2115 ~0x3);
2116 /* wait 10uS */
2117 udelay(10);
2118 /* hcodec soft reset */
2119 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2120 WRITE_VREG(DOS_SW_RESET1, 0);
2121 /* enable hcodec clock */
2122 hcodec_clock_enable();
2123 /* power up hcodec memories */
2124 WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
2125 /* remove hcodec isolation */
2126 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2127 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2128 ~0x30);
2129 }
2130 } else if (core == VDEC_HEVC) {
2131 if (has_hevc_vdec()) {
2132 bool hevc_fixed = false;
2133
2134 while (!hevc_fixed) {
2135 /* hevc power on */
2136 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2137 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2138 ~0xc0);
2139 /* wait 10uS */
2140 udelay(10);
2141 /* hevc soft reset */
2142 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2143 WRITE_VREG(DOS_SW_RESET3, 0);
2144 /* enable hevc clock */
2145 hevc_clock_hi_enable();
2146 /* power up hevc memories */
2147 WRITE_VREG(DOS_MEM_PD_HEVC, 0);
2148 /* remove hevc isolation */
2149 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2150 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2151 ~0xc00);
2152
2153 if (!hevc_workaround_needed())
2154 break;
2155
2156 if (decomp_addr)
2157 hevc_fixed = test_hevc(
2158 decomp_addr_aligned, 20);
2159
2160 if (!hevc_fixed) {
2161 hevc_loop++;
2162
2163 mutex_unlock(&vdec_mutex);
2164
2165 if (hevc_loop >= HEVC_TEST_LIMIT) {
2166 pr_warn("hevc power sequence over limit\n");
2167 pr_warn("=====================================================\n");
2168 pr_warn(" This chip is identified to have HW failure.\n");
2169 pr_warn(" Please contact sqa-platform to replace the platform.\n");
2170 pr_warn("=====================================================\n");
2171
2172 panic("Force panic for chip detection !!!\n");
2173
2174 break;
2175 }
2176
2177 vdec_poweroff(VDEC_HEVC);
2178
2179 mdelay(10);
2180
2181 mutex_lock(&vdec_mutex);
2182 }
2183 }
2184
2185 if (hevc_loop > hevc_max_reset_count)
2186 hevc_max_reset_count = hevc_loop;
2187
2188 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2189 udelay(10);
2190 WRITE_VREG(DOS_SW_RESET3, 0);
2191 }
2192 }
2193
2194 if (decomp_addr)
2195 codec_mm_dma_free_coherent(MEM_NAME,
2196 SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);
2197
2198 mutex_unlock(&vdec_mutex);
2199}
2200EXPORT_SYMBOL(vdec_poweron);
2201
2202void vdec_poweroff(enum vdec_type_e core)
2203{
2204 if (core >= VDEC_MAX)
2205 return;
2206
2207 mutex_lock(&vdec_mutex);
2208
2209 vdec_core->power_ref_count[core]--;
2210 if (vdec_core->power_ref_count[core] > 0) {
2211 mutex_unlock(&vdec_mutex);
2212 return;
2213 }
2214
2215 if (core == VDEC_1) {
2216 if (get_cpu_type() >=
2217 MESON_CPU_MAJOR_ID_GXBB) {
2218 /* disable VDEC_1 DMC REQ*/
2219 unsigned long flags;
2220
2221 spin_lock_irqsave(&vdec_spin_lock, flags);
2222 codec_dmcbus_write(DMC_REQ_CTRL,
2223 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
2224 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2225 udelay(10);
2226 }
2227 /* enable vdec1 isolation */
2228 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2229 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
2230 /* power off vdec1 memories */
2231 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
2232 /* disable vdec1 clock */
2233 vdec_clock_off();
2234 /* vdec1 power off */
2235 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2236 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
2237 } else if (core == VDEC_2) {
2238 if (has_vdec2()) {
2239 /* enable vdec2 isolation */
2240 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2241 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2242 0x300);
2243 /* power off vdec2 memories */
2244 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
2245 /* disable vdec2 clock */
2246 vdec2_clock_off();
2247 /* vdec2 power off */
2248 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2249 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
2250 0x30);
2251 }
2252 } else if (core == VDEC_HCODEC) {
2253 if (has_hdec()) {
2254 /* enable hcodec isolation */
2255 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2256 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2257 0x30);
2258 /* power off hcodec memories */
2259 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
2260 /* disable hcodec clock */
2261 hcodec_clock_off();
2262 /* hcodec power off */
2263 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2264 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
2265 }
2266 } else if (core == VDEC_HEVC) {
2267 if (has_hevc_vdec()) {
2268 if (no_powerdown == 0) {
2269 /* enable hevc isolation */
2270 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2271 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2272 0xc00);
2273 /* power off hevc memories */
2274 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
2275 /* disable hevc clock */
2276 hevc_clock_off();
2277 /* hevc power off */
2278 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2279 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
2280 0xc0);
2281 } else {
2282 pr_info("!!!!!!!!not power down\n");
2283 hevc_reset_core(NULL);
2284 no_powerdown = 0;
2285 }
2286 }
2287 }
2288 mutex_unlock(&vdec_mutex);
2289}
2290EXPORT_SYMBOL(vdec_poweroff);
2291
2292bool vdec_on(enum vdec_type_e core)
2293{
2294 bool ret = false;
2295
2296 if (core == VDEC_1) {
2297 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc) == 0) &&
2298 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
2299 ret = true;
2300 } else if (core == VDEC_2) {
2301 if (has_vdec2()) {
2302 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
2303 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
2304 ret = true;
2305 }
2306 } else if (core == VDEC_HCODEC) {
2307 if (has_hdec()) {
2308 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x3) == 0) &&
2309 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
2310 ret = true;
2311 }
2312 } else if (core == VDEC_HEVC) {
2313 if (has_hevc_vdec()) {
2314 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc0) == 0) &&
2315 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
2316 ret = true;
2317 }
2318 }
2319
2320 return ret;
2321}
2322EXPORT_SYMBOL(vdec_on);
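/*
 * Illustrative sketch only (not part of this driver): how a decoder
 * front-end would be expected to use the refcounted power helpers
 * above. vdec_poweron() only touches the hardware when the per-core
 * power_ref_count goes from 0 to 1, and vdec_poweroff() only powers
 * the core down when the count drops back to 0, so every poweron
 * must be paired with exactly one poweroff. The my_* names below are
 * hypothetical.
 */
#if 0
static int my_decoder_hw_init(void)
{
	vdec_poweron(VDEC_HEVC);

	if (!vdec_on(VDEC_HEVC)) {
		/* drop our reference again if the core did not come up */
		vdec_poweroff(VDEC_HEVC);
		return -ENODEV;
	}

	return 0;
}

static void my_decoder_hw_exit(void)
{
	/* pair with the poweron taken in my_decoder_hw_init() */
	vdec_poweroff(VDEC_HEVC);
}
#endif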
2323
2324#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
2325void vdec_poweron(enum vdec_type_e core)
2326{
2327 ulong flags;
2328
2329 spin_lock_irqsave(&lock, flags);
2330
2331 if (core == VDEC_1) {
2332 /* vdec1 soft reset */
2333 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2334 WRITE_VREG(DOS_SW_RESET0, 0);
2335 /* enable vdec1 clock */
2336 vdec_clock_enable();
2337 /* reset DOS top registers */
2338 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
2339 } else if (core == VDEC_2) {
2340 /* vdec2 soft reset */
2341 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2342 WRITE_VREG(DOS_SW_RESET2, 0);
2343 /* enable vdec2 clock */
2344 vdec2_clock_enable();
2345 /* reset DOS top registers */
2346 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2347 } else if (core == VDEC_HCODEC) {
2348 /* hcodec soft reset */
2349 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2350 WRITE_VREG(DOS_SW_RESET1, 0);
2351 /* enable hcodec clock */
2352 hcodec_clock_enable();
2353 }
2354
2355 spin_unlock_irqrestore(&lock, flags);
2356}
2357
2358void vdec_poweroff(enum vdec_type_e core)
2359{
2360 ulong flags;
2361
2362 spin_lock_irqsave(&lock, flags);
2363
2364 if (core == VDEC_1) {
2365 /* disable vdec1 clock */
2366 vdec_clock_off();
2367 } else if (core == VDEC_2) {
2368 /* disable vdec2 clock */
2369 vdec2_clock_off();
2370 } else if (core == VDEC_HCODEC) {
2371 /* disable hcodec clock */
2372 hcodec_clock_off();
2373 }
2374
2375 spin_unlock_irqrestore(&lock, flags);
2376}
2377
2378bool vdec_on(enum vdec_type_e core)
2379{
2380 bool ret = false;
2381
2382 if (core == VDEC_1) {
2383 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
2384 ret = true;
2385 } else if (core == VDEC_2) {
2386 if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
2387 ret = true;
2388 } else if (core == VDEC_HCODEC) {
2389 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
2390 ret = true;
2391 }
2392
2393 return ret;
2394}
2395#endif
2396
2397int vdec_source_changed(int format, int width, int height, int fps)
2398{
2399 /* todo: add level routines for clock adjustment per chips */
2400 int ret = -1;
2401 static int on_setting;
2402
2403 if (on_setting > 0)
 2404	 return ret;/* clk change in progress, ignore this request */
2405
2406 if (vdec_source_get(VDEC_1) == width * height * fps)
2407 return ret;
2408
2409
2410 on_setting = 1;
2411 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 2412 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
2413 width, height, fps, vdec_clk_get(VDEC_1));
2414 on_setting = 0;
2415 return ret;
2416
2417}
2418EXPORT_SYMBOL(vdec_source_changed);
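/*
 * Illustrative sketch only: how a decoder might report a new source
 * format through vdec_source_changed() so the VDEC_1 clock level can
 * be adjusted. The call is ignored (returns -1) while a clock change
 * is already in progress or when width * height * fps matches the
 * current source setting; otherwise the return value comes from
 * vdec_source_changed_for_clk_set(). The my_* names are hypothetical.
 */
#if 0
static void my_on_resolution_change(int format, int w, int h, int fps)
{
	/* e.g. called after parsing a new sequence header */
	vdec_source_changed(format, w, h, fps);
}
#endif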
2419
2420void hevc_reset_core(struct vdec_s *vdec)
2421{
2422 unsigned long flags;
2423 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
2424 spin_lock_irqsave(&vdec_spin_lock, flags);
2425 codec_dmcbus_write(DMC_REQ_CTRL,
2426 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
2427 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2428
2429 while (!(codec_dmcbus_read(DMC_CHAN_STS)
2430 & (1 << 4)))
2431 ;
2432
28e318df 2433 if (vdec == NULL || input_frame_based(vdec))
2434 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
2435
2436 /*
2437 * 2: assist
2438 * 3: parser
2439 * 4: parser_state
2440 * 8: dblk
2441 * 11:mcpu
2442 * 12:ccpu
2443 * 13:ddr
2444 * 14:iqit
2445 * 15:ipp
2446 * 17:qdct
2447 * 18:mpred
2448 * 19:sao
2449 * 24:hevc_afifo
2450 */
2451 WRITE_VREG(DOS_SW_RESET3,
2452 (1<<3)|(1<<4)|(1<<8)|(1<<11)|
2453 (1<<12)|(1<<13)|(1<<14)|(1<<15)|
2454 (1<<17)|(1<<18)|(1<<19)|(1<<24));
2455
2456 WRITE_VREG(DOS_SW_RESET3, 0);
2457
2458
2459 spin_lock_irqsave(&vdec_spin_lock, flags);
2460 codec_dmcbus_write(DMC_REQ_CTRL,
2461 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4));
2462 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2463
2464}
2465EXPORT_SYMBOL(hevc_reset_core);
2466
2467int vdec2_source_changed(int format, int width, int height, int fps)
2468{
2469 int ret = -1;
2470 static int on_setting;
2471
2472 if (has_vdec2()) {
2473 /* todo: add level routines for clock adjustment per chips */
2474 if (on_setting != 0)
 2475	 return ret;/* clk change in progress, ignore this request */
2476
2477 if (vdec_source_get(VDEC_2) == width * height * fps)
2478 return ret;
2479
2480 on_setting = 1;
2481 ret = vdec_source_changed_for_clk_set(format,
2482 width, height, fps);
5b851ff9 2483 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
2484 width, height, fps, vdec_clk_get(VDEC_2));
2485 on_setting = 0;
2486 return ret;
2487 }
2488 return 0;
2489}
2490EXPORT_SYMBOL(vdec2_source_changed);
2491
2492int hevc_source_changed(int format, int width, int height, int fps)
2493{
2494 /* todo: add level routines for clock adjustment per chips */
2495 int ret = -1;
2496 static int on_setting;
2497
2498 if (on_setting != 0)
 2499	 return ret;/* clk change in progress, ignore this request */
2500
2501 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
2502 return ret;
2503
2504 on_setting = 1;
2505 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 2506 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
2507 width, height, fps, vdec_clk_get(VDEC_HEVC));
2508 on_setting = 0;
2509
2510 return ret;
2511}
2512EXPORT_SYMBOL(hevc_source_changed);
2513
2514static struct am_reg am_risc[] = {
2515 {"MSP", 0x300},
2516 {"MPSR", 0x301},
2517 {"MCPU_INT_BASE", 0x302},
2518 {"MCPU_INTR_GRP", 0x303},
2519 {"MCPU_INTR_MSK", 0x304},
2520 {"MCPU_INTR_REQ", 0x305},
2521 {"MPC-P", 0x306},
2522 {"MPC-D", 0x307},
2523 {"MPC_E", 0x308},
2524 {"MPC_W", 0x309},
2525 {"CSP", 0x320},
2526 {"CPSR", 0x321},
2527 {"CCPU_INT_BASE", 0x322},
2528 {"CCPU_INTR_GRP", 0x323},
2529 {"CCPU_INTR_MSK", 0x324},
2530 {"CCPU_INTR_REQ", 0x325},
2531 {"CPC-P", 0x326},
2532 {"CPC-D", 0x327},
2533 {"CPC_E", 0x328},
2534 {"CPC_W", 0x329},
2535 {"AV_SCRATCH_0", 0x09c0},
2536 {"AV_SCRATCH_1", 0x09c1},
2537 {"AV_SCRATCH_2", 0x09c2},
2538 {"AV_SCRATCH_3", 0x09c3},
2539 {"AV_SCRATCH_4", 0x09c4},
2540 {"AV_SCRATCH_5", 0x09c5},
2541 {"AV_SCRATCH_6", 0x09c6},
2542 {"AV_SCRATCH_7", 0x09c7},
2543 {"AV_SCRATCH_8", 0x09c8},
2544 {"AV_SCRATCH_9", 0x09c9},
2545 {"AV_SCRATCH_A", 0x09ca},
2546 {"AV_SCRATCH_B", 0x09cb},
2547 {"AV_SCRATCH_C", 0x09cc},
2548 {"AV_SCRATCH_D", 0x09cd},
2549 {"AV_SCRATCH_E", 0x09ce},
2550 {"AV_SCRATCH_F", 0x09cf},
2551 {"AV_SCRATCH_G", 0x09d0},
2552 {"AV_SCRATCH_H", 0x09d1},
2553 {"AV_SCRATCH_I", 0x09d2},
2554 {"AV_SCRATCH_J", 0x09d3},
2555 {"AV_SCRATCH_K", 0x09d4},
2556 {"AV_SCRATCH_L", 0x09d5},
2557 {"AV_SCRATCH_M", 0x09d6},
2558 {"AV_SCRATCH_N", 0x09d7},
2559};
2560
2561static ssize_t amrisc_regs_show(struct class *class,
2562 struct class_attribute *attr, char *buf)
2563{
2564 char *pbuf = buf;
2565 struct am_reg *regs = am_risc;
2566 int rsize = sizeof(am_risc) / sizeof(struct am_reg);
2567 int i;
2568 unsigned val;
2569 ssize_t ret;
2570
2571 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) {
2572 mutex_lock(&vdec_mutex);
2573 if (!vdec_on(VDEC_1)) {
2574 mutex_unlock(&vdec_mutex);
2575 pbuf += sprintf(pbuf, "amrisc is power off\n");
2576 ret = pbuf - buf;
2577 return ret;
2578 }
2579 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2580 /*TODO:M6 define */
2581 /*
2582 * switch_mod_gate_by_type(MOD_VDEC, 1);
2583 */
2584 amports_switch_gate("vdec", 1);
2585 }
2586 pbuf += sprintf(pbuf, "amrisc registers show:\n");
2587 for (i = 0; i < rsize; i++) {
2588 val = READ_VREG(regs[i].offset);
2589 pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
2590 regs[i].name, regs[i].offset, val, val);
2591 }
2592 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
2593 mutex_unlock(&vdec_mutex);
2594 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2595 /*TODO:M6 define */
2596 /*
2597 * switch_mod_gate_by_type(MOD_VDEC, 0);
2598 */
2599 amports_switch_gate("vdec", 0);
2600 }
2601 ret = pbuf - buf;
2602 return ret;
2603}
2604
2605static ssize_t dump_trace_show(struct class *class,
2606 struct class_attribute *attr, char *buf)
2607{
2608 int i;
2609 char *pbuf = buf;
2610 ssize_t ret;
2611 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
2612
2613 if (!trace_buf) {
 2614	 pbuf += sprintf(pbuf, "no memory for trace buffer\n");
2615 ret = pbuf - buf;
2616 return ret;
2617 }
2618 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) {
2619 mutex_lock(&vdec_mutex);
2620 if (!vdec_on(VDEC_1)) {
2621 mutex_unlock(&vdec_mutex);
2622 kfree(trace_buf);
2623 pbuf += sprintf(pbuf, "amrisc is power off\n");
2624 ret = pbuf - buf;
2625 return ret;
2626 }
2627 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2628 /*TODO:M6 define */
2629 /*
2630 * switch_mod_gate_by_type(MOD_VDEC, 1);
2631 */
2632 amports_switch_gate("vdec", 1);
2633 }
2634 pr_info("dump trace steps:%d start\n", debug_trace_num);
2635 i = 0;
2636 while (i <= debug_trace_num - 16) {
2637 trace_buf[i] = READ_VREG(MPC_E);
2638 trace_buf[i + 1] = READ_VREG(MPC_E);
2639 trace_buf[i + 2] = READ_VREG(MPC_E);
2640 trace_buf[i + 3] = READ_VREG(MPC_E);
2641 trace_buf[i + 4] = READ_VREG(MPC_E);
2642 trace_buf[i + 5] = READ_VREG(MPC_E);
2643 trace_buf[i + 6] = READ_VREG(MPC_E);
2644 trace_buf[i + 7] = READ_VREG(MPC_E);
2645 trace_buf[i + 8] = READ_VREG(MPC_E);
2646 trace_buf[i + 9] = READ_VREG(MPC_E);
2647 trace_buf[i + 10] = READ_VREG(MPC_E);
2648 trace_buf[i + 11] = READ_VREG(MPC_E);
2649 trace_buf[i + 12] = READ_VREG(MPC_E);
2650 trace_buf[i + 13] = READ_VREG(MPC_E);
2651 trace_buf[i + 14] = READ_VREG(MPC_E);
2652 trace_buf[i + 15] = READ_VREG(MPC_E);
2653 i += 16;
 2654	 }
2655 pr_info("dump trace steps:%d finished\n", debug_trace_num);
2656 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
2657 mutex_unlock(&vdec_mutex);
2658 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2659 /*TODO:M6 define */
2660 /*
2661 * switch_mod_gate_by_type(MOD_VDEC, 0);
2662 */
2663 amports_switch_gate("vdec", 0);
2664 }
2665 for (i = 0; i < debug_trace_num; i++) {
2666 if (i % 4 == 0) {
2667 if (i % 16 == 0)
2668 pbuf += sprintf(pbuf, "\n");
2669 else if (i % 8 == 0)
2670 pbuf += sprintf(pbuf, " ");
2671 else /* 4 */
2672 pbuf += sprintf(pbuf, " ");
2673 }
2674 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
2675 }
2678 kfree(trace_buf);
2679 pbuf += sprintf(pbuf, "\n");
2680 ret = pbuf - buf;
2681 return ret;
2682}
2683
2684static ssize_t clock_level_show(struct class *class,
2685 struct class_attribute *attr, char *buf)
2686{
2687 char *pbuf = buf;
 2688	 ssize_t ret;
2689
2690 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
2691
2692 if (has_vdec2())
2693 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
2694
2695 if (has_hevc_vdec())
2696 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
2697
2698 ret = pbuf - buf;
2699 return ret;
2700}
2701
2702static ssize_t store_poweron_clock_level(struct class *class,
2703 struct class_attribute *attr,
2704 const char *buf, size_t size)
2705{
2706 unsigned val;
2707 ssize_t ret;
2708
2709 /*ret = sscanf(buf, "%d", &val);*/
 2710	 ret = kstrtouint(buf, 0, &val);
2711
2712 if (ret != 0)
2713 return -EINVAL;
2714 poweron_clock_level = val;
2715 return size;
2716}
2717
2718static ssize_t show_poweron_clock_level(struct class *class,
2719 struct class_attribute *attr, char *buf)
2720{
2721 return sprintf(buf, "%d\n", poweron_clock_level);
2722}
2723
2724/*
 2725* if keep_vdec_mem == 1, never release the vdec 64M
 2726* memory, so the next playback can start faster
 2727* (see the usage note after these handlers).
2728*/
2729static ssize_t store_keep_vdec_mem(struct class *class,
2730 struct class_attribute *attr,
2731 const char *buf, size_t size)
2732{
2733 unsigned val;
2734 ssize_t ret;
2735
2736 /*ret = sscanf(buf, "%d", &val);*/
 2737	 ret = kstrtouint(buf, 0, &val);
2738 if (ret != 0)
2739 return -EINVAL;
2740 keep_vdec_mem = val;
2741 return size;
2742}
2743
2744static ssize_t show_keep_vdec_mem(struct class *class,
2745 struct class_attribute *attr, char *buf)
2746{
2747 return sprintf(buf, "%d\n", keep_vdec_mem);
2748}
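/*
 * Usage note (for illustration): with the "vdec" class registered
 * further below, this pair of handlers is normally reachable from
 * user space as /sys/class/vdec/keep_vdec_mem, e.g.
 *	echo 1 > /sys/class/vdec/keep_vdec_mem
 * to keep the vdec memory across playbacks for a faster start.
 */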
2749
2750
 2751/* irq numbers are the same as in the .dts */
2752/*
2753* interrupts = <0 3 1
2754* 0 23 1
2755* 0 32 1
2756* 0 43 1
2757* 0 44 1
2758* 0 45 1>;
2759* interrupt-names = "vsync",
2760* "demux",
2761* "parser",
2762* "mailbox_0",
2763* "mailbox_1",
2764* "mailbox_2";
2765*/
2766s32 vdec_request_threaded_irq(enum vdec_irq_num num,
2767 irq_handler_t handler,
2768 irq_handler_t thread_fn,
2769 unsigned long irqflags,
2770 const char *devname, void *dev)
2771{
2772 s32 res_irq;
2773 s32 ret = 0;
2774
2775 if (num >= VDEC_IRQ_MAX) {
2776 pr_err("[%s] request irq error, irq num too big!", __func__);
2777 return -EINVAL;
2778 }
2779
2780 if (vdec_core->isr_context[num].irq < 0) {
2781 res_irq = platform_get_irq(
2782 vdec_core->vdec_core_platform_device, num);
2783 if (res_irq < 0) {
2784 pr_err("[%s] get irq error!", __func__);
2785 return -EINVAL;
2786 }
2787
2788 vdec_core->isr_context[num].irq = res_irq;
2789 vdec_core->isr_context[num].dev_isr = handler;
2790 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
2791 vdec_core->isr_context[num].dev_id = dev;
2792
2793 ret = request_threaded_irq(res_irq,
2794 vdec_isr,
2795 vdec_thread_isr,
2796 (thread_fn) ? IRQF_ONESHOT : irqflags,
2797 devname,
2798 &vdec_core->isr_context[num]);
2799
2800 if (ret) {
2801 vdec_core->isr_context[num].irq = -1;
2802 vdec_core->isr_context[num].dev_isr = NULL;
2803 vdec_core->isr_context[num].dev_threaded_isr = NULL;
2804 vdec_core->isr_context[num].dev_id = NULL;
2805
2806 pr_err("vdec irq register error for %s.\n", devname);
2807 return -EIO;
2808 }
2809 } else {
2810 vdec_core->isr_context[num].dev_isr = handler;
2811 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
2812 vdec_core->isr_context[num].dev_id = dev;
2813 }
2814
2815 return ret;
2816}
2817EXPORT_SYMBOL(vdec_request_threaded_irq);
2818
2819s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
2820 const char *devname, void *dev)
2821{
5b851ff9 2822 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
2823
2824 return vdec_request_threaded_irq(num,
2825 handler,
2826 NULL,/*no thread_fn*/
2827 IRQF_SHARED,
2828 devname,
2829 dev);
2830}
2831EXPORT_SYMBOL(vdec_request_irq);
2832
2833void vdec_free_irq(enum vdec_irq_num num, void *dev)
2834{
2835 if (num >= VDEC_IRQ_MAX) {
 2836	 pr_err("[%s] free irq error, irq num too big!", __func__);
2837 return;
2838 }
2839
2840 synchronize_irq(vdec_core->isr_context[num].irq);
2841
2842 /*
2843 *assume amrisc is stopped already and there is no mailbox interrupt
2844 * when we reset pointers here.
2845 */
2846 vdec_core->isr_context[num].dev_isr = NULL;
2847 vdec_core->isr_context[num].dev_threaded_isr = NULL;
2848 vdec_core->isr_context[num].dev_id = NULL;
2849}
2850EXPORT_SYMBOL(vdec_free_irq);
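/*
 * Illustrative sketch only: how a decoder front-end might attach its
 * interrupt handler through the wrappers above instead of calling
 * request_irq() directly. vdec_request_irq() installs the handler
 * behind the shared vdec_isr()/vdec_thread_isr() entry points, and
 * vdec_free_irq() only detaches the callbacks after synchronize_irq();
 * the irq line itself stays requested by the core. The my_* names and
 * the handler body are hypothetical; VDEC_IRQ_1 is the same index the
 * core probe code below uses.
 */
#if 0
static irqreturn_t my_mbox_isr(int irq, void *dev_id)
{
	/* read mailbox registers, queue work, etc. */
	return IRQ_HANDLED;
}

static int my_attach_irq(void *dev)
{
	return vdec_request_irq(VDEC_IRQ_1, my_mbox_isr, "my-decoder", dev);
}

static void my_detach_irq(void *dev)
{
	vdec_free_irq(VDEC_IRQ_1, dev);
}
#endif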
2851
2852static int dump_mode;
2853static ssize_t dump_risc_mem_store(struct class *class,
2854 struct class_attribute *attr,
2855 const char *buf, size_t size)/*set*/
2856{
2857 unsigned val;
2858 ssize_t ret;
2859 char dump_mode_str[4] = "PRL";
2860
2861 /*ret = sscanf(buf, "%d", &val);*/
 2862	 ret = kstrtouint(buf, 0, &val);
2863
2864 if (ret != 0)
2865 return -EINVAL;
2866 dump_mode = val & 0x3;
2867 pr_info("set dump mode to %d,%c_mem\n",
2868 dump_mode, dump_mode_str[dump_mode]);
2869 return size;
2870}
2871static u32 read_amrisc_reg(int reg)
2872{
2873 WRITE_VREG(0x31b, reg);
2874 return READ_VREG(0x31c);
2875}
2876
2877static void dump_pmem(void)
2878{
2879 int i;
2880
2881 WRITE_VREG(0x301, 0x8000);
2882 WRITE_VREG(0x31d, 0);
2883 pr_info("start dump amrisc pmem of risc\n");
2884 for (i = 0; i < 0xfff; i++) {
2885 /*same as .o format*/
2886 pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
2887 }
2888}
2889
2890static void dump_lmem(void)
2891{
2892 int i;
2893
2894 WRITE_VREG(0x301, 0x8000);
2895 WRITE_VREG(0x31d, 2);
2896 pr_info("start dump amrisc lmem\n");
2897 for (i = 0; i < 0x3ff; i++) {
2898 /*same as */
2899 pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
2900 }
2901}
2902
2903static ssize_t dump_risc_mem_show(struct class *class,
2904 struct class_attribute *attr, char *buf)
2905{
2906 char *pbuf = buf;
2907 int ret;
2908
2909 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) {
2910 mutex_lock(&vdec_mutex);
2911 if (!vdec_on(VDEC_1)) {
2912 mutex_unlock(&vdec_mutex);
2913 pbuf += sprintf(pbuf, "amrisc is power off\n");
2914 ret = pbuf - buf;
2915 return ret;
2916 }
2917 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2918 /*TODO:M6 define */
2919 /*
2920 * switch_mod_gate_by_type(MOD_VDEC, 1);
2921 */
2922 amports_switch_gate("vdec", 1);
2923 }
 2924	 /* start dump */
2925 switch (dump_mode) {
2926 case 0:
2927 dump_pmem();
2928 break;
2929 case 2:
2930 dump_lmem();
2931 break;
2932 default:
2933 break;
2934 }
2935
2936 /*done*/
2937 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
2938 mutex_unlock(&vdec_mutex);
2939 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2940 /*TODO:M6 define */
2941 /*
2942 * switch_mod_gate_by_type(MOD_VDEC, 0);
2943 */
2944 amports_switch_gate("vdec", 0);
2945 }
2946 return sprintf(buf, "done\n");
2947}
2948
2949static ssize_t core_show(struct class *class, struct class_attribute *attr,
2950 char *buf)
2951{
2952 struct vdec_core_s *core = vdec_core;
2953 char *pbuf = buf;
2954
2955 if (list_empty(&core->connected_vdec_list))
2956 pbuf += sprintf(pbuf, "connected vdec list empty\n");
2957 else {
2958 struct vdec_s *vdec;
2959
2960 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
2961 pbuf += sprintf(pbuf,
2962 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s\n",
2963 vdec->id,
2964 vdec,
2965 vdec_device_name[vdec->format * 2],
2966 vdec_status_str(vdec),
2967 vdec_type_str(vdec));
2968 }
2969 }
2970
2971 return pbuf - buf;
2972}
2973
2974static ssize_t vdec_status_show(struct class *class,
2975 struct class_attribute *attr, char *buf)
2976{
2977 char *pbuf = buf;
2978 struct vdec_s *vdec;
2979 struct vdec_info vs;
2980 unsigned char vdec_num = 0;
2981 struct vdec_core_s *core = vdec_core;
2982 unsigned long flags = vdec_core_lock(vdec_core);
2983
2984 if (list_empty(&core->connected_vdec_list)) {
2985 pbuf += sprintf(pbuf, "No vdec.\n");
2986 goto out;
2987 }
2988
2989 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
2990 if (VDEC_STATUS_CONNECTED == vdec->status) {
2991 memset(&vs, 0, sizeof(vs));
2992 if (vdec_status(vdec, &vs)) {
2993 pbuf += sprintf(pbuf, "err.\n");
2994 goto out;
2995 }
2996 pbuf += sprintf(pbuf,
2997 "vdec channel %u statistics:\n",
2998 vdec_num);
2999 pbuf += sprintf(pbuf,
3000 "%13s : %s\n", "device name",
3001 vs.vdec_name);
3002 pbuf += sprintf(pbuf,
3003 "%13s : %u\n", "frame width",
3004 vs.frame_width);
3005 pbuf += sprintf(pbuf,
3006 "%13s : %u\n", "frame height",
3007 vs.frame_height);
3008 pbuf += sprintf(pbuf,
3009 "%13s : %u %s\n", "frame rate",
3010 vs.frame_rate, "fps");
3011 pbuf += sprintf(pbuf,
3012 "%13s : %u %s\n", "bit rate",
3013 vs.bit_rate / 1024 * 8, "kbps");
3014 pbuf += sprintf(pbuf,
3015 "%13s : %u\n", "status",
3016 vs.status);
3017 pbuf += sprintf(pbuf,
3018 "%13s : %u\n", "frame dur",
3019 vs.frame_dur);
3020 pbuf += sprintf(pbuf,
3021 "%13s : %u %s\n", "frame data",
3022 vs.frame_data / 1024, "KB");
3023 pbuf += sprintf(pbuf,
3024 "%13s : %u\n", "frame count",
3025 vs.frame_count);
3026 pbuf += sprintf(pbuf,
3027 "%13s : %u\n", "drop count",
3028 vs.drop_frame_count);
3029 pbuf += sprintf(pbuf,
3030 "%13s : %u\n", "fra err count",
3031 vs.error_frame_count);
3032 pbuf += sprintf(pbuf,
3033 "%13s : %u\n", "hw err count",
3034 vs.error_count);
3035 pbuf += sprintf(pbuf,
3036 "%13s : %llu %s\n\n", "total data",
3037 vs.total_data / 1024, "KB");
3038
3039 vdec_num++;
3040 }
3041 }
3042out:
3043 vdec_core_unlock(vdec_core, flags);
3044 return pbuf - buf;
3045}
3046
3047static ssize_t dump_vdec_blocks_show(struct class *class,
3048 struct class_attribute *attr, char *buf)
3049{
3050 struct vdec_core_s *core = vdec_core;
3051 char *pbuf = buf;
3052
3053 if (list_empty(&core->connected_vdec_list))
3054 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3055 else {
3056 struct vdec_s *vdec;
3057 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3058 pbuf += vdec_input_dump_blocks(&vdec->input,
3059 pbuf, PAGE_SIZE - (pbuf - buf));
3060 }
3061 }
3062
3063 return pbuf - buf;
3064}
3065static ssize_t dump_vdec_chunks_show(struct class *class,
3066 struct class_attribute *attr, char *buf)
3067{
3068 struct vdec_core_s *core = vdec_core;
3069 char *pbuf = buf;
3070
3071 if (list_empty(&core->connected_vdec_list))
3072 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3073 else {
3074 struct vdec_s *vdec;
3075 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3076 pbuf += vdec_input_dump_chunks(&vdec->input,
3077 pbuf, PAGE_SIZE - (pbuf - buf));
3078 }
3079 }
3080
3081 return pbuf - buf;
3082}
3083
3084static ssize_t dump_decoder_state_show(struct class *class,
3085 struct class_attribute *attr, char *buf)
3086{
3087 char *pbuf = buf;
3088 struct vdec_s *vdec;
3089 struct vdec_core_s *core = vdec_core;
3090 unsigned long flags = vdec_core_lock(vdec_core);
3091
3092 if (list_empty(&core->connected_vdec_list)) {
3093 pbuf += sprintf(pbuf, "No vdec.\n");
3094 } else {
3095 list_for_each_entry(vdec,
3096 &core->connected_vdec_list, list) {
3097 if ((vdec->status == VDEC_STATUS_CONNECTED
3098 || vdec->status == VDEC_STATUS_ACTIVE)
3099 && vdec->dump_state)
3100 vdec->dump_state(vdec);
3101 }
3102 }
3103 vdec_core_unlock(vdec_core, flags);
3104
3105 return pbuf - buf;
3106}
d481db31 3107
d481db31 3108
fe96802b 3109
3110static struct class_attribute vdec_class_attrs[] = {
3111 __ATTR_RO(amrisc_regs),
3112 __ATTR_RO(dump_trace),
3113 __ATTR_RO(clock_level),
3114 __ATTR(poweron_clock_level, S_IRUGO | S_IWUSR | S_IWGRP,
3115 show_poweron_clock_level, store_poweron_clock_level),
3116 __ATTR(dump_risc_mem, S_IRUGO | S_IWUSR | S_IWGRP,
3117 dump_risc_mem_show, dump_risc_mem_store),
3118 __ATTR(keep_vdec_mem, S_IRUGO | S_IWUSR | S_IWGRP,
3119 show_keep_vdec_mem, store_keep_vdec_mem),
3120 __ATTR_RO(core),
3121 __ATTR_RO(vdec_status),
3122 __ATTR_RO(dump_vdec_blocks),
3123 __ATTR_RO(dump_vdec_chunks),
d481db31 3124 __ATTR_RO(dump_decoder_state),
3125 __ATTR_NULL
3126};
3127
3128static struct class vdec_class = {
3129 .name = "vdec",
3130 .class_attrs = vdec_class_attrs,
3131 };
3132
3133struct device *get_vdec_device(void)
3134{
3135 return &vdec_core->vdec_core_platform_device->dev;
3136}
3137EXPORT_SYMBOL(get_vdec_device);
3138
3139static int vdec_probe(struct platform_device *pdev)
3140{
3141 s32 i, r;
3142
3143 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
3144 sizeof(struct vdec_core_s), GFP_KERNEL);
3145 if (vdec_core == NULL) {
3146 pr_err("vdec core allocation failed.\n");
3147 return -ENOMEM;
3148 }
3149
3150 atomic_set(&vdec_core->vdec_nr, 0);
3151 sema_init(&vdec_core->sem, 1);
3152
3153 r = class_register(&vdec_class);
3154 if (r) {
3155 pr_info("vdec class create fail.\n");
3156 return r;
3157 }
3158
3159 vdec_core->vdec_core_platform_device = pdev;
3160
3161 platform_set_drvdata(pdev, vdec_core);
3162
3163 for (i = 0; i < VDEC_IRQ_MAX; i++) {
3164 vdec_core->isr_context[i].index = i;
3165 vdec_core->isr_context[i].irq = -1;
3166 }
3167
3168 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
3169 IRQF_ONESHOT, "vdec-1", NULL);
3170 if (r < 0) {
3171 pr_err("vdec interrupt request failed\n");
3172 return r;
3173 }
3174
3175 r = of_reserved_mem_device_init(&pdev->dev);
3176 if (r == 0)
3177 pr_info("vdec_probe done\n");
3178
3179 vdec_core->cma_dev = &pdev->dev;
3180
3181 if (get_cpu_type() < MESON_CPU_MAJOR_ID_M8) {
3182 /* default to 250MHz */
3183 vdec_clock_hi_enable();
3184 }
3185
3186 if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
3187 /* set vdec dmc request to urgent */
3188 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
3189 }
3190 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
3191 spin_lock_init(&vdec_core->lock);
fe96802b 3192 ida_init(&vdec_core->ida);
3193 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
3194 "vdec-core");
3195
3196 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s",
3197 __WQ_LEGACY |
3198 WQ_MEM_RECLAIM |
 3199	 WQ_HIGHPRI, /* high priority */
 3201	 "vdec-work");
3202 /*work queue priority lower than vdec-core.*/
3203 return 0;
3204}
3205
3206static int vdec_remove(struct platform_device *pdev)
3207{
3208 int i;
3209
3210 for (i = 0; i < VDEC_IRQ_MAX; i++) {
3211 if (vdec_core->isr_context[i].irq >= 0) {
3212 free_irq(vdec_core->isr_context[i].irq,
3213 &vdec_core->isr_context[i]);
3214 vdec_core->isr_context[i].irq = -1;
3215 vdec_core->isr_context[i].dev_isr = NULL;
3216 vdec_core->isr_context[i].dev_threaded_isr = NULL;
3217 vdec_core->isr_context[i].dev_id = NULL;
3218 }
3219 }
3220
3221 kthread_stop(vdec_core->thread);
3222
fe96802b 3223 destroy_workqueue(vdec_core->vdec_core_wq);
3224 class_unregister(&vdec_class);
3225
3226 return 0;
3227}
3228
3229static const struct of_device_id amlogic_vdec_dt_match[] = {
3230 {
3231 .compatible = "amlogic, vdec",
3232 },
3233 {},
3234};
3235
fe96802b 3236static struct mconfig vdec_configs[] = {
fe96802b
NQ
3237 MC_PU32("debug_trace_num", &debug_trace_num),
3238 MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
3239 MC_PU32("clk_config", &clk_config),
3240 MC_PI32("step_mode", &step_mode),
3241 MC_PI32("poweron_clock_level", &poweron_clock_level),
3242};
3243static struct mconfig_node vdec_node;
3244
3245static struct platform_driver vdec_driver = {
3246 .probe = vdec_probe,
3247 .remove = vdec_remove,
3248 .driver = {
3249 .name = "vdec",
3250 .of_match_table = amlogic_vdec_dt_match,
3251 }
3252};
3253
3254int vdec_module_init(void)
3255{
3256 if (platform_driver_register(&vdec_driver)) {
3257 pr_info("failed to register vdec module\n");
3258 return -ENODEV;
3259 }
3260 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
3261 "vdec", vdec_configs, CONFIG_FOR_RW);
3262 return 0;
3263}
3264EXPORT_SYMBOL(vdec_module_init);
3265
3266void vdec_module_exit(void)
3267{
3268 platform_driver_unregister(&vdec_driver);
3269}
3270EXPORT_SYMBOL(vdec_module_exit);
3271
3272#if 0
3273static int __init vdec_module_init(void)
3274{
3275 if (platform_driver_register(&vdec_driver)) {
3276 pr_info("failed to register vdec module\n");
3277 return -ENODEV;
3278 }
3279 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
3280 "vdec", vdec_configs, CONFIG_FOR_RW);
3281 return 0;
3282}
3283
3284static void __exit vdec_module_exit(void)
3285{
3286 platform_driver_unregister(&vdec_driver);
3287}
3288#endif
3289
3290static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
3291{
3292 vdec_core->cma_dev = dev;
3293
3294 return 0;
3295}
3296
3297static const struct reserved_mem_ops rmem_vdec_ops = {
3298 .device_init = vdec_mem_device_init,
3299};
3300
3301static int __init vdec_mem_setup(struct reserved_mem *rmem)
3302{
3303 rmem->ops = &rmem_vdec_ops;
3304 pr_info("vdec: reserved mem setup\n");
3305
3306 return 0;
3307}
3308
3309RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
3f4a083c 3310module_param(debug, uint, 0664);
3311module_param(debug_trace_num, uint, 0664);
3312module_param(hevc_max_reset_count, int, 0664);
3313module_param(clk_config, uint, 0664);
3314module_param(step_mode, int, 0664);
fe96802b 3315
3316/*
3317*module_init(vdec_module_init);
3318*module_exit(vdec_module_exit);
3319*/
3320#define CREATE_TRACE_POINTS
3321#include "vdec_trace.h"
3322MODULE_DESCRIPTION("AMLOGIC vdec driver");
3323MODULE_LICENSE("GPL");
3324MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");