[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_media.git] drivers/frame_provider/decoder/utils/vdec.c
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17#define DEBUG
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#include <linux/amlogic/media/vfm/vfm_ext.h>
37/*for VDEC_DEBUG_SUPPORT*/
38#include <linux/time.h>
39
40#include <linux/amlogic/media/utils/vdec_reg.h>
41#include "vdec.h"
42#include "vdec_trace.h"
43#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
44#include "vdec_profile.h"
45#endif
46#include <linux/of.h>
47#include <linux/of_fdt.h>
48#include <linux/libfdt_env.h>
49#include <linux/of_reserved_mem.h>
50#include <linux/dma-contiguous.h>
51#include <linux/cma.h>
52#include <linux/module.h>
53#include <linux/slab.h>
54#include <linux/dma-mapping.h>
55#include <linux/dma-contiguous.h>
56#include "../../../stream_input/amports/amports_priv.h"
57
58#include <linux/amlogic/media/utils/amports_config.h>
59#include "../utils/amvdec.h"
60#include "vdec_input.h"
61
62#include "../../../common/media_clock/clk/clk.h"
63#include <linux/reset.h>
64#include <linux/amlogic/cpu_version.h>
65#include <linux/amlogic/media/codec_mm/codec_mm.h>
66#include <linux/amlogic/media/video_sink/video_keeper.h>
67#include <linux/amlogic/media/codec_mm/configs.h>
68#include <linux/amlogic/media/frame_sync/ptsserv.h>
69#include "secprot.h"
70
71static DEFINE_MUTEX(vdec_mutex);
72
73#define MC_SIZE (4096 * 4)
74#define CMA_ALLOC_SIZE SZ_64M
75#define MEM_NAME "vdec_prealloc"
76static int inited_vcodec_num;
77static int poweron_clock_level;
78static int keep_vdec_mem;
79static unsigned int debug_trace_num = 16 * 20;
80static int step_mode;
81static unsigned int clk_config;
82
83static int hevc_max_reset_count;
84#define MAX_INSTANCE_MUN 9
85
86static int no_powerdown;
87static DEFINE_SPINLOCK(vdec_spin_lock);
88
89#define HEVC_TEST_LIMIT 100
90#define GXBB_REV_A_MINOR 0xA
91
92struct am_reg {
93 char *name;
94 int offset;
95};
96
97struct vdec_isr_context_s {
98 int index;
99 int irq;
100 irq_handler_t dev_isr;
101 irq_handler_t dev_threaded_isr;
102 void *dev_id;
103 struct vdec_s *vdec;
104};
105
106struct vdec_core_s {
107 struct list_head connected_vdec_list;
108 spinlock_t lock;
109 struct ida ida;
110 atomic_t vdec_nr;
111 struct vdec_s *vfm_vdec;
112 struct vdec_s *active_vdec;
113 struct vdec_s *hint_fr_vdec;
114 struct platform_device *vdec_core_platform_device;
115 struct device *cma_dev;
116 struct semaphore sem;
117 struct task_struct *thread;
118 struct workqueue_struct *vdec_core_wq;
119
120 unsigned long sched_mask;
121 struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
122 int power_ref_count[VDEC_MAX];
123};
124
125static struct vdec_core_s *vdec_core;
126
127static const char * const vdec_status_string[] = {
128 "VDEC_STATUS_UNINITIALIZED",
129 "VDEC_STATUS_DISCONNECTED",
130 "VDEC_STATUS_CONNECTED",
131 "VDEC_STATUS_ACTIVE"
132};
133
134static int debugflags;
135
136int vdec_get_debug_flags(void)
137{
138 return debugflags;
139}
140EXPORT_SYMBOL(vdec_get_debug_flags);
141
142unsigned char is_mult_inc(unsigned int type)
143{
144 unsigned char ret = 0;
145 if (vdec_get_debug_flags() & 0xf000)
146 ret = (vdec_get_debug_flags() & 0x1000)
147 ? 1 : 0;
148 else if (type & PORT_TYPE_DECODER_SCHED)
149 ret = 1;
150 return ret;
151}
152EXPORT_SYMBOL(is_mult_inc);
153
154static const bool cores_with_input[VDEC_MAX] = {
155 true, /* VDEC_1 */
156 false, /* VDEC_HCODEC */
157 false, /* VDEC_2 */
158 true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
159 false, /* VDEC_HEVC_BACK */
160};
161
162static const int cores_int[VDEC_MAX] = {
163 VDEC_IRQ_1,
164 VDEC_IRQ_2,
165 VDEC_IRQ_0,
166 VDEC_IRQ_0,
167 VDEC_IRQ_HEVC_BACK
168};
169
170unsigned long vdec_core_lock(struct vdec_core_s *core)
171{
172 unsigned long flags;
173
174 spin_lock_irqsave(&core->lock, flags);
175
176 return flags;
177}
178
179void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
180{
181 spin_unlock_irqrestore(&core->lock, flags);
182}
183
184static int get_canvas(unsigned int index, unsigned int base)
185{
186 int start;
187 int canvas_index = index * base;
188
189 if ((base > 4) || (base == 0))
190 return -1;
191
192 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
193 <= AMVDEC_CANVAS_MAX1) {
194 start = AMVDEC_CANVAS_START_INDEX + base * index;
195 } else {
196 canvas_index -= (AMVDEC_CANVAS_MAX1 -
197 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
198 if (canvas_index <= AMVDEC_CANVAS_MAX2)
199 start = canvas_index / base;
200 else
201 return -1;
202 }
203
204 if (base == 1) {
205 return start;
206 } else if (base == 2) {
207 return ((start + 1) << 16) | ((start + 1) << 8) | start;
208 } else if (base == 3) {
209 return ((start + 2) << 16) | ((start + 1) << 8) | start;
210 } else if (base == 4) {
211 return (((start + 3) << 24) | (start + 2) << 16) |
212 ((start + 1) << 8) | start;
213 }
214
215 return -1;
216}
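/*
 * Worked example (illustrative only, assuming AMVDEC_CANVAS_START_INDEX is
 * 0x78 as defined in amvdec.h): get_canvas(0, 4) returns
 *   (0x7b << 24) | (0x7a << 16) | (0x79 << 8) | 0x78
 * i.e. four consecutive canvas indices packed one per byte, with the first
 * (Y) canvas in the lowest byte, which is how callers unpack the value.
 */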
217
218
219int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
220{
221 if (vdec->dec_status)
222 return vdec->dec_status(vdec, vstatus);
223
224 return -1;
225}
226EXPORT_SYMBOL(vdec_status);
227
228int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
229{
230 int r;
231
232 if (vdec->set_trickmode) {
233 r = vdec->set_trickmode(vdec, trickmode);
234
235 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
236 r = vdec->slave->set_trickmode(vdec->slave,
237 trickmode);
238 return r;
239 }
240 return -1;
241}
242EXPORT_SYMBOL(vdec_set_trickmode);
243
244int vdec_set_isreset(struct vdec_s *vdec, int isreset)
245{
246 vdec->is_reset = isreset;
247 pr_info("is_reset=%d\n", isreset);
248 if (vdec->set_isreset)
249 return vdec->set_isreset(vdec, isreset);
250 return 0;
251}
252EXPORT_SYMBOL(vdec_set_isreset);
253
254int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
255{
256 vdec->dolby_meta_with_el = isdvmetawithel;
257 pr_info("isdvmetawithel=%d\n", isdvmetawithel);
258 return 0;
259}
260EXPORT_SYMBOL(vdec_set_dv_metawithel);
261
262void vdec_set_no_powerdown(int flag)
263{
264 no_powerdown = flag;
265 pr_info("no_powerdown=%d\n", no_powerdown);
266 return;
267}
268EXPORT_SYMBOL(vdec_set_no_powerdown);
269
270void vdec_count_info(struct vdec_info *vs, unsigned int err,
271 unsigned int offset)
272{
273 if (err)
274 vs->error_frame_count++;
275 if (offset) {
276 if (0 == vs->frame_count) {
277 vs->offset = 0;
278 vs->samp_cnt = 0;
279 }
280 vs->frame_data = offset > vs->total_data ?
281 offset - vs->total_data : vs->total_data - offset;
282 vs->total_data = offset;
283 if (vs->samp_cnt < 96000 * 2) { /* 2s */
284 if (0 == vs->samp_cnt)
285 vs->offset = offset;
286 vs->samp_cnt += vs->frame_dur;
287 } else {
288 vs->bit_rate = (offset - vs->offset) / 2;
289 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
290 vs->samp_cnt = 0;
291 }
292 vs->frame_count++;
293 }
294 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
295 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
296 return;
297}
298EXPORT_SYMBOL(vdec_count_info);
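/*
 * Rough worked example of the 2-second window above (illustrative only):
 * frame_dur is expressed in 1/96000 s units, so samp_cnt reaches 96000 * 2
 * after roughly two seconds of frames.  If the stream offset advanced by
 * about 500000 bytes in that window, bit_rate = (offset - vs->offset) / 2,
 * i.e. roughly 250000 bytes per second (despite the field name).
 */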
299int vdec_is_support_4k(void)
300{
301 return !is_meson_gxl_package_805X();
302}
303EXPORT_SYMBOL(vdec_is_support_4k);
304
305/*
306 * clk_config:
307 * 0: default
308 * 1: no gp0_pll;
309 * 2: always use gp0_pll;
310 * >= 10: fixed n MHz clock,
311 *    e.g. 100 means a fixed 100 MHz clock;
312 */
313unsigned int get_vdec_clk_config_settings(void)
314{
315 return clk_config;
316}
317void update_vdec_clk_config_settings(unsigned int config)
318{
319 clk_config = config;
320}
321EXPORT_SYMBOL(update_vdec_clk_config_settings);
322
323static bool hevc_workaround_needed(void)
324{
325 return (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) &&
326 (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
327 == GXBB_REV_A_MINOR);
328}
329
330struct device *get_codec_cma_device(void)
331{
332 return vdec_core->cma_dev;
333}
334
335#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
336static const char * const vdec_device_name[] = {
337 "amvdec_mpeg12", "ammvdec_mpeg12",
338 "amvdec_mpeg4", "ammvdec_mpeg4",
339 "amvdec_h264", "ammvdec_h264",
340 "amvdec_mjpeg", "ammvdec_mjpeg",
341 "amvdec_real", "ammvdec_real",
342 "amjpegdec", "ammjpegdec",
343 "amvdec_vc1", "ammvdec_vc1",
344 "amvdec_avs", "ammvdec_avs",
345 "amvdec_yuv", "ammvdec_yuv",
346 "amvdec_h264mvc", "ammvdec_h264mvc",
347 "amvdec_h264_4k2k", "ammvdec_h264_4k2k",
348 "amvdec_h265", "ammvdec_h265",
349 "amvenc_avc", "amvenc_avc",
350 "jpegenc", "jpegenc",
351 "amvdec_vp9", "ammvdec_vp9",
352 "amvdec_avs2", "ammvdec_avs2"
353};
354
355
356#else
357
358static const char * const vdec_device_name[] = {
359 "amvdec_mpeg12",
360 "amvdec_mpeg4",
361 "amvdec_h264",
362 "amvdec_mjpeg",
363 "amvdec_real",
364 "amjpegdec",
365 "amvdec_vc1",
366 "amvdec_avs",
367 "amvdec_yuv",
368 "amvdec_h264mvc",
369 "amvdec_h264_4k2k",
370 "amvdec_h265",
371 "amvenc_avc",
372 "jpegenc",
373 "amvdec_vp9",
374 "amvdec_avs2"
375};
376
377#endif
378
379#ifdef VDEC_DEBUG_SUPPORT
380static u64 get_current_clk(void)
381{
382 /*struct timespec xtime = current_kernel_time();
383 u64 usec = xtime.tv_sec * 1000000;
384 usec += xtime.tv_nsec / 1000;
385 */
386 u64 usec = sched_clock();
387 return usec;
388}
389
390static void inc_profi_count(unsigned long mask, u32 *count)
391{
392 enum vdec_type_e type;
393
394 for (type = VDEC_1; type < VDEC_MAX; type++) {
395 if (mask & (1 << type))
396 count[type]++;
397 }
398}
399
400static void update_profi_clk_run(struct vdec_s *vdec,
401 unsigned long mask, u64 clk)
402{
403 enum vdec_type_e type;
404
405 for (type = VDEC_1; type < VDEC_MAX; type++) {
406 if (mask & (1 << type)) {
407 vdec->start_run_clk[type] = clk;
408 if (vdec->profile_start_clk[type] == 0)
409 vdec->profile_start_clk[type] = clk;
410 vdec->total_clk[type] = clk
411 - vdec->profile_start_clk[type];
412 /*pr_info("set start_run_clk %ld\n",
413 vdec->start_run_clk);*/
414
415 }
416 }
417}
418
419static void update_profi_clk_stop(struct vdec_s *vdec,
420 unsigned long mask, u64 clk)
421{
422 enum vdec_type_e type;
423
424 for (type = VDEC_1; type < VDEC_MAX; type++) {
425 if (mask & (1 << type)) {
426 if (vdec->start_run_clk[type] == 0)
427 pr_info("error, start_run_clk[%d] not set\n", type);
428
429 /*pr_info("update run_clk type %d, %ld, %ld, %ld\n",
430 type,
431 clk,
432 vdec->start_run_clk[type],
433 vdec->run_clk[type]);*/
434 vdec->run_clk[type] +=
435 (clk - vdec->start_run_clk[type]);
436 }
437 }
438}
439
440#endif
441
442int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
443{
444 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
445 sizeof(struct dec_sysinfo)))
446 return -EFAULT;
447
448 return 0;
449}
450EXPORT_SYMBOL(vdec_set_decinfo);
451
452/* construct vdec structure */
453struct vdec_s *vdec_create(struct stream_port_s *port,
454 struct vdec_s *master)
455{
456 struct vdec_s *vdec;
457 int type = VDEC_TYPE_SINGLE;
458 int id;
459 if (is_mult_inc(port->type))
460 type = (port->type & PORT_TYPE_FRAME) ?
461 VDEC_TYPE_FRAME_BLOCK :
462 VDEC_TYPE_STREAM_PARSER;
463
464 id = ida_simple_get(&vdec_core->ida,
465 0, MAX_INSTANCE_MUN, GFP_KERNEL);
466 if (id < 0) {
467 pr_info("vdec_create request id failed!ret =%d\n", id);
468 return NULL;
469 }
470 vdec = vzalloc(sizeof(struct vdec_s));
471
472 /* TBD */
473 if (vdec) {
474 vdec->magic = 0x43454456;
475 vdec->id = -1;
476 vdec->type = type;
477 vdec->port = port;
478 vdec->sys_info = &vdec->sys_info_store;
479
480 INIT_LIST_HEAD(&vdec->list);
481
482 atomic_inc(&vdec_core->vdec_nr);
483 vdec->id = id;
484 vdec_input_init(&vdec->input, vdec);
485 if (master) {
486 vdec->master = master;
487 master->slave = vdec;
488 master->sched = 1;
489 }
490 }
491
492 pr_debug("vdec_create instance %p, total %d\n", vdec,
493 atomic_read(&vdec_core->vdec_nr));
494
495 //trace_vdec_create(vdec); /*DEBUG_TMP*/
496
497 return vdec;
498}
499EXPORT_SYMBOL(vdec_create);
500
501int vdec_set_format(struct vdec_s *vdec, int format)
502{
503 vdec->format = format;
504 vdec->port_flag |= PORT_FLAG_VFORMAT;
505
506 if (vdec->slave) {
507 vdec->slave->format = format;
508 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
509 }
510
511 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
b9164398
NQ
512
513 return 0;
514}
515EXPORT_SYMBOL(vdec_set_format);
516
517int vdec_set_pts(struct vdec_s *vdec, u32 pts)
518{
519 vdec->pts = pts;
520 vdec->pts64 = div64_u64((u64)pts * 100, 9);
521 vdec->pts_valid = true;
522 //trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
523 return 0;
524}
525EXPORT_SYMBOL(vdec_set_pts);
526
527int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
528{
529 vdec->pts64 = pts64;
530 vdec->pts = (u32)div64_u64(pts64 * 9, 100);
531 vdec->pts_valid = true;
532
533 //trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
534 return 0;
535}
536EXPORT_SYMBOL(vdec_set_pts64);
537
538void vdec_set_status(struct vdec_s *vdec, int status)
539{
540 //trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
541 vdec->status = status;
542}
543EXPORT_SYMBOL(vdec_set_status);
544
545void vdec_set_next_status(struct vdec_s *vdec, int status)
546{
547 //trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
548 vdec->next_status = status;
549}
550EXPORT_SYMBOL(vdec_set_next_status);
551
552int vdec_set_video_path(struct vdec_s *vdec, int video_path)
553{
554 vdec->frame_base_video_path = video_path;
555 return 0;
556}
557EXPORT_SYMBOL(vdec_set_video_path);
558
559int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
560{
561 vdec->vf_receiver_inst = receive_id;
562 return 0;
563}
564EXPORT_SYMBOL(vdec_set_receive_id);
565
566/* add frame data to input chain */
567int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
568{
569 return vdec_input_add_frame(&vdec->input, buf, count);
570}
571EXPORT_SYMBOL(vdec_write_vframe);
572
573/* queue work onto the vdec core workqueue, falling back to the system workqueue */
574void vdec_schedule_work(struct work_struct *work)
575{
576 if (vdec_core->vdec_core_wq)
577 queue_work(vdec_core->vdec_core_wq, work);
578 else
579 schedule_work(work);
580}
581EXPORT_SYMBOL(vdec_schedule_work);
582
583static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
584{
585 if (vdec->master)
586 return vdec->master;
587 else if (vdec->slave)
588 return vdec->slave;
589 return NULL;
590}
591
592static void vdec_sync_input_read(struct vdec_s *vdec)
593{
594 if (!vdec_stream_based(vdec))
595 return;
596
597 if (vdec_dual(vdec)) {
598 u32 me, other;
599 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
600 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
601 other =
602 vdec_get_associate(vdec)->input.stream_cookie;
603 if (me > other)
604 return;
605 else if (me == other) {
606 me = READ_VREG(VLD_MEM_VIFIFO_RP);
607 other =
608 vdec_get_associate(vdec)->input.swap_rp;
609 if (me > other) {
610 WRITE_PARSER_REG(PARSER_VIDEO_RP,
611 vdec_get_associate(vdec)->
612 input.swap_rp);
613 return;
614 }
615 }
616 WRITE_PARSER_REG(PARSER_VIDEO_RP,
617 READ_VREG(VLD_MEM_VIFIFO_RP));
618 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
619 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
620 if (((me & 0x80000000) == 0) &&
621 (vdec->input.streaming_rp & 0x80000000))
622 me += 1ULL << 32;
623 other = vdec_get_associate(vdec)->input.streaming_rp;
624 if (me > other) {
625 WRITE_PARSER_REG(PARSER_VIDEO_RP,
626 vdec_get_associate(vdec)->
627 input.swap_rp);
628 return;
629 }
630
631 WRITE_PARSER_REG(PARSER_VIDEO_RP,
632 READ_VREG(HEVC_STREAM_RD_PTR));
633 }
634 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
635 WRITE_PARSER_REG(PARSER_VIDEO_RP,
636 READ_VREG(VLD_MEM_VIFIFO_RP));
637 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
638 WRITE_PARSER_REG(PARSER_VIDEO_RP,
639 READ_VREG(HEVC_STREAM_RD_PTR));
640 }
641}
642
643static void vdec_sync_input_write(struct vdec_s *vdec)
644{
645 if (!vdec_stream_based(vdec))
646 return;
647
648 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
649 WRITE_VREG(VLD_MEM_VIFIFO_WP,
650 READ_PARSER_REG(PARSER_VIDEO_WP));
651 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
652 WRITE_VREG(HEVC_STREAM_WR_PTR,
653 READ_PARSER_REG(PARSER_VIDEO_WP));
654 }
655}
656
657/*
658 *get next frame from input chain
659 */
660/*
661 *THE VLD_FIFO is 512 bytes and Video buffer level
662 * empty interrupt is set to 0x80 bytes threshold
663 */
664#define VLD_PADDING_SIZE 1024
665#define HEVC_PADDING_SIZE (1024*16)
666int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
667{
668 struct vdec_input_s *input = &vdec->input;
669 struct vframe_chunk_s *chunk = NULL;
670 struct vframe_block_list_s *block = NULL;
671 int dummy;
672
673 /* full reset to HW input */
674 if (input->target == VDEC_INPUT_TARGET_VLD) {
675 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
676
677 /* reset VLD fifo for all vdec */
678 WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
679 WRITE_VREG(DOS_SW_RESET0, 0);
680
681 dummy = READ_RESET_REG(RESET0_REGISTER);
682 WRITE_VREG(POWER_CTL_VLD, 1 << 4);
683 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
684#if 0
685 /*move to driver*/
686 if (input_frame_based(input))
687 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
688
689 /*
690 * 2: assist
691 * 3: parser
692 * 4: parser_state
693 * 8: dblk
694 * 11:mcpu
695 * 12:ccpu
696 * 13:ddr
697 * 14:iqit
698 * 15:ipp
699 * 17:qdct
700 * 18:mpred
701 * 19:sao
702 * 24:hevc_afifo
703 */
704 WRITE_VREG(DOS_SW_RESET3,
705 (1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
706 (1<<17)|(1<<18)|(1<<19));
707 WRITE_VREG(DOS_SW_RESET3, 0);
708#endif
709 }
710
711 /*
712 *setup HW decoder input buffer (VLD context)
713 * based on input->type and input->target
714 */
715 if (input_frame_based(input)) {
716 chunk = vdec_input_next_chunk(&vdec->input);
717
718 if (chunk == NULL) {
719 *p = NULL;
720 return -1;
721 }
722
723 block = chunk->block;
724
725 if (input->target == VDEC_INPUT_TARGET_VLD) {
726 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
727 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
728 block->size - 8);
729 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
730 round_down(block->start + chunk->offset,
fe96802b 731 VDEC_FIFO_ALIGN));
b9164398
NQ
732
733 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
734 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
735
736 /* set to manual mode */
737 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
738 WRITE_VREG(VLD_MEM_VIFIFO_RP,
739 round_down(block->start + chunk->offset,
740 VDEC_FIFO_ALIGN));
741 dummy = chunk->offset + chunk->size +
742 VLD_PADDING_SIZE;
743 if (dummy >= block->size)
744 dummy -= block->size;
745 WRITE_VREG(VLD_MEM_VIFIFO_WP,
746 round_down(block->start + dummy,
747 VDEC_FIFO_ALIGN));
748
749 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
750 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
751
752 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
753 (0x11 << 16) | (1<<10) | (7<<3));
754
755 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
756 WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
757 WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
758 block->size);
759 WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
760 chunk->offset);
761 dummy = chunk->offset + chunk->size +
762 HEVC_PADDING_SIZE;
763 if (dummy >= block->size)
764 dummy -= block->size;
765 WRITE_VREG(HEVC_STREAM_WR_PTR,
766 round_down(block->start + dummy,
767 VDEC_FIFO_ALIGN));
768
769 /* set endian */
770 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
771 }
772
773 *p = chunk;
774 return chunk->size;
775
776 } else {
777 /* stream based */
778 u32 rp = 0, wp = 0, fifo_len = 0;
779 int size;
fe96802b
NQ
780 bool swap_valid = input->swap_valid;
781 unsigned long swap_page_phys = input->swap_page_phys;
782
783 if (vdec_dual(vdec) &&
784 ((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
785 /* keep using previous input context */
786 struct vdec_s *master = (vdec->slave) ?
787 vdec : vdec->master;
788 if (master->input.last_swap_slave) {
789 swap_valid = master->slave->input.swap_valid;
790 swap_page_phys =
791 master->slave->input.swap_page_phys;
792 } else {
793 swap_valid = master->input.swap_valid;
794 swap_page_phys = master->input.swap_page_phys;
795 }
796 }
797
798 if (swap_valid) {
799 if (input->target == VDEC_INPUT_TARGET_VLD) {
800 if (vdec->format == VFORMAT_H264)
801 SET_VREG_MASK(POWER_CTL_VLD,
802 (1 << 9));
803
804 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
805
806 /* restore read side */
807 WRITE_VREG(VLD_MEM_SWAP_ADDR,
808 swap_page_phys);
809 WRITE_VREG(VLD_MEM_SWAP_CTL, 1);
810
811 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
812 ;
813 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
814
815 /* restore wrap count */
816 WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
817 input->stream_cookie);
818
819 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
820 fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);
821
822 /* enable */
823 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
824 (0x11 << 16) | (1<<10));
825
826 /* sync with front end */
827 vdec_sync_input_read(vdec);
828 vdec_sync_input_write(vdec);
829
830 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
831 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
832 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
833
834 /* restore read side */
835 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
836 swap_page_phys);
837 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);
838
839 while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
840 & (1<<7))
841 ;
842 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
843
844 /* restore stream offset */
845 WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
846 input->stream_cookie);
847
848 rp = READ_VREG(HEVC_STREAM_RD_PTR);
849 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
850 >> 16) & 0x7f;
851
852
853 /* enable */
854
855 /* sync with front end */
856 vdec_sync_input_read(vdec);
857 vdec_sync_input_write(vdec);
858
859 wp = READ_VREG(HEVC_STREAM_WR_PTR);
860
861 /*pr_info("vdec: restore context\r\n");*/
862 }
863
864 } else {
865 if (input->target == VDEC_INPUT_TARGET_VLD) {
866 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
867 input->start);
868 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
869 input->start + input->size - 8);
870 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
871 input->start);
872
873 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
874 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
875
876 /* set to manual mode */
877 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
878 WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
879 WRITE_VREG(VLD_MEM_VIFIFO_WP,
880 READ_PARSER_REG(PARSER_VIDEO_WP));
881
882 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
883
884 /* enable */
885 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
886 (0x11 << 16) | (1<<10));
887
888 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
889
890 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
891 WRITE_VREG(HEVC_STREAM_START_ADDR,
892 input->start);
893 WRITE_VREG(HEVC_STREAM_END_ADDR,
894 input->start + input->size);
895 WRITE_VREG(HEVC_STREAM_RD_PTR,
896 input->start);
897 WRITE_VREG(HEVC_STREAM_WR_PTR,
898 READ_PARSER_REG(PARSER_VIDEO_WP));
899
900 rp = READ_VREG(HEVC_STREAM_RD_PTR);
901 wp = READ_VREG(HEVC_STREAM_WR_PTR);
902 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
903 >> 16) & 0x7f;
904
905 /* enable */
906 }
907 }
908 *p = NULL;
909 if (wp >= rp)
910 size = wp - rp + fifo_len;
911 else
912 size = wp + input->size - rp + fifo_len;
913 if (size < 0) {
914 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
915 __func__, input->size, wp, rp, fifo_len, size);
916 size = 0;
917 }
918 return size;
919 }
920}
921EXPORT_SYMBOL(vdec_prepare_input);
922
923void vdec_enable_input(struct vdec_s *vdec)
924{
925 struct vdec_input_s *input = &vdec->input;
926
927 if (vdec->status != VDEC_STATUS_ACTIVE)
928 return;
929
930 if (input->target == VDEC_INPUT_TARGET_VLD)
931 SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
932 else if (input->target == VDEC_INPUT_TARGET_HEVC) {
933 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
934 if (vdec_stream_based(vdec))
935 CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
936 else
937 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
938 SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
939 }
940}
941EXPORT_SYMBOL(vdec_enable_input);
942
943int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
944{
945 int r = vdec_input_set_buffer(&vdec->input, start, size);
946
947 if (r)
948 return r;
949
950 if (vdec->slave)
951 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
952
953 return r;
954}
955EXPORT_SYMBOL(vdec_set_input_buffer);
956
957/*
958 * vdec_has_more_input returns whether more input data can still be
959 * consumed by the decoder through vdec_prepare_input.
960 * Note: this function should be called prior to vdec_vframe_dirty
961 * by decoder driver to determine if EOS happens for stream based
962 * decoding when there is no sufficient data for a frame
963 */
964bool vdec_has_more_input(struct vdec_s *vdec)
965{
966 struct vdec_input_s *input = &vdec->input;
967
968 if (!input->eos)
969 return true;
970
971 if (input_frame_based(input))
972 return vdec_input_next_input_chunk(input) != NULL;
973 else {
974 if (input->target == VDEC_INPUT_TARGET_VLD)
975 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
976 READ_PARSER_REG(PARSER_VIDEO_WP);
977 else {
978 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
979 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
980 }
981 }
982}
983EXPORT_SYMBOL(vdec_has_more_input);
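/*
 * Ordering sketch (hypothetical decoder-driver code, hw->chunk and hw->eos
 * are placeholders, not part of this file): per the note above, the EOS
 * check has to come before vdec_vframe_dirty() for stream based decoding,
 * otherwise an underrun at end of stream is mistaken for "wait for data".
 */
#if 0
	if (vdec_stream_based(vdec) && !vdec_has_more_input(vdec)) {
		/* the parser has delivered everything it will ever deliver */
		hw->eos = true;
	} else {
		/* normal path: mark the data consumed and get rescheduled */
		vdec_vframe_dirty(vdec, hw->chunk);
	}
#endif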
984
985void vdec_set_prepare_level(struct vdec_s *vdec, int level)
986{
987 vdec->input.prepare_level = level;
988}
989EXPORT_SYMBOL(vdec_set_prepare_level);
990
991void vdec_set_flag(struct vdec_s *vdec, u32 flag)
992{
993 vdec->flag = flag;
994}
995EXPORT_SYMBOL(vdec_set_flag);
996
997void vdec_set_eos(struct vdec_s *vdec, bool eos)
998{
999 vdec->input.eos = eos;
1000
1001 if (vdec->slave)
1002 vdec->slave->input.eos = eos;
1003}
1004EXPORT_SYMBOL(vdec_set_eos);
1005
1006#ifdef VDEC_DEBUG_SUPPORT
1007void vdec_set_step_mode(void)
1008{
1009 step_mode = 0x1ff;
1010}
1011EXPORT_SYMBOL(vdec_set_step_mode);
1012#endif
1013
1014void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1015{
1016 if (vdec && next_vdec) {
1017 vdec->sched = 0;
1018 next_vdec->sched = 1;
1019 }
1020}
1021EXPORT_SYMBOL(vdec_set_next_sched);
1022
1023/*
1024 * Swap Context: S0 S1 S2 S3 S4
1025 * Sample sequence: M S M M S
1026 * Master Context: S0 S0 S2 S3 S3
1027 * Slave context: NA S1 S1 S2 S4
1028 * ^
1029 * ^
1030 * ^
1031 * the tricky part
1032 * If there are back to back decoding of master or slave
1033 * then the context of the counter part should be updated
1034 * with current decoder. In this example, S1 should be
1035 * updated to S2.
1036 * This is done by swap the swap_page and related info
1037 * between two layers.
1038 */
1039static void vdec_borrow_input_context(struct vdec_s *vdec)
1040{
1041 struct page *swap_page;
1042 unsigned long swap_page_phys;
1043 struct vdec_input_s *me;
1044 struct vdec_input_s *other;
1045
1046 if (!vdec_dual(vdec))
1047 return;
1048
1049 me = &vdec->input;
1050 other = &vdec_get_associate(vdec)->input;
1051
1052 /* swap the swap_context, borrow counter part's
1053 * swap context storage and update all related info.
1054 * After vdec_vframe_dirty, vdec_save_input_context
1055 * will be called to update current vdec's
1056 * swap context
1057 */
1058 swap_page = other->swap_page;
1059 other->swap_page = me->swap_page;
1060 me->swap_page = swap_page;
1061
1062 swap_page_phys = other->swap_page_phys;
1063 other->swap_page_phys = me->swap_page_phys;
1064 me->swap_page_phys = swap_page_phys;
1065
1066 other->swap_rp = me->swap_rp;
1067 other->streaming_rp = me->streaming_rp;
1068 other->stream_cookie = me->stream_cookie;
1069 other->swap_valid = me->swap_valid;
1070}
1071
1072void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
1073{
1074 if (chunk)
1075 chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;
1076
1077 if (vdec_stream_based(vdec)) {
1078 vdec->input.swap_needed = true;
1079
1080 if (vdec_dual(vdec)) {
1081 vdec_get_associate(vdec)->input.dirty_count = 0;
1082 vdec->input.dirty_count++;
1083 if (vdec->input.dirty_count > 1) {
1084 vdec->input.dirty_count = 1;
1085 vdec_borrow_input_context(vdec);
1086 }
1087 }
1088
1089 /* for stream based mode, we update read and write pointer
1090 * also in case decoder wants to keep working on decoding
1091 * for more frames while input front end has more data
1092 */
1093 vdec_sync_input_read(vdec);
1094 vdec_sync_input_write(vdec);
1095
1096 vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
1097 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
1098 }
1099}
1100EXPORT_SYMBOL(vdec_vframe_dirty);
1101
1102bool vdec_need_more_data(struct vdec_s *vdec)
1103{
1104 if (vdec_stream_based(vdec))
1105 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1106
1107 return false;
1108}
1109EXPORT_SYMBOL(vdec_need_more_data);
1110
1111void vdec_save_input_context(struct vdec_s *vdec)
1112{
1113 struct vdec_input_s *input = &vdec->input;
1114
1115#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1116 vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
1117#endif
1118
1119 if (input->target == VDEC_INPUT_TARGET_VLD)
1120 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);
1121
1122 if (input_stream_based(input) && (input->swap_needed)) {
1123 if (input->target == VDEC_INPUT_TARGET_VLD) {
1124 WRITE_VREG(VLD_MEM_SWAP_ADDR,
1125 input->swap_page_phys);
1126 WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
1127 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1128 ;
1129 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1130 vdec->input.stream_cookie =
1131 READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
1132 vdec->input.swap_rp =
1133 READ_VREG(VLD_MEM_VIFIFO_RP);
1134 vdec->input.total_rd_count =
1135 (u64)vdec->input.stream_cookie *
1136 vdec->input.size + vdec->input.swap_rp -
1137 READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
1138 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1139 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
1140 input->swap_page_phys);
1141 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);
1142
1143 while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
1144 ;
1145 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1146
1147 vdec->input.stream_cookie =
1148 READ_VREG(HEVC_SHIFT_BYTE_COUNT);
1149 vdec->input.swap_rp =
1150 READ_VREG(HEVC_STREAM_RD_PTR);
1151 if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
1152 (vdec->input.streaming_rp & 0x80000000))
1153 vdec->input.streaming_rp += 1ULL << 32;
1154 vdec->input.streaming_rp &= 0xffffffffULL << 32;
1155 vdec->input.streaming_rp |= vdec->input.stream_cookie;
1156 vdec->input.total_rd_count = vdec->input.streaming_rp;
1157 }
1158
1159 input->swap_valid = true;
1160 input->swap_needed = false;
1161 /*pr_info("vdec: save context\r\n");*/
1162
1163 vdec_sync_input_read(vdec);
1164
1165 if (vdec_dual(vdec)) {
1166 struct vdec_s *master = (vdec->slave) ?
1167 vdec : vdec->master;
1168 master->input.last_swap_slave = (master->slave == vdec);
1169 /* pr_info("master->input.last_swap_slave = %d\n",
1170 master->input.last_swap_slave); */
1171 }
1172 }
1173}
1174EXPORT_SYMBOL(vdec_save_input_context);
1175
1176void vdec_clean_input(struct vdec_s *vdec)
1177{
1178 struct vdec_input_s *input = &vdec->input;
1179
1180 while (!list_empty(&input->vframe_chunk_list)) {
1181 struct vframe_chunk_s *chunk =
1182 vdec_input_next_chunk(input);
1183 if (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED)
1184 vdec_input_release_chunk(input, chunk);
1185 else
1186 break;
1187 }
1188 vdec_save_input_context(vdec);
1189}
1190EXPORT_SYMBOL(vdec_clean_input);
1191
1192int vdec_sync_input(struct vdec_s *vdec)
1193{
1194 struct vdec_input_s *input = &vdec->input;
1195 u32 rp = 0, wp = 0, fifo_len = 0;
1196 int size;
1197
1198 vdec_sync_input_read(vdec);
1199 vdec_sync_input_write(vdec);
1200 if (input->target == VDEC_INPUT_TARGET_VLD) {
1201 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1202 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1203
1204 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1205 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1206 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1207 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1208 >> 16) & 0x7f;
1209 }
1210 if (wp >= rp)
1211 size = wp - rp + fifo_len;
1212 else
1213 size = wp + input->size - rp + fifo_len;
1214 if (size < 0) {
1215 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1216 __func__, input->size, wp, rp, fifo_len, size);
1217 size = 0;
1218 }
1219 return size;
1220
1221}
1222EXPORT_SYMBOL(vdec_sync_input);
1223
1224const char *vdec_status_str(struct vdec_s *vdec)
1225{
1226 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1227 vdec_status_string[vdec->status] : "INVALID";
1228}
1229
1230const char *vdec_type_str(struct vdec_s *vdec)
1231{
1232 switch (vdec->type) {
1233 case VDEC_TYPE_SINGLE:
1234 return "VDEC_TYPE_SINGLE";
1235 case VDEC_TYPE_STREAM_PARSER:
1236 return "VDEC_TYPE_STREAM_PARSER";
1237 case VDEC_TYPE_FRAME_BLOCK:
1238 return "VDEC_TYPE_FRAME_BLOCK";
1239 case VDEC_TYPE_FRAME_CIRCULAR:
1240 return "VDEC_TYPE_FRAME_CIRCULAR";
1241 default:
1242 return "VDEC_TYPE_INVALID";
1243 }
1244}
1245
1246const char *vdec_device_name_str(struct vdec_s *vdec)
1247{
1248 return vdec_device_name[vdec->format * 2 + 1];
1249}
1250EXPORT_SYMBOL(vdec_device_name_str);
1251
1252void walk_vdec_core_list(char *s)
1253{
1254 struct vdec_s *vdec;
1255 struct vdec_core_s *core = vdec_core;
1256 unsigned long flags;
1257
1258 pr_info("%s --->\n", s);
1259
1260 flags = vdec_core_lock(vdec_core);
1261
1262 if (list_empty(&core->connected_vdec_list)) {
1263 pr_info("connected vdec list empty\n");
1264 } else {
1265 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1266 pr_info("\tvdec (%p), status = %s\n", vdec,
1267 vdec_status_str(vdec));
1268 }
1269 }
1270
1271 vdec_core_unlock(vdec_core, flags);
1272}
1273EXPORT_SYMBOL(walk_vdec_core_list);
1274
1275/* insert vdec to vdec_core for scheduling,
1276 * for dual running decoders, connect/disconnect always runs in pairs
1277 */
1278int vdec_connect(struct vdec_s *vdec)
1279{
1280 unsigned long flags;
1281
1282 //trace_vdec_connect(vdec);/*DEBUG_TMP*/
1283
1284 if (vdec->status != VDEC_STATUS_DISCONNECTED)
1285 return 0;
1286
1287 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
1288 vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1289
1290 init_completion(&vdec->inactive_done);
1291
1292 if (vdec->slave) {
1293 vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
1294 vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);
1295
1296 init_completion(&vdec->slave->inactive_done);
1297 }
1298
1299 flags = vdec_core_lock(vdec_core);
1300
1301 list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);
1302
1303 if (vdec->slave) {
1304 list_add_tail(&vdec->slave->list,
1305 &vdec_core->connected_vdec_list);
1306 }
1307
1308 vdec_core_unlock(vdec_core, flags);
1309
1310 up(&vdec_core->sem);
1311
1312 return 0;
1313}
1314EXPORT_SYMBOL(vdec_connect);
1315
1316/* remove vdec from vdec_core scheduling */
1317int vdec_disconnect(struct vdec_s *vdec)
1318{
1319#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1320 vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
1321#endif
1322 //trace_vdec_disconnect(vdec);/*DEBUG_TMP*/
1323
1324 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1325 (vdec->status != VDEC_STATUS_ACTIVE)) {
1326 return 0;
1327 }
1328
1329 /*
1330 *when a vdec is under the management of scheduler
1331 * the status change will only be from vdec_core_thread
1332 */
1333 vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);
1334
1335 if (vdec->slave)
1336 vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
1337 else if (vdec->master)
1338 vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
1339
1340 up(&vdec_core->sem);
1341
1342 wait_for_completion(&vdec->inactive_done);
1343
1344 if (vdec->slave)
1345 wait_for_completion(&vdec->slave->inactive_done);
1346 else if (vdec->master)
1347 wait_for_completion(&vdec->master->inactive_done);
1348
1349 return 0;
1350}
1351EXPORT_SYMBOL(vdec_disconnect);
1352
1353/* release vdec structure */
1354int vdec_destroy(struct vdec_s *vdec)
1355{
1356 //trace_vdec_destroy(vdec);/*DEBUG_TMP*/
1357
1358 vdec_input_release(&vdec->input);
1359
1360#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1361 vdec_profile_flush(vdec);
1362#endif
1363 ida_simple_remove(&vdec_core->ida, vdec->id);
1364 vfree(vdec);
1365
1366 atomic_dec(&vdec_core->vdec_nr);
1367
1368 return 0;
1369}
1370EXPORT_SYMBOL(vdec_destroy);
1371
1372/*
1373 * Only support time sliced decoding for frame based input,
1374 * so legacy decoder can exist with time sliced decoder.
1375 */
1376static const char *get_dev_name(bool use_legacy_vdec, int format)
1377{
1378#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1379 if (use_legacy_vdec)
1380 return vdec_device_name[format * 2];
1381 else
1382 return vdec_device_name[format * 2 + 1];
1383#else
1384 return vdec_device_name[format];
1385#endif
1386}
1387
1388/*
1389 *register vdec_device
1390 * create output, vfm or create ionvideo output
1391 */
1392s32 vdec_init(struct vdec_s *vdec, int is_4k)
1393{
1394 int r = 0;
1395 struct vdec_s *p = vdec;
1396 const char *dev_name;
1397 int id = PLATFORM_DEVID_AUTO;/*reused from vdec->id below if one was already allocated*/
1398
1399 dev_name = get_dev_name(vdec_single(vdec), vdec->format);
1400
1401 if (dev_name == NULL)
1402 return -ENODEV;
1403
1404 pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
1405 dev_name, vdec_type_str(vdec));
1406
1407 /*
1408 *todo: VFM patch control should be configurable,
1409 * for now all stream based input uses default VFM path.
1410 */
1411 if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
1412 if (vdec_core->vfm_vdec == NULL) {
1413 pr_debug("vdec_init set vfm decoder %p\n", vdec);
1414 vdec_core->vfm_vdec = vdec;
1415 } else {
1416 pr_info("vdec_init vfm path busy.\n");
1417 return -EBUSY;
1418 }
1419 }
1420
1421 mutex_lock(&vdec_mutex);
1422 inited_vcodec_num++;
1423 mutex_unlock(&vdec_mutex);
1424
1425 vdec_input_set_type(&vdec->input, vdec->type,
1426 (vdec->format == VFORMAT_HEVC ||
1427 vdec->format == VFORMAT_AVS2 ||
1428 vdec->format == VFORMAT_VP9) ?
1429 VDEC_INPUT_TARGET_HEVC :
1430 VDEC_INPUT_TARGET_VLD);
1431
1432 p->cma_dev = vdec_core->cma_dev;
1433 p->get_canvas = get_canvas;
1434 /* todo */
1435 if (!vdec_dual(vdec))
1436 p->use_vfm_path = vdec_stream_based(vdec);
1437 /* vdec_dev_reg.flag = 0; */
1438 if (vdec->id >= 0)
1439 id = vdec->id;
1440 p->dev = platform_device_register_data(
1441 &vdec_core->vdec_core_platform_device->dev,
1442 dev_name,
1443 id,
1444 &p, sizeof(struct vdec_s *));
1445
1446 if (IS_ERR(p->dev)) {
1447 r = PTR_ERR(p->dev);
1448 pr_err("vdec: Decoder device %s register failed (%d)\n",
1449 dev_name, r);
1450
1451 mutex_lock(&vdec_mutex);
1452 inited_vcodec_num--;
1453 mutex_unlock(&vdec_mutex);
1454
1455 goto error;
1456 } else if (!p->dev->dev.driver) {
1457 pr_info("vdec: Decoder device %s driver probe failed.\n",
1458 dev_name);
1459 r = -ENODEV;
1460
1461 goto error;
1462 }
1463
1464 if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
1465 r = -ENODEV;
1466 pr_err("vdec: Decoder device not handled (%s)\n", dev_name);
1467
1468 mutex_lock(&vdec_mutex);
1469 inited_vcodec_num--;
1470 mutex_unlock(&vdec_mutex);
1471
1472 goto error;
1473 }
1474
1475 if (p->use_vfm_path) {
1476 vdec->vf_receiver_inst = -1;
1477 vdec->vfm_map_id[0] = 0;
1478 } else if (!vdec_dual(vdec)) {
1479 /* create IONVIDEO instance and connect decoder's
1480 * vf_provider interface to it
1481 */
1482 if (p->type != VDEC_TYPE_FRAME_BLOCK) {
1483 r = -ENODEV;
1484 pr_err("vdec: Incorrect decoder type\n");
1485
1486 mutex_lock(&vdec_mutex);
1487 inited_vcodec_num--;
1488 mutex_unlock(&vdec_mutex);
1489
1490 goto error;
1491 }
1492 if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
1493#if 1
1494 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1495 &vdec->vf_receiver_inst);
1496#else
1497 /*
1498 * temporarily just use decoder instance ID as iondriver ID
1499 * to solve OMX iondriver instance number check time sequence
1500 * only the limitation is we can NOT mix different video
1501 * decoders since same ID will be used for different decoder
1502 * formats.
1503 */
1504 vdec->vf_receiver_inst = p->dev->id;
1505 r = ionvideo_assign_map(&vdec->vf_receiver_name,
1506 &vdec->vf_receiver_inst);
1507#endif
1508 if (r < 0) {
1509 pr_err("IonVideo frame receiver allocation failed.\n");
1510
1511 mutex_lock(&vdec_mutex);
1512 inited_vcodec_num--;
1513 mutex_unlock(&vdec_mutex);
1514
1515 goto error;
1516 }
1517
fe96802b
NQ
1518 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1519 "%s %s", vdec->vf_provider_name,
1520 vdec->vf_receiver_name);
1521 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1522 "vdec-map-%d", vdec->id);
1523 } else if (p->frame_base_video_path ==
1524 FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
1525 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1526 "%s %s", vdec->vf_provider_name,
1527 "amlvideo deinterlace amvideo");
1528 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1529 "vdec-map-%d", vdec->id);
1530 } else if (p->frame_base_video_path ==
1531 FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
1532 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
1533 "%s %s", vdec->vf_provider_name,
1534 "ppmgr amlvideo.1 amvide2");
1535 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
1536 "vdec-map-%d", vdec->id);
1537 }
1538
1539 if (vfm_map_add(vdec->vfm_map_id,
1540 vdec->vfm_map_chain) < 0) {
1541 r = -ENOMEM;
1542 pr_err("Decoder pipeline map creation failed %s.\n",
1543 vdec->vfm_map_id);
1544 vdec->vfm_map_id[0] = 0;
1545
1546 mutex_lock(&vdec_mutex);
1547 inited_vcodec_num--;
1548 mutex_unlock(&vdec_mutex);
1549
1550 goto error;
1551 }
1552
1553 pr_debug("vfm map %s created\n", vdec->vfm_map_id);
1554
1555 /*
1556 *assume IONVIDEO driver already have a few vframe_receiver
1557 * registered.
1558 * 1. Call iondriver function to allocate a IONVIDEO path and
1559 * provide receiver's name and receiver op.
1560 * 2. Get decoder driver's provider name from driver instance
1561 * 3. vfm_map_add(name, "<decoder provider name>
1562 * <iondriver receiver name>"), e.g.
1563 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
1564 * 4. vf_reg_provider and vf_reg_receiver
1565 * Note: the decoder provider's op uses vdec as op_arg
1566 * the iondriver receiver's op uses iondev device as
1567 * op_arg
1568 */
1569
1570 }
1571
1572 if (!vdec_single(vdec)) {
1573 vf_reg_provider(&p->vframe_provider);
1574
1575 vf_notify_receiver(p->vf_provider_name,
1576 VFRAME_EVENT_PROVIDER_START,
1577 vdec);
fe96802b
NQ
1578
1579 if (vdec_core->hint_fr_vdec == NULL)
1580 vdec_core->hint_fr_vdec = vdec;
1581
1582 if (vdec_core->hint_fr_vdec == vdec) {
1583 if (p->sys_info->rate != 0) {
1584 if (!vdec->is_reset)
1585 vf_notify_receiver(p->vf_provider_name,
1586 VFRAME_EVENT_PROVIDER_FR_HINT,
1587 (void *)
1588 ((unsigned long)
1589 p->sys_info->rate));
1590 vdec->fr_hint_state = VDEC_HINTED;
1591 } else {
1592 vdec->fr_hint_state = VDEC_NEED_HINT;
1593 }
1594 }
1595 }
1596
1597 p->dolby_meta_with_el = 0;
1598 pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
1599 vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
1600 &vdec->input,
1601 vdec->sys_info->width,
1602 vdec->sys_info->height);
1603 /* vdec is now ready to be active */
1604 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
1605
1606 return 0;
1607
1608error:
1609 return r;
1610}
1611EXPORT_SYMBOL(vdec_init);
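/*
 * Lifecycle sketch (illustrative only; the real callers live in the
 * amstream/amports port layer, and my_port/my_sysinfo below are
 * placeholders): create -> set format/sysinfo -> init -> connect, after
 * which vdec_write_vframe() feeds frame based data and
 * vdec_disconnect()/vdec_release() tear the instance down again.
 */
#if 0
static int example_open_h264(struct stream_port_s *my_port,
	struct dec_sysinfo *my_sysinfo)
{
	struct vdec_s *vdec = vdec_create(my_port, NULL);
	int r;

	if (!vdec)
		return -ENOMEM;

	vdec_set_format(vdec, VFORMAT_H264);
	vdec->sys_info_store = *my_sysinfo;

	r = vdec_init(vdec, 0 /* not 4K */);
	if (r < 0)
		return r;	/* error unwinding omitted in this sketch */

	return vdec_connect(vdec);
}
#endif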
1612
1613/* vdec_create/init/release/destroy are applied to both dual running decoders
1614 */
1615void vdec_release(struct vdec_s *vdec)
1616{
1617 //trace_vdec_release(vdec);/*DEBUG_TMP*/
1618#ifdef VDEC_DEBUG_SUPPORT
1619 if (step_mode) {
1620 pr_info("VDEC_DEBUG: in step_mode, wait release\n");
1621 while (step_mode)
1622 udelay(10);
1623 pr_info("VDEC_DEBUG: step_mode is clear\n");
1624 }
1625#endif
1626 vdec_disconnect(vdec);
1627
1628 if (vdec->vframe_provider.name) {
1629 if (!vdec_single(vdec)) {
1630 if (vdec_core->hint_fr_vdec == vdec
1631 && vdec->fr_hint_state == VDEC_HINTED
1632 && !vdec->is_reset)
1633 vf_notify_receiver(
1634 vdec->vf_provider_name,
1635 VFRAME_EVENT_PROVIDER_FR_END_HINT,
1636 NULL);
1637 vdec->fr_hint_state = VDEC_NO_NEED_HINT;
1638 }
1639 vf_unreg_provider(&vdec->vframe_provider);
1640 }
1641
1642 if (vdec_core->vfm_vdec == vdec)
1643 vdec_core->vfm_vdec = NULL;
1644
1645 if (vdec_core->hint_fr_vdec == vdec)
1646 vdec_core->hint_fr_vdec = NULL;
1647
1648 if (vdec->vf_receiver_inst >= 0) {
1649 if (vdec->vfm_map_id[0]) {
1650 vfm_map_remove(vdec->vfm_map_id);
1651 vdec->vfm_map_id[0] = 0;
1652 }
1653 }
1654
1655 platform_device_unregister(vdec->dev);
1656 vdec_destroy(vdec);
1657
1658 mutex_lock(&vdec_mutex);
1659 inited_vcodec_num--;
1660 mutex_unlock(&vdec_mutex);
1661
1662 pr_debug("vdec_release instance %p, total %d\n", vdec,
1663 atomic_read(&vdec_core->vdec_nr));
1664}
1665EXPORT_SYMBOL(vdec_release);
1666
1667/* For dual running decoders, vdec_reset is only called with master vdec.
1668 */
1669int vdec_reset(struct vdec_s *vdec)
1670{
1671 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
1672
1673 vdec_disconnect(vdec);
1674
1675 if (vdec->vframe_provider.name)
1676 vf_unreg_provider(&vdec->vframe_provider);
1677
1678 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
1679 vf_unreg_provider(&vdec->slave->vframe_provider);
1680
1681 if (vdec->reset) {
1682 vdec->reset(vdec);
1683 if (vdec->slave)
1684 vdec->slave->reset(vdec->slave);
1685 }
1686
1687 vdec_input_release(&vdec->input);
1688
1689 vf_reg_provider(&vdec->vframe_provider);
1690 vf_notify_receiver(vdec->vf_provider_name,
1691 VFRAME_EVENT_PROVIDER_START, vdec);
1692
1693 if (vdec->slave) {
1694 vf_reg_provider(&vdec->slave->vframe_provider);
1695 vf_notify_receiver(vdec->slave->vf_provider_name,
1696 VFRAME_EVENT_PROVIDER_START, vdec->slave);
1697 }
1698
1699 vdec_connect(vdec);
1700
1701 return 0;
1702}
1703EXPORT_SYMBOL(vdec_reset);
1704
1705void vdec_free_cmabuf(void)
1706{
1707 mutex_lock(&vdec_mutex);
1708
1709 if (inited_vcodec_num > 0) {
1710 mutex_unlock(&vdec_mutex);
1711 return;
1712 }
1713 mutex_unlock(&vdec_mutex);
1714}
1715
1716int vdec_core_request(struct vdec_s *vdec, unsigned long mask)
1717{
1718 vdec->core_mask |= mask;
1719
1720 if (vdec->slave)
1721 vdec->slave->core_mask |= mask;
1722
1723 return 0;
1724}
1725EXPORT_SYMBOL(vdec_core_request);
1726
1727int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
1728{
1729 vdec->core_mask &= ~mask;
1730
1731 if (vdec->slave)
1732 vdec->slave->core_mask &= ~mask;
1733
1734 return 0;
1735}
1736EXPORT_SYMBOL(vdec_core_release);
1737
1738const bool vdec_core_with_input(unsigned long mask)
1739{
1740 enum vdec_type_e type;
1741
1742 for (type = VDEC_1; type < VDEC_MAX; type++) {
1743 if ((mask & (1 << type)) && cores_with_input[type])
1744 return true;
1745 }
1746
1747 return false;
1748}
1749
1750void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
1751{
1752 unsigned long i;
1753 unsigned long t = mask;
1754
1755 while (t) {
1756 i = __ffs(t);
1757 clear_bit(i, &vdec->active_mask);
1758 t &= ~(1 << i);
1759 }
1760
1761 if (vdec->active_mask == 0)
1762 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
1763}
1764EXPORT_SYMBOL(vdec_core_finish_run);
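/*
 * Core-mask usage sketch (hypothetical decoder code, not part of this
 * file; assumes the CORE_MASK_VDEC_1 bit from vdec.h): an instance
 * requests the cores it needs once, the scheduler passes the granted
 * mask to run(), and the decoder hands the cores back with
 * vdec_core_finish_run() when the hardware goes idle.
 */
#if 0
	/* probe: this instance only ever needs VDEC_1 */
	vdec_core_request(vdec, CORE_MASK_VDEC_1);

	/* decode-done path: give VDEC_1 back to the scheduler */
	vdec_core_finish_run(vdec, CORE_MASK_VDEC_1);

	/* remove: drop the request */
	vdec_core_release(vdec, CORE_MASK_VDEC_1);
#endif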
1765/*
1766 * find what core resources are available for vdec
1767 */
1768static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
1769 unsigned long active_mask)
1770{
1771 unsigned long mask = vdec->core_mask &
1772 ~CORE_MASK_COMBINE;
1773
1774 if (vdec->core_mask & CORE_MASK_COMBINE) {
1775 /* combined cores must be granted together */
1776 if ((mask & ~active_mask) == mask)
1777 return mask;
1778 else
1779 return 0;
1780 } else
1781 return mask & ~vdec->sched_mask & ~active_mask;
1782}
1783
1784/*
1785 *Decoder callback
1786 * Each decoder instance uses this callback to notify status change, e.g. when
1787 * decoder finished using HW resource.
1788 * a sample callback from decoder's driver is following:
1789 *
1790 * if (hw->vdec_cb) {
1791 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1792 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
1793 * }
1794 */
1795static void vdec_callback(struct vdec_s *vdec, void *data)
1796{
1797 struct vdec_core_s *core = (struct vdec_core_s *)data;
1798
1799#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1800 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
1801#endif
1802
1803 up(&core->sem);
1804}
1805
1806static irqreturn_t vdec_isr(int irq, void *dev_id)
1807{
1808 struct vdec_isr_context_s *c =
1809 (struct vdec_isr_context_s *)dev_id;
1810 struct vdec_s *vdec = c->vdec;
1811
1812 if (c->dev_isr)
1813 return c->dev_isr(irq, c->dev_id);
1814
1815 if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
1816 (c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
1817 (c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
b9164398
NQ
1818#if 0
1819 pr_warn("vdec interrupt w/o a valid receiver\n");
1820#endif
1821 return IRQ_HANDLED;
1822 }
1823
1824 if (!vdec) {
1825#if 0
1826 pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
1827 core);
1828#endif
1829 return IRQ_HANDLED;
1830 }
1831
1832 if (!vdec->irq_handler) {
1833#if 0
1834 pr_warn("vdec instance has no irq handle.\n");
1835#endif
1836 return IRQ_HANDLED;
1837 }
1838
1839 return vdec->irq_handler(vdec, c->index);
1840}
1841
1842static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
1843{
1844 struct vdec_isr_context_s *c =
1845 (struct vdec_isr_context_s *)dev_id;
1846 struct vdec_s *vdec = c->vdec;
1847
1848 if (c->dev_threaded_isr)
1849 return c->dev_threaded_isr(irq, c->dev_id);
1850
1851 if (!vdec)
1852 return IRQ_HANDLED;
1853
1854 if (!vdec->threaded_irq_handler)
1855 return IRQ_HANDLED;
1856
1857 return vdec->threaded_irq_handler(vdec, c->index);
1858}
1859
1860unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
1861{
1862 unsigned long ready_mask;
1863 struct vdec_input_s *input = &vdec->input;
1864 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1865 (vdec->status != VDEC_STATUS_ACTIVE))
1866 return false;
1867
1868 if (!vdec->run_ready)
1869 return false;
1870
1871 if ((vdec->slave || vdec->master) &&
1872 (vdec->sched == 0))
1873 return false;
1874#ifdef VDEC_DEBUG_SUPPORT
1875 inc_profi_count(mask, vdec->check_count);
1876#endif
1877 if (vdec_core_with_input(mask)) {
1878 /* check frame based input underrun */
1879 if (input && !input->eos && input_frame_based(input)
1880 && (!vdec_input_next_chunk(input))) {
1881#ifdef VDEC_DEBUG_SUPPORT
1882 inc_profi_count(mask, vdec->input_underrun_count);
1883#endif
1884 return false;
1885 }
1886 /* check streaming prepare level threshold if not EOS */
1887 if (input && input_stream_based(input) && !input->eos) {
1888 u32 rp, wp, level;
1889
1890 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
1891 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
1892 if (wp < rp)
1893 level = input->size + wp - rp;
1894 else
1895 level = wp - rp;
1896
1897 if ((level < input->prepare_level) &&
1898 (pts_get_rec_num(PTS_TYPE_VIDEO,
1899 vdec->input.total_rd_count) < 2)) {
1900 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
1901#ifdef VDEC_DEBUG_SUPPORT
1902 inc_profi_count(mask, vdec->input_underrun_count);
1903 if (step_mode & 0x200) {
1904 if ((step_mode & 0xff) == vdec->id) {
1905 step_mode |= 0xff;
1906 return mask;
1907 }
1908 }
1909#endif
1910 return false;
1911 } else if (level > input->prepare_level)
1912 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
1913 }
1914 }
1915
1916 if (step_mode) {
1917 if ((step_mode & 0xff) != vdec->id)
1918 return 0;
1919 step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
1920 }
1921
1922 /*step_mode &= ~0xff; not work for id of 0, removed*/
1923
1924#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1925 vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
1926#endif
1927
1928 ready_mask = vdec->run_ready(vdec, mask) & mask;
1929#ifdef VDEC_DEBUG_SUPPORT
1930 if (ready_mask != mask)
1931 inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
1932#endif
1933#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1934 if (ready_mask)
1935 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
1936#endif
1937
1938 return ready_mask;
1939}
1940
1941/* bridge on/off vdec's interrupt processing to vdec core */
1942static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
1943 bool enable)
1944{
1945 enum vdec_type_e type;
1946
1947 for (type = VDEC_1; type < VDEC_MAX; type++) {
1948 if (mask & (1 << type)) {
1949 struct vdec_isr_context_s *c =
1950 &vdec_core->isr_context[cores_int[type]];
1951 if (enable)
1952 c->vdec = vdec;
1953 else if (c->vdec == vdec)
1954 c->vdec = NULL;
1955 }
1956 }
1957}
1958
1959/*
1960 * Set up secure protection for each decoder instance running.
1961 * Note: The operation from REE side only resets memory access
1962 * to a default policy and even a non_secure type will still be
1963 * changed to secure type automatically when secure source is
1964 * detected inside TEE.
1965 * Perform need_more_data checking and set flag is decoder
1966 * is not consuming data.
1967 */
a6c89e96 1968void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
fe96802b
NQ
1969{
1970 struct vdec_input_s *input = &vdec->input;
a6c89e96 1971 int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
fe96802b
NQ
1972 DMC_DEV_TYPE_NON_SECURE;
1973
a6c89e96
NQ
1974 vdec_route_interrupt(vdec, mask, true);
1975
1976 if (!vdec_core_with_input(mask))
1977 return;
1978
fe96802b 1979 if (input->target == VDEC_INPUT_TARGET_VLD)
a6c89e96 1980 tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
fe96802b 1981 else if (input->target == VDEC_INPUT_TARGET_HEVC)
a6c89e96 1982 tee_config_device_secure(DMC_DEV_ID_HEVC, secure);
fe96802b
NQ
1983
1984 if (vdec_stream_based(vdec) &&
1985 ((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
1986 (vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
1987 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
1988 }
1989
1990 vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
1991 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
1992}
1993
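/*
 * Illustration only: a condensed sketch of the need_more_data bookkeeping
 * performed at the end of vdec_prepare_run() above.  It assumes the flag
 * bits are used exactly as in this file: RUN is set on every run, DIRTY is
 * set when fresh data arrives, and MORE_DATA is what gets reported back.
 * The helper name is hypothetical.
 */
#if 0
static inline u32 update_need_more_data(u32 flags, bool stream_based)
{
	if (stream_based &&
		(flags & VDEC_NEED_MORE_DATA_RUN) &&
		!(flags & VDEC_NEED_MORE_DATA_DIRTY))
		flags |= VDEC_NEED_MORE_DATA;

	flags |= VDEC_NEED_MORE_DATA_RUN;
	flags &= ~VDEC_NEED_MORE_DATA_DIRTY;

	return flags;
}
#endif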
b9164398
NQ
 1994/* The vdec core thread manages all decoder instances in the active list. When
 1995 * a vdec is added into the active list, it can only be in one of two statuses:
1996 * VDEC_STATUS_CONNECTED(the decoder does not own HW resource and ready to run)
1997 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
1998 * Removing a decoder from active list is only performed within core thread.
1999 * Adding a decoder into active list is performed from user thread.
2000 */
2001static int vdec_core_thread(void *data)
2002{
b9164398 2003 struct vdec_core_s *core = (struct vdec_core_s *)data;
a6c89e96
NQ
2004
2005 struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
b9164398
NQ
2006
2007 sched_setscheduler(current, SCHED_FIFO, &param);
2008
2009 allow_signal(SIGTERM);
a6c89e96 2010
b9164398 2011 while (down_interruptible(&core->sem) == 0) {
a6c89e96
NQ
2012 struct vdec_s *vdec, *tmp, *worker;
2013 unsigned long sched_mask = 0;
b9164398
NQ
2014 LIST_HEAD(disconnecting_list);
2015
2016 if (kthread_should_stop())
2017 break;
2018
2019 /* clean up previous active vdec's input */
a6c89e96
NQ
2020 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
2021 unsigned long mask = vdec->sched_mask &
2022 (vdec->active_mask ^ vdec->sched_mask);
2023
2024 vdec_route_interrupt(vdec, mask, false);
2025
2026#ifdef VDEC_DEBUG_SUPPORT
2027 update_profi_clk_stop(vdec, mask, get_current_clk());
2028#endif
2029 /*
2030 * If decoder released some core resources (mask), then
2031 * check if these core resources are associated
2032 * with any input side and do input clean up accordingly
2033 */
2034 if (vdec_core_with_input(mask)) {
2035 struct vdec_input_s *input = &vdec->input;
2036 while (!list_empty(
2037 &input->vframe_chunk_list)) {
2038 struct vframe_chunk_s *chunk =
2039 vdec_input_next_chunk(input);
2040 if (chunk->flag &
2041 VFRAME_CHUNK_FLAG_CONSUMED)
2042 vdec_input_release_chunk(input,
2043 chunk);
2044 else
2045 break;
2046 }
2047
2048 vdec_save_input_context(vdec);
b9164398
NQ
2049 }
2050
a6c89e96
NQ
2051 vdec->sched_mask &= ~mask;
2052 core->sched_mask &= ~mask;
b9164398
NQ
2053 }
2054
2055 /*
e0614bf7 2056 *todo:
b9164398
NQ
2057 * this is the case when the decoder is in active mode and
2058 * the system side wants to stop it. Currently we rely on
2059 * the decoder instance to go back to VDEC_STATUS_CONNECTED
2060 * from VDEC_STATUS_ACTIVE by its own. However, if for some
 2061 * reason the decoder can not exit by itself (dead decoding
2062 * or whatever), then we may have to add another vdec API
2063 * to kill the vdec and release its HW resource and make it
2064 * become inactive again.
2065 * if ((core->active_vdec) &&
2066 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
2067 * }
2068 */
2069
b9164398
NQ
2070 /* check disconnected decoders */
2071 list_for_each_entry_safe(vdec, tmp,
2072 &core->connected_vdec_list, list) {
2073 if ((vdec->status == VDEC_STATUS_CONNECTED) &&
2074 (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
2075 if (core->active_vdec == vdec)
2076 core->active_vdec = NULL;
2077 list_move(&vdec->list, &disconnecting_list);
2078 }
2079 }
2080
a6c89e96
NQ
2081 /* elect next vdec to be scheduled */
2082 vdec = core->active_vdec;
2083 if (vdec) {
2084 vdec = list_entry(vdec->list.next, struct vdec_s, list);
b9164398
NQ
2085 list_for_each_entry_from(vdec,
2086 &core->connected_vdec_list, list) {
a6c89e96
NQ
2087 sched_mask = vdec_schedule_mask(vdec,
2088 core->sched_mask);
2089 if (!sched_mask)
2090 continue;
2091 sched_mask = vdec_ready_to_run(vdec,
2092 sched_mask);
2093 if (sched_mask)
b9164398
NQ
2094 break;
2095 }
2096
a6c89e96
NQ
2097 if (&vdec->list == &core->connected_vdec_list)
2098 vdec = NULL;
2099 }
2100
2101 if (!vdec) {
2102 /* search from beginning */
2103 list_for_each_entry(vdec,
2104 &core->connected_vdec_list, list) {
2105 sched_mask = vdec_schedule_mask(vdec,
2106 core->sched_mask);
2107 if (vdec == core->active_vdec) {
2108 if (!sched_mask) {
2109 vdec = NULL;
b9164398 2110 break;
a6c89e96
NQ
2111 }
2112
2113 sched_mask = vdec_ready_to_run(vdec,
2114 sched_mask);
b9164398 2115
a6c89e96 2116 if (!sched_mask) {
b9164398
NQ
2117 vdec = NULL;
2118 break;
2119 }
a6c89e96 2120 break;
b9164398 2121 }
a6c89e96
NQ
2122
2123 if (!sched_mask)
2124 continue;
2125
2126 sched_mask = vdec_ready_to_run(vdec,
2127 sched_mask);
2128 if (sched_mask)
2129 break;
b9164398
NQ
2130 }
2131
2132 if (&vdec->list == &core->connected_vdec_list)
2133 vdec = NULL;
b9164398
NQ
2134 }
2135
a6c89e96
NQ
2136 worker = vdec;
2137
2138 if (vdec) {
2139 unsigned long mask = sched_mask;
2140 unsigned long i;
2141
2142 /* setting active_mask should be atomic.
2143 * it can be modified by decoder driver callbacks.
2144 */
2145 while (sched_mask) {
2146 i = __ffs(sched_mask);
2147 set_bit(i, &vdec->active_mask);
2148 sched_mask &= ~(1 << i);
2149 }
2150
2151 /* vdec's sched_mask is only set from core thread */
2152 vdec->sched_mask |= mask;
b9164398 2153
b9164398
NQ
2154 vdec_set_status(vdec, VDEC_STATUS_ACTIVE);
2155
a6c89e96 2156 core->sched_mask |= mask;
b9164398 2157 core->active_vdec = vdec;
fe96802b 2158#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
b9164398
NQ
2159 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
2160#endif
a6c89e96
NQ
2161 vdec_prepare_run(vdec, mask);
2162#ifdef VDEC_DEBUG_SUPPORT
2163 inc_profi_count(mask, vdec->run_count);
2164 update_profi_clk_run(vdec, mask, get_current_clk());
2165#endif
2166 vdec->run(vdec, mask, vdec_callback, core);
2167
fe96802b 2168
a6c89e96
NQ
2169 /* we have some cores scheduled, keep working until
2170 * all vdecs are checked with no cores to schedule
2171 */
2172 up(&core->sem);
b9164398
NQ
2173 }
2174
2175 /* remove disconnected decoder from active list */
2176 list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
2177 list_del(&vdec->list);
2178 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
2179 complete(&vdec->inactive_done);
2180 }
2181
a6c89e96
NQ
2182 /* if there is no new work scheduled and nothing
2183 * is running, sleep 20ms
2184 */
2185 if ((!worker) && (!core->sched_mask)) {
2186 msleep(20);
b9164398
NQ
2187 up(&core->sem);
2188 }
3f4a083c 2189
b9164398
NQ
2190 }
2191
2192 return 0;
2193}
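/*
 * Illustration only: the election above resumes scanning from the entry
 * after the previously active vdec and, if that pass finds nothing, scans
 * again from the list head.  A simplified sketch of that round-robin walk
 * (it omits the special handling of core->active_vdec in the second pass);
 * the helper name is hypothetical.
 */
#if 0
static struct vdec_s *elect_next_vdec(struct vdec_core_s *core)
{
	struct vdec_s *vdec = core->active_vdec;

	if (vdec) {
		vdec = list_entry(vdec->list.next, struct vdec_s, list);
		list_for_each_entry_from(vdec,
			&core->connected_vdec_list, list) {
			if (vdec_ready_to_run(vdec,
				vdec_schedule_mask(vdec, core->sched_mask)))
				return vdec;
		}
	}

	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		if (vdec_ready_to_run(vdec,
			vdec_schedule_mask(vdec, core->sched_mask)))
			return vdec;
	}

	return NULL;
}
#endif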
2194
2195#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
2196static bool test_hevc(u32 decomp_addr, u32 us_delay)
2197{
2198 int i;
2199
2200 /* SW_RESET IPP */
2201 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
2202 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);
2203
2204 /* initialize all canvas table */
2205 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
2206 for (i = 0; i < 32; i++)
2207 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
2208 0x1 | (i << 8) | decomp_addr);
2209 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
2210 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
2211 for (i = 0; i < 32; i++)
2212 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
2213
2214 /* Initialize mcrcc */
2215 WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
2216 WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
2217 WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
2218 WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
2219
2220 /* Decomp initialize */
2221 WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
2222 WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
2223
2224 /* Frame level initialization */
2225 WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
2226 WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
2227 WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
2228 WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));
2229
2230 WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
2231 WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);
2232
2233 /* Enable SWIMP mode */
2234 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);
2235
2236 /* Enable frame */
2237 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
2238 WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);
2239
2240 /* Send SW-command CTB info */
2241 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);
2242
2243 /* Send PU_command */
2244 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
2245 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
2246 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
2247 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);
2248
2249 udelay(us_delay);
2250
2251 WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);
2252
2253 return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
2254}
2255
2256void vdec_poweron(enum vdec_type_e core)
2257{
2258 void *decomp_addr = NULL;
2259 dma_addr_t decomp_dma_addr;
2260 u32 decomp_addr_aligned = 0;
2261 int hevc_loop = 0;
2262
2263 if (core >= VDEC_MAX)
2264 return;
2265
2266 mutex_lock(&vdec_mutex);
2267
2268 vdec_core->power_ref_count[core]++;
2269 if (vdec_core->power_ref_count[core] > 1) {
2270 mutex_unlock(&vdec_mutex);
2271 return;
2272 }
2273
2274 if (vdec_on(core)) {
2275 mutex_unlock(&vdec_mutex);
2276 return;
2277 }
2278
2279 if (hevc_workaround_needed() &&
2280 (core == VDEC_HEVC)) {
2281 decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
2282 SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);
2283
2284 if (decomp_addr) {
2285 decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
2286 memset((u8 *)decomp_addr +
2287 (decomp_addr_aligned - decomp_dma_addr),
2288 0xff, SZ_4K);
2289 } else
2290 pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
2291 }
2292
2293 if (core == VDEC_1) {
2294 /* vdec1 power on */
2295 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2296 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~0xc);
2297 /* wait 10uS */
2298 udelay(10);
2299 /* vdec1 soft reset */
2300 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2301 WRITE_VREG(DOS_SW_RESET0, 0);
2302 /* enable vdec1 clock */
2303 /*
e0614bf7
ZZ
 2304 *add power-on vdec clock level setting, only for the m8 chip;
 2305 * m8baby and m8m2 can dynamically adjust the vdec clock and
 2306 * power on with the default clock level
2307 */
b9164398
NQ
2308 vdec_clock_hi_enable();
2309 /* power up vdec memories */
2310 WRITE_VREG(DOS_MEM_PD_VDEC, 0);
2311 /* remove vdec1 isolation */
2312 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2313 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~0xC0);
2314 /* reset DOS top registers */
2315 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
2316 if (get_cpu_type() >=
2317 MESON_CPU_MAJOR_ID_GXBB) {
2318 /*
e0614bf7
ZZ
2319 *enable VDEC_1 DMC request
2320 */
b9164398
NQ
2321 unsigned long flags;
2322
2323 spin_lock_irqsave(&vdec_spin_lock, flags);
2324 codec_dmcbus_write(DMC_REQ_CTRL,
2325 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 13));
2326 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2327 }
2328 } else if (core == VDEC_2) {
2329 if (has_vdec2()) {
2330 /* vdec2 power on */
2331 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2332 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2333 ~0x30);
2334 /* wait 10uS */
2335 udelay(10);
2336 /* vdec2 soft reset */
2337 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2338 WRITE_VREG(DOS_SW_RESET2, 0);
2339 /* enable vdec1 clock */
2340 vdec2_clock_hi_enable();
2341 /* power up vdec memories */
2342 WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
2343 /* remove vdec2 isolation */
2344 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2345 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2346 ~0x300);
2347 /* reset DOS top registers */
2348 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2349 }
2350 } else if (core == VDEC_HCODEC) {
2351 if (has_hdec()) {
2352 /* hcodec power on */
2353 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2354 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2355 ~0x3);
2356 /* wait 10uS */
2357 udelay(10);
2358 /* hcodec soft reset */
2359 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2360 WRITE_VREG(DOS_SW_RESET1, 0);
2361 /* enable hcodec clock */
2362 hcodec_clock_enable();
2363 /* power up hcodec memories */
2364 WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
2365 /* remove hcodec isolation */
2366 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2367 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2368 ~0x30);
2369 }
2370 } else if (core == VDEC_HEVC) {
2371 if (has_hevc_vdec()) {
2372 bool hevc_fixed = false;
2373
2374 while (!hevc_fixed) {
2375 /* hevc power on */
2376 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2377 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2378 ~0xc0);
2379 /* wait 10uS */
2380 udelay(10);
2381 /* hevc soft reset */
2382 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2383 WRITE_VREG(DOS_SW_RESET3, 0);
2384 /* enable hevc clock */
2385 hevc_clock_hi_enable();
118bcc65 2386 hevc_back_clock_hi_enable();
b9164398
NQ
2387 /* power up hevc memories */
2388 WRITE_VREG(DOS_MEM_PD_HEVC, 0);
2389 /* remove hevc isolation */
2390 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2391 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2392 ~0xc00);
2393
2394 if (!hevc_workaround_needed())
2395 break;
2396
2397 if (decomp_addr)
2398 hevc_fixed = test_hevc(
2399 decomp_addr_aligned, 20);
2400
2401 if (!hevc_fixed) {
2402 hevc_loop++;
2403
2404 mutex_unlock(&vdec_mutex);
2405
2406 if (hevc_loop >= HEVC_TEST_LIMIT) {
2407 pr_warn("hevc power sequence over limit\n");
2408 pr_warn("=====================================================\n");
2409 pr_warn(" This chip is identified to have HW failure.\n");
2410 pr_warn(" Please contact sqa-platform to replace the platform.\n");
2411 pr_warn("=====================================================\n");
2412
2413 panic("Force panic for chip detection !!!\n");
2414
2415 break;
2416 }
2417
2418 vdec_poweroff(VDEC_HEVC);
2419
2420 mdelay(10);
2421
2422 mutex_lock(&vdec_mutex);
2423 }
2424 }
2425
2426 if (hevc_loop > hevc_max_reset_count)
2427 hevc_max_reset_count = hevc_loop;
2428
2429 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
2430 udelay(10);
2431 WRITE_VREG(DOS_SW_RESET3, 0);
2432 }
2433 }
2434
2435 if (decomp_addr)
2436 codec_mm_dma_free_coherent(MEM_NAME,
2437 SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);
2438
2439 mutex_unlock(&vdec_mutex);
2440}
2441EXPORT_SYMBOL(vdec_poweron);
2442
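/*
 * Illustration only: vdec_poweron()/vdec_poweroff() are reference counted
 * per core under vdec_mutex, so callers simply pair the two.  A minimal
 * usage sketch with a hypothetical decoder init/exit path:
 */
#if 0
static int example_decoder_init(void)
{
	vdec_poweron(VDEC_HEVC);	/* first caller powers the core up */
	/* ... load firmware, program registers ... */
	return 0;
}

static void example_decoder_exit(void)
{
	vdec_poweroff(VDEC_HEVC);	/* last caller powers the core down */
}
#endif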
2443void vdec_poweroff(enum vdec_type_e core)
2444{
2445 if (core >= VDEC_MAX)
2446 return;
2447
2448 mutex_lock(&vdec_mutex);
2449
2450 vdec_core->power_ref_count[core]--;
2451 if (vdec_core->power_ref_count[core] > 0) {
2452 mutex_unlock(&vdec_mutex);
2453 return;
2454 }
2455
2456 if (core == VDEC_1) {
2457 if (get_cpu_type() >=
2458 MESON_CPU_MAJOR_ID_GXBB) {
2459 /* disable VDEC_1 DMC REQ*/
2460 unsigned long flags;
2461
2462 spin_lock_irqsave(&vdec_spin_lock, flags);
2463 codec_dmcbus_write(DMC_REQ_CTRL,
2464 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 13)));
2465 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2466 udelay(10);
2467 }
2468 /* enable vdec1 isolation */
2469 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2470 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
2471 /* power off vdec1 memories */
2472 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
2473 /* disable vdec1 clock */
2474 vdec_clock_off();
2475 /* vdec1 power off */
2476 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2477 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
2478 } else if (core == VDEC_2) {
2479 if (has_vdec2()) {
2480 /* enable vdec2 isolation */
2481 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2482 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2483 0x300);
2484 /* power off vdec2 memories */
2485 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
2486 /* disable vdec2 clock */
2487 vdec2_clock_off();
2488 /* vdec2 power off */
2489 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2490 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
2491 0x30);
2492 }
2493 } else if (core == VDEC_HCODEC) {
2494 if (has_hdec()) {
2495 /* enable hcodec isolation */
2496 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2497 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2498 0x30);
2499 /* power off hcodec memories */
2500 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
2501 /* disable hcodec clock */
2502 hcodec_clock_off();
2503 /* hcodec power off */
2504 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2505 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
2506 }
2507 } else if (core == VDEC_HEVC) {
2508 if (has_hevc_vdec()) {
28e318df
NQ
2509 if (no_powerdown == 0) {
2510 /* enable hevc isolation */
2511 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
b9164398
NQ
2512 READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
2513 0xc00);
2514 /* power off hevc memories */
2515 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
a6c89e96 2516
b9164398
NQ
2517 /* disable hevc clock */
2518 hevc_clock_off();
a6c89e96
NQ
2519 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
2520 hevc_back_clock_off();
2521
b9164398
NQ
2522 /* hevc power off */
2523 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2524 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
2525 0xc0);
28e318df
NQ
2526 } else {
2527 pr_info("!!!!!!!!not power down\n");
2528 hevc_reset_core(NULL);
2529 no_powerdown = 0;
2530 }
b9164398
NQ
2531 }
2532 }
2533 mutex_unlock(&vdec_mutex);
2534}
2535EXPORT_SYMBOL(vdec_poweroff);
2536
2537bool vdec_on(enum vdec_type_e core)
2538{
2539 bool ret = false;
2540
2541 if (core == VDEC_1) {
2542 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc) == 0) &&
2543 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
2544 ret = true;
2545 } else if (core == VDEC_2) {
2546 if (has_vdec2()) {
2547 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
2548 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
2549 ret = true;
2550 }
2551 } else if (core == VDEC_HCODEC) {
2552 if (has_hdec()) {
2553 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x3) == 0) &&
2554 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
2555 ret = true;
2556 }
2557 } else if (core == VDEC_HEVC) {
2558 if (has_hevc_vdec()) {
2559 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0xc0) == 0) &&
2560 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
2561 ret = true;
2562 }
2563 }
2564
2565 return ret;
2566}
2567EXPORT_SYMBOL(vdec_on);
2568
2569#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
2570void vdec_poweron(enum vdec_type_e core)
2571{
2572 ulong flags;
2573
2574 spin_lock_irqsave(&lock, flags);
2575
2576 if (core == VDEC_1) {
2577 /* vdec1 soft reset */
2578 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
2579 WRITE_VREG(DOS_SW_RESET0, 0);
2580 /* enable vdec1 clock */
2581 vdec_clock_enable();
2582 /* reset DOS top registers */
2583 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
2584 } else if (core == VDEC_2) {
2585 /* vdec2 soft reset */
2586 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
2587 WRITE_VREG(DOS_SW_RESET2, 0);
2588 /* enable vdec2 clock */
2589 vdec2_clock_enable();
2590 /* reset DOS top registers */
2591 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
2592 } else if (core == VDEC_HCODEC) {
2593 /* hcodec soft reset */
2594 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2595 WRITE_VREG(DOS_SW_RESET1, 0);
2596 /* enable hcodec clock */
2597 hcodec_clock_enable();
2598 }
2599
2600 spin_unlock_irqrestore(&lock, flags);
2601}
2602
2603void vdec_poweroff(enum vdec_type_e core)
2604{
2605 ulong flags;
2606
2607 spin_lock_irqsave(&lock, flags);
2608
2609 if (core == VDEC_1) {
2610 /* disable vdec1 clock */
2611 vdec_clock_off();
2612 } else if (core == VDEC_2) {
2613 /* disable vdec2 clock */
2614 vdec2_clock_off();
2615 } else if (core == VDEC_HCODEC) {
2616 /* disable hcodec clock */
2617 hcodec_clock_off();
2618 }
2619
2620 spin_unlock_irqrestore(&lock, flags);
2621}
2622
2623bool vdec_on(enum vdec_type_e core)
2624{
2625 bool ret = false;
2626
2627 if (core == VDEC_1) {
2628 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
2629 ret = true;
2630 } else if (core == VDEC_2) {
2631 if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
2632 ret = true;
2633 } else if (core == VDEC_HCODEC) {
2634 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
2635 ret = true;
2636 }
2637
2638 return ret;
2639}
2640#endif
2641
2642int vdec_source_changed(int format, int width, int height, int fps)
2643{
 2644 /* todo: add level routines for clock adjustment per chip */
2645 int ret = -1;
2646 static int on_setting;
2647
2648 if (on_setting > 0)
2649 return ret;/*on changing clk,ignore this change*/
2650
2651 if (vdec_source_get(VDEC_1) == width * height * fps)
2652 return ret;
2653
2654
2655 on_setting = 1;
2656 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 2657 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
2658 width, height, fps, vdec_clk_get(VDEC_1));
2659 on_setting = 0;
2660 return ret;
2661
2662}
2663EXPORT_SYMBOL(vdec_source_changed);
2664
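/*
 * Illustration only: decoders call vdec_source_changed() whenever the
 * stream geometry or frame rate changes so the clock level can follow.
 * A minimal usage sketch; the caller and the chosen values are
 * hypothetical.
 */
#if 0
static void example_on_sequence_header(int width, int height, int fps)
{
	/* e.g. 3840 x 2160 at 60 fps may raise the VDEC_1 clock */
	vdec_source_changed(VFORMAT_H264, width, height, fps);
}
#endif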
fe96802b
NQ
2665void hevc_reset_core(struct vdec_s *vdec)
2666{
2667 unsigned long flags;
2668 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
2669 spin_lock_irqsave(&vdec_spin_lock, flags);
2670 codec_dmcbus_write(DMC_REQ_CTRL,
2671 codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
2672 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2673
2674 while (!(codec_dmcbus_read(DMC_CHAN_STS)
2675 & (1 << 4)))
2676 ;
2677
28e318df 2678 if (vdec == NULL || input_frame_based(vdec))
fe96802b
NQ
2679 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
2680
2681 /*
2682 * 2: assist
2683 * 3: parser
2684 * 4: parser_state
2685 * 8: dblk
2686 * 11:mcpu
2687 * 12:ccpu
2688 * 13:ddr
2689 * 14:iqit
2690 * 15:ipp
2691 * 17:qdct
2692 * 18:mpred
2693 * 19:sao
2694 * 24:hevc_afifo
2695 */
2696 WRITE_VREG(DOS_SW_RESET3,
2697 (1<<3)|(1<<4)|(1<<8)|(1<<11)|
2698 (1<<12)|(1<<13)|(1<<14)|(1<<15)|
2699 (1<<17)|(1<<18)|(1<<19)|(1<<24));
2700
2701 WRITE_VREG(DOS_SW_RESET3, 0);
2702
2703
2704 spin_lock_irqsave(&vdec_spin_lock, flags);
2705 codec_dmcbus_write(DMC_REQ_CTRL,
2706 codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4));
2707 spin_unlock_irqrestore(&vdec_spin_lock, flags);
2708
2709}
2710EXPORT_SYMBOL(hevc_reset_core);
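/*
 * Illustration only: hevc_reset_core() can be called with a specific vdec
 * instance or with NULL, as the no_powerdown path in vdec_poweroff() above
 * does.  A minimal sketch of an error-recovery call site; the function
 * name and surrounding steps are hypothetical.
 */
#if 0
static void example_recover_hevc(struct vdec_s *vdec)
{
	/* stop the decoder microcode first (decoder specific) */
	hevc_reset_core(vdec);
	/* ... reload firmware and restart decoding ... */
}
#endif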
2711
b9164398
NQ
2712int vdec2_source_changed(int format, int width, int height, int fps)
2713{
2714 int ret = -1;
2715 static int on_setting;
2716
2717 if (has_vdec2()) {
 2718 /* todo: add level routines for clock adjustment per chip */
2719 if (on_setting != 0)
2720 return ret;/*on changing clk,ignore this change*/
2721
2722 if (vdec_source_get(VDEC_2) == width * height * fps)
2723 return ret;
2724
2725 on_setting = 1;
2726 ret = vdec_source_changed_for_clk_set(format,
2727 width, height, fps);
5b851ff9 2728 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
2729 width, height, fps, vdec_clk_get(VDEC_2));
2730 on_setting = 0;
2731 return ret;
2732 }
2733 return 0;
2734}
2735EXPORT_SYMBOL(vdec2_source_changed);
2736
2737int hevc_source_changed(int format, int width, int height, int fps)
2738{
 2739 /* todo: add level routines for clock adjustment per chip */
2740 int ret = -1;
2741 static int on_setting;
2742
2743 if (on_setting != 0)
2744 return ret;/*on changing clk,ignore this change*/
2745
2746 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
2747 return ret;
2748
2749 on_setting = 1;
2750 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
5b851ff9 2751 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
b9164398
NQ
2752 width, height, fps, vdec_clk_get(VDEC_HEVC));
2753 on_setting = 0;
2754
2755 return ret;
2756}
2757EXPORT_SYMBOL(hevc_source_changed);
2758
b9164398
NQ
2759static struct am_reg am_risc[] = {
2760 {"MSP", 0x300},
2761 {"MPSR", 0x301},
2762 {"MCPU_INT_BASE", 0x302},
2763 {"MCPU_INTR_GRP", 0x303},
2764 {"MCPU_INTR_MSK", 0x304},
2765 {"MCPU_INTR_REQ", 0x305},
2766 {"MPC-P", 0x306},
2767 {"MPC-D", 0x307},
2768 {"MPC_E", 0x308},
2769 {"MPC_W", 0x309},
2770 {"CSP", 0x320},
2771 {"CPSR", 0x321},
2772 {"CCPU_INT_BASE", 0x322},
2773 {"CCPU_INTR_GRP", 0x323},
2774 {"CCPU_INTR_MSK", 0x324},
2775 {"CCPU_INTR_REQ", 0x325},
2776 {"CPC-P", 0x326},
2777 {"CPC-D", 0x327},
2778 {"CPC_E", 0x328},
2779 {"CPC_W", 0x329},
2780 {"AV_SCRATCH_0", 0x09c0},
2781 {"AV_SCRATCH_1", 0x09c1},
2782 {"AV_SCRATCH_2", 0x09c2},
2783 {"AV_SCRATCH_3", 0x09c3},
2784 {"AV_SCRATCH_4", 0x09c4},
2785 {"AV_SCRATCH_5", 0x09c5},
2786 {"AV_SCRATCH_6", 0x09c6},
2787 {"AV_SCRATCH_7", 0x09c7},
2788 {"AV_SCRATCH_8", 0x09c8},
2789 {"AV_SCRATCH_9", 0x09c9},
2790 {"AV_SCRATCH_A", 0x09ca},
2791 {"AV_SCRATCH_B", 0x09cb},
2792 {"AV_SCRATCH_C", 0x09cc},
2793 {"AV_SCRATCH_D", 0x09cd},
2794 {"AV_SCRATCH_E", 0x09ce},
2795 {"AV_SCRATCH_F", 0x09cf},
2796 {"AV_SCRATCH_G", 0x09d0},
2797 {"AV_SCRATCH_H", 0x09d1},
2798 {"AV_SCRATCH_I", 0x09d2},
2799 {"AV_SCRATCH_J", 0x09d3},
2800 {"AV_SCRATCH_K", 0x09d4},
2801 {"AV_SCRATCH_L", 0x09d5},
2802 {"AV_SCRATCH_M", 0x09d6},
2803 {"AV_SCRATCH_N", 0x09d7},
2804};
2805
2806static ssize_t amrisc_regs_show(struct class *class,
2807 struct class_attribute *attr, char *buf)
2808{
2809 char *pbuf = buf;
2810 struct am_reg *regs = am_risc;
2811 int rsize = sizeof(am_risc) / sizeof(struct am_reg);
2812 int i;
e0614bf7 2813 unsigned int val;
b9164398
NQ
2814 ssize_t ret;
2815
2816 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) {
2817 mutex_lock(&vdec_mutex);
2818 if (!vdec_on(VDEC_1)) {
2819 mutex_unlock(&vdec_mutex);
2820 pbuf += sprintf(pbuf, "amrisc is power off\n");
2821 ret = pbuf - buf;
2822 return ret;
2823 }
2824 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2825 /*TODO:M6 define */
2826 /*
2827 * switch_mod_gate_by_type(MOD_VDEC, 1);
2828 */
2829 amports_switch_gate("vdec", 1);
2830 }
2831 pbuf += sprintf(pbuf, "amrisc registers show:\n");
2832 for (i = 0; i < rsize; i++) {
2833 val = READ_VREG(regs[i].offset);
2834 pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
2835 regs[i].name, regs[i].offset, val, val);
2836 }
2837 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
2838 mutex_unlock(&vdec_mutex);
2839 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2840 /*TODO:M6 define */
2841 /*
2842 * switch_mod_gate_by_type(MOD_VDEC, 0);
2843 */
2844 amports_switch_gate("vdec", 0);
2845 }
2846 ret = pbuf - buf;
2847 return ret;
2848}
2849
2850static ssize_t dump_trace_show(struct class *class,
2851 struct class_attribute *attr, char *buf)
2852{
2853 int i;
2854 char *pbuf = buf;
2855 ssize_t ret;
2856 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
2857
2858 if (!trace_buf) {
 2859 pbuf += sprintf(pbuf, "no memory for trace buffer\n");
2860 ret = pbuf - buf;
2861 return ret;
2862 }
2863 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) {
2864 mutex_lock(&vdec_mutex);
2865 if (!vdec_on(VDEC_1)) {
2866 mutex_unlock(&vdec_mutex);
2867 kfree(trace_buf);
2868 pbuf += sprintf(pbuf, "amrisc is power off\n");
2869 ret = pbuf - buf;
2870 return ret;
2871 }
2872 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2873 /*TODO:M6 define */
2874 /*
2875 * switch_mod_gate_by_type(MOD_VDEC, 1);
2876 */
2877 amports_switch_gate("vdec", 1);
2878 }
2879 pr_info("dump trace steps:%d start\n", debug_trace_num);
2880 i = 0;
2881 while (i <= debug_trace_num - 16) {
2882 trace_buf[i] = READ_VREG(MPC_E);
2883 trace_buf[i + 1] = READ_VREG(MPC_E);
2884 trace_buf[i + 2] = READ_VREG(MPC_E);
2885 trace_buf[i + 3] = READ_VREG(MPC_E);
2886 trace_buf[i + 4] = READ_VREG(MPC_E);
2887 trace_buf[i + 5] = READ_VREG(MPC_E);
2888 trace_buf[i + 6] = READ_VREG(MPC_E);
2889 trace_buf[i + 7] = READ_VREG(MPC_E);
2890 trace_buf[i + 8] = READ_VREG(MPC_E);
2891 trace_buf[i + 9] = READ_VREG(MPC_E);
2892 trace_buf[i + 10] = READ_VREG(MPC_E);
2893 trace_buf[i + 11] = READ_VREG(MPC_E);
2894 trace_buf[i + 12] = READ_VREG(MPC_E);
2895 trace_buf[i + 13] = READ_VREG(MPC_E);
2896 trace_buf[i + 14] = READ_VREG(MPC_E);
2897 trace_buf[i + 15] = READ_VREG(MPC_E);
2898 i += 16;
2899 };
2900 pr_info("dump trace steps:%d finished\n", debug_trace_num);
2901 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
2902 mutex_unlock(&vdec_mutex);
2903 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
2904 /*TODO:M6 define */
2905 /*
2906 * switch_mod_gate_by_type(MOD_VDEC, 0);
2907 */
2908 amports_switch_gate("vdec", 0);
2909 }
2910 for (i = 0; i < debug_trace_num; i++) {
2911 if (i % 4 == 0) {
2912 if (i % 16 == 0)
2913 pbuf += sprintf(pbuf, "\n");
2914 else if (i % 8 == 0)
2915 pbuf += sprintf(pbuf, " ");
2916 else /* 4 */
2917 pbuf += sprintf(pbuf, " ");
2918 }
2919 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
2920 }
2921 while (i < debug_trace_num)
2922 ;
2923 kfree(trace_buf);
2924 pbuf += sprintf(pbuf, "\n");
2925 ret = pbuf - buf;
2926 return ret;
2927}
2928
2929static ssize_t clock_level_show(struct class *class,
2930 struct class_attribute *attr, char *buf)
2931{
2932 char *pbuf = buf;
2933 size_t ret;
2934
2935 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
2936
2937 if (has_vdec2())
2938 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
2939
2940 if (has_hevc_vdec())
2941 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
2942
2943 ret = pbuf - buf;
2944 return ret;
2945}
2946
2947static ssize_t store_poweron_clock_level(struct class *class,
2948 struct class_attribute *attr,
2949 const char *buf, size_t size)
2950{
e0614bf7 2951 unsigned int val;
b9164398
NQ
2952 ssize_t ret;
2953
2954 /*ret = sscanf(buf, "%d", &val);*/
2955 ret = kstrtoint(buf, 0, &val);
2956
2957 if (ret != 0)
2958 return -EINVAL;
2959 poweron_clock_level = val;
2960 return size;
2961}
2962
2963static ssize_t show_poweron_clock_level(struct class *class,
2964 struct class_attribute *attr, char *buf)
2965{
2966 return sprintf(buf, "%d\n", poweron_clock_level);
2967}
2968
2969/*
e0614bf7
ZZ
 2970 *if keep_vdec_mem == 1, never
 2971 *release the vdec 64M memory,
 2972 *for fast play.
2973 */
b9164398
NQ
2974static ssize_t store_keep_vdec_mem(struct class *class,
2975 struct class_attribute *attr,
2976 const char *buf, size_t size)
2977{
e0614bf7 2978 unsigned int val;
b9164398
NQ
2979 ssize_t ret;
2980
2981 /*ret = sscanf(buf, "%d", &val);*/
2982 ret = kstrtoint(buf, 0, &val);
2983 if (ret != 0)
2984 return -EINVAL;
2985 keep_vdec_mem = val;
2986 return size;
2987}
2988
2989static ssize_t show_keep_vdec_mem(struct class *class,
2990 struct class_attribute *attr, char *buf)
2991{
2992 return sprintf(buf, "%d\n", keep_vdec_mem);
2993}
2994
a6c89e96
NQ
2995#ifdef VDEC_DEBUG_SUPPORT
2996static ssize_t store_debug(struct class *class,
2997 struct class_attribute *attr,
2998 const char *buf, size_t size)
2999{
3000 struct vdec_s *vdec;
3001 struct vdec_core_s *core = vdec_core;
3002 unsigned long flags;
3003
3004 unsigned id;
3005 unsigned val;
3006 ssize_t ret;
3007 char cbuf[32];
3008
3009 cbuf[0] = 0;
3010 ret = sscanf(buf, "%s %x %x", cbuf, &id, &val);
3011 /*pr_info(
3012 "%s(%s)=>ret %ld: %s, %x, %x\n",
3013 __func__, buf, ret, cbuf, id, val);*/
3014 if (strcmp(cbuf, "schedule") == 0) {
3015 pr_info("VDEC_DEBUG: force schedule\n");
3016 up(&core->sem);
3017 } else if (strcmp(cbuf, "power_off") == 0) {
3018 pr_info("VDEC_DEBUG: power off core %d\n", id);
3019 vdec_poweroff(id);
3020 } else if (strcmp(cbuf, "power_on") == 0) {
3021 pr_info("VDEC_DEBUG: power_on core %d\n", id);
3022 vdec_poweron(id);
3023 } else if (strcmp(cbuf, "wr") == 0) {
3024 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
3025 id, val);
3026 WRITE_VREG(id, val);
3027 } else if (strcmp(cbuf, "rd") == 0) {
3028 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
3029 id, READ_VREG(id));
3030 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
3031 pr_info(
3032 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
3033 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
3034 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
3035 }
3036
3037 flags = vdec_core_lock(vdec_core);
3038
3039 list_for_each_entry(vdec,
3040 &core->connected_vdec_list, list) {
3041 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
3042 if (((vdec->status == VDEC_STATUS_CONNECTED
3043 || vdec->status == VDEC_STATUS_ACTIVE)) &&
3044 (vdec->id == id)) {
3045 /*to add*/
3046 break;
3047 }
3048 }
3049 vdec_core_unlock(vdec_core, flags);
3050 return size;
3051}
3052
3053static ssize_t show_debug(struct class *class,
3054 struct class_attribute *attr, char *buf)
3055{
3056 char *pbuf = buf;
3057 struct vdec_s *vdec;
3058 struct vdec_core_s *core = vdec_core;
3059 unsigned long flags = vdec_core_lock(vdec_core);
3060
3061 pbuf += sprintf(pbuf,
3062 "============== help:\n");
3063 pbuf += sprintf(pbuf,
3064 "'echo xxx > debug' usuage:\n");
3065 pbuf += sprintf(pbuf,
3066 "schedule - trigger schedule thread to run\n");
3067 pbuf += sprintf(pbuf,
3068 "power_off core_num - call vdec_poweroff(core_num)\n");
3069 pbuf += sprintf(pbuf,
3070 "power_on core_num - call vdec_poweron(core_num)\n");
3071 pbuf += sprintf(pbuf,
3072 "wr adr val - call WRITE_VREG(adr, val)\n");
3073 pbuf += sprintf(pbuf,
3074 "rd adr - call READ_VREG(adr)\n");
3075 pbuf += sprintf(pbuf,
3076 "read_hevc_clk_reg - read HHI register for hevc clk\n");
3077 pbuf += sprintf(pbuf,
3078 "===================\n");
3079
3080 pbuf += sprintf(pbuf,
3081 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
3082 list_for_each_entry(vdec,
3083 &core->connected_vdec_list, list) {
3084 enum vdec_type_e type;
3085
3086 for (type = VDEC_1; type < VDEC_MAX; type++) {
3087 if (vdec->core_mask & (1 << type)) {
3088 pbuf += sprintf(pbuf, "%s(%d):",
3089 vdec->vf_provider_name, type);
3090 pbuf += sprintf(pbuf, "\t%d",
3091 vdec->check_count[type]);
3092 pbuf += sprintf(pbuf, "\t%d",
3093 vdec->run_count[type]);
3094 pbuf += sprintf(pbuf, "\t%d",
3095 vdec->input_underrun_count[type]);
3096 pbuf += sprintf(pbuf, "\t%d",
3097 vdec->not_run_ready_count[type]);
3098 pbuf += sprintf(pbuf,
3099 "\t%d%%\n",
3100 vdec->total_clk[type] == 0 ? 0 :
3101 (u32)((vdec->run_clk[type] * 100)
3102 / vdec->total_clk[type]));
3103 }
3104 }
3105 }
3106
3107 vdec_core_unlock(vdec_core, flags);
3108 return pbuf - buf;
3109
3110}
3111#endif
b9164398
NQ
3112
 3113/*irq numbers are the same as in .dts*/
3114/*
e0614bf7
ZZ
3115 * interrupts = <0 3 1
3116 * 0 23 1
3117 * 0 32 1
3118 * 0 43 1
3119 * 0 44 1
3120 * 0 45 1>;
3121 * interrupt-names = "vsync",
3122 * "demux",
3123 * "parser",
3124 * "mailbox_0",
3125 * "mailbox_1",
3126 * "mailbox_2";
3127 */
b9164398
NQ
3128s32 vdec_request_threaded_irq(enum vdec_irq_num num,
3129 irq_handler_t handler,
3130 irq_handler_t thread_fn,
3131 unsigned long irqflags,
3132 const char *devname, void *dev)
3133{
3134 s32 res_irq;
3135 s32 ret = 0;
3136
3137 if (num >= VDEC_IRQ_MAX) {
3138 pr_err("[%s] request irq error, irq num too big!", __func__);
3139 return -EINVAL;
3140 }
3141
3142 if (vdec_core->isr_context[num].irq < 0) {
3143 res_irq = platform_get_irq(
3144 vdec_core->vdec_core_platform_device, num);
3145 if (res_irq < 0) {
3146 pr_err("[%s] get irq error!", __func__);
3147 return -EINVAL;
3148 }
3149
3150 vdec_core->isr_context[num].irq = res_irq;
3151 vdec_core->isr_context[num].dev_isr = handler;
3152 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3153 vdec_core->isr_context[num].dev_id = dev;
3154
3155 ret = request_threaded_irq(res_irq,
3156 vdec_isr,
3157 vdec_thread_isr,
3158 (thread_fn) ? IRQF_ONESHOT : irqflags,
3159 devname,
3160 &vdec_core->isr_context[num]);
3161
3162 if (ret) {
3163 vdec_core->isr_context[num].irq = -1;
3164 vdec_core->isr_context[num].dev_isr = NULL;
3165 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3166 vdec_core->isr_context[num].dev_id = NULL;
3167
3168 pr_err("vdec irq register error for %s.\n", devname);
3169 return -EIO;
3170 }
3171 } else {
3172 vdec_core->isr_context[num].dev_isr = handler;
3173 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3174 vdec_core->isr_context[num].dev_id = dev;
3175 }
3176
3177 return ret;
3178}
3179EXPORT_SYMBOL(vdec_request_threaded_irq);
3180
3181s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
3182 const char *devname, void *dev)
3183{
5b851ff9 3184 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
b9164398
NQ
3185
3186 return vdec_request_threaded_irq(num,
3187 handler,
3188 NULL,/*no thread_fn*/
3189 IRQF_SHARED,
3190 devname,
3191 dev);
3192}
3193EXPORT_SYMBOL(vdec_request_irq);
3194
3195void vdec_free_irq(enum vdec_irq_num num, void *dev)
3196{
3197 if (num >= VDEC_IRQ_MAX) {
3198 pr_err("[%s] request irq error, irq num too big!", __func__);
3199 return;
3200 }
3201
3202 synchronize_irq(vdec_core->isr_context[num].irq);
3203
3204 /*
e0614bf7 3205 *assume amrisc is stopped already and there is no mailbox interrupt
b9164398
NQ
3206 * when we reset pointers here.
3207 */
3208 vdec_core->isr_context[num].dev_isr = NULL;
3209 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3210 vdec_core->isr_context[num].dev_id = NULL;
3211}
3212EXPORT_SYMBOL(vdec_free_irq);
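/*
 * Illustration only: a minimal sketch of how a decoder registers its
 * mailbox interrupt through the wrappers above and releases it again on
 * close.  The handler names and the dev pointer are hypothetical.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev)
{
	/* ack the hardware, defer heavy work to the threaded handler */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_threaded_isr(int irq, void *dev)
{
	/* process the mailbox command / decoded picture here */
	return IRQ_HANDLED;
}

static int example_open(void *dev)
{
	return vdec_request_threaded_irq(VDEC_IRQ_1, example_isr,
		example_threaded_isr, IRQF_ONESHOT, "example-dec", dev);
}

static void example_close(void *dev)
{
	vdec_free_irq(VDEC_IRQ_1, dev);
}
#endif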
3213
a6c89e96
NQ
3214struct vdec_s *vdec_get_default_vdec_for_userdata(void)
3215{
3216 struct vdec_s *vdec;
3217 struct vdec_s *ret_vdec;
3218 struct vdec_core_s *core = vdec_core;
3219 unsigned long flags;
3220 int id;
3221
3222 flags = vdec_core_lock(vdec_core);
3223
3224 id = 0x10000000;
3225 ret_vdec = NULL;
3226 if (!list_empty(&core->connected_vdec_list)) {
3227 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3228 if (vdec->id < id) {
3229 id = vdec->id;
3230 ret_vdec = vdec;
3231 }
3232 }
3233 }
3234
3235 vdec_core_unlock(vdec_core, flags);
3236
3237 return ret_vdec;
3238}
3239EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
3240
3241int vdec_read_user_data(struct vdec_s *vdec,
3242 struct userdata_param_t *p_userdata_param)
3243{
3244 int ret = 0;
3245
3246 if (!vdec)
3247 vdec = vdec_get_default_vdec_for_userdata();
3248
3249 if (vdec) {
3250 if (vdec->user_data_read)
3251 ret = vdec->user_data_read(vdec, p_userdata_param);
3252 }
3253 return ret;
3254}
3255EXPORT_SYMBOL(vdec_read_user_data);
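/*
 * Illustration only: callers may pass NULL and let vdec_read_user_data()
 * fall back to the lowest-id connected instance returned by
 * vdec_get_default_vdec_for_userdata().  A minimal sketch, assuming the
 * caller owns the userdata_param_t buffer:
 */
#if 0
static int example_poll_userdata(struct userdata_param_t *param)
{
	/* NULL selects the default vdec internally */
	return vdec_read_user_data(NULL, param);
}
#endif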
3256
3257int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
3258{
3259 /*if (vdec && vdec == vdec_get_default_vdec_for_userdata())
3260 amstream_wakeup_userdata_poll();*/ //DEBUG_TMP
3261
3262 return 0;
3263}
3264EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
3265
3266void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
3267{
3268 if (!vdec)
3269 vdec = vdec_get_default_vdec_for_userdata();
3270
3271 if (vdec) {
3272 if (vdec->reset_userdata_fifo)
3273 vdec->reset_userdata_fifo(vdec, bInit);
3274 }
3275}
3276EXPORT_SYMBOL(vdec_reset_userdata_fifo);
3277
b9164398
NQ
3278static int dump_mode;
3279static ssize_t dump_risc_mem_store(struct class *class,
3280 struct class_attribute *attr,
3281 const char *buf, size_t size)/*set*/
3282{
e0614bf7 3283 unsigned int val;
b9164398
NQ
3284 ssize_t ret;
3285 char dump_mode_str[4] = "PRL";
3286
3287 /*ret = sscanf(buf, "%d", &val);*/
3288 ret = kstrtoint(buf, 0, &val);
3289
3290 if (ret != 0)
3291 return -EINVAL;
3292 dump_mode = val & 0x3;
3293 pr_info("set dump mode to %d,%c_mem\n",
3294 dump_mode, dump_mode_str[dump_mode]);
3295 return size;
3296}
3297static u32 read_amrisc_reg(int reg)
3298{
3299 WRITE_VREG(0x31b, reg);
3300 return READ_VREG(0x31c);
3301}
3302
3303static void dump_pmem(void)
3304{
3305 int i;
3306
3307 WRITE_VREG(0x301, 0x8000);
3308 WRITE_VREG(0x31d, 0);
3309 pr_info("start dump amrisc pmem of risc\n");
3310 for (i = 0; i < 0xfff; i++) {
3311 /*same as .o format*/
3312 pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
3313 }
3314}
3315
3316static void dump_lmem(void)
3317{
3318 int i;
3319
3320 WRITE_VREG(0x301, 0x8000);
3321 WRITE_VREG(0x31d, 2);
3322 pr_info("start dump amrisc lmem\n");
3323 for (i = 0; i < 0x3ff; i++) {
3324 /*same as */
3325 pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
3326 }
3327}
3328
3329static ssize_t dump_risc_mem_show(struct class *class,
3330 struct class_attribute *attr, char *buf)
3331{
3332 char *pbuf = buf;
3333 int ret;
3334
3335 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8) {
3336 mutex_lock(&vdec_mutex);
3337 if (!vdec_on(VDEC_1)) {
3338 mutex_unlock(&vdec_mutex);
3339 pbuf += sprintf(pbuf, "amrisc is power off\n");
3340 ret = pbuf - buf;
3341 return ret;
3342 }
3343 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
3344 /*TODO:M6 define */
3345 /*
3346 * switch_mod_gate_by_type(MOD_VDEC, 1);
3347 */
3348 amports_switch_gate("vdec", 1);
3349 }
 3350 /*start dump*/
3351 switch (dump_mode) {
3352 case 0:
3353 dump_pmem();
3354 break;
3355 case 2:
3356 dump_lmem();
3357 break;
3358 default:
3359 break;
3360 }
3361
3362 /*done*/
3363 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
3364 mutex_unlock(&vdec_mutex);
3365 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M6) {
3366 /*TODO:M6 define */
3367 /*
3368 * switch_mod_gate_by_type(MOD_VDEC, 0);
3369 */
3370 amports_switch_gate("vdec", 0);
3371 }
3372 return sprintf(buf, "done\n");
3373}
3374
3375static ssize_t core_show(struct class *class, struct class_attribute *attr,
3376 char *buf)
3377{
3378 struct vdec_core_s *core = vdec_core;
3379 char *pbuf = buf;
3380
3381 if (list_empty(&core->connected_vdec_list))
3382 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3383 else {
3384 struct vdec_s *vdec;
3385
a6c89e96
NQ
3386 pbuf += sprintf(pbuf,
3387 " Core: last_sched %p, sched_mask %lx\n",
3388 core->active_vdec,
3389 core->sched_mask);
3390
b9164398
NQ
3391 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3392 pbuf += sprintf(pbuf,
a6c89e96 3393 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
fe96802b
NQ
3394 vdec->id,
3395 vdec,
3396 vdec_device_name[vdec->format * 2],
b9164398 3397 vdec_status_str(vdec),
a6c89e96
NQ
3398 vdec_type_str(vdec),
3399 vdec->active_mask);
b9164398
NQ
3400 }
3401 }
3402
3403 return pbuf - buf;
3404}
3405
fe96802b
NQ
3406static ssize_t vdec_status_show(struct class *class,
3407 struct class_attribute *attr, char *buf)
3408{
3409 char *pbuf = buf;
3410 struct vdec_s *vdec;
3411 struct vdec_info vs;
3412 unsigned char vdec_num = 0;
3413 struct vdec_core_s *core = vdec_core;
3414 unsigned long flags = vdec_core_lock(vdec_core);
3415
3416 if (list_empty(&core->connected_vdec_list)) {
3417 pbuf += sprintf(pbuf, "No vdec.\n");
3418 goto out;
3419 }
3420
3421 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3422 if (VDEC_STATUS_CONNECTED == vdec->status) {
3423 memset(&vs, 0, sizeof(vs));
3424 if (vdec_status(vdec, &vs)) {
3425 pbuf += sprintf(pbuf, "err.\n");
3426 goto out;
3427 }
3428 pbuf += sprintf(pbuf,
3429 "vdec channel %u statistics:\n",
3430 vdec_num);
3431 pbuf += sprintf(pbuf,
3432 "%13s : %s\n", "device name",
3433 vs.vdec_name);
3434 pbuf += sprintf(pbuf,
3435 "%13s : %u\n", "frame width",
3436 vs.frame_width);
3437 pbuf += sprintf(pbuf,
3438 "%13s : %u\n", "frame height",
3439 vs.frame_height);
3440 pbuf += sprintf(pbuf,
3441 "%13s : %u %s\n", "frame rate",
3442 vs.frame_rate, "fps");
3443 pbuf += sprintf(pbuf,
3444 "%13s : %u %s\n", "bit rate",
3445 vs.bit_rate / 1024 * 8, "kbps");
3446 pbuf += sprintf(pbuf,
3447 "%13s : %u\n", "status",
3448 vs.status);
3449 pbuf += sprintf(pbuf,
3450 "%13s : %u\n", "frame dur",
3451 vs.frame_dur);
3452 pbuf += sprintf(pbuf,
3453 "%13s : %u %s\n", "frame data",
3454 vs.frame_data / 1024, "KB");
3455 pbuf += sprintf(pbuf,
3456 "%13s : %u\n", "frame count",
3457 vs.frame_count);
3458 pbuf += sprintf(pbuf,
3459 "%13s : %u\n", "drop count",
3460 vs.drop_frame_count);
3461 pbuf += sprintf(pbuf,
3462 "%13s : %u\n", "fra err count",
3463 vs.error_frame_count);
3464 pbuf += sprintf(pbuf,
3465 "%13s : %u\n", "hw err count",
3466 vs.error_count);
3467 pbuf += sprintf(pbuf,
3468 "%13s : %llu %s\n\n", "total data",
3469 vs.total_data / 1024, "KB");
3470
3471 vdec_num++;
3472 }
3473 }
3474out:
3475 vdec_core_unlock(vdec_core, flags);
3476 return pbuf - buf;
3477}
3478
3479static ssize_t dump_vdec_blocks_show(struct class *class,
3480 struct class_attribute *attr, char *buf)
3481{
3482 struct vdec_core_s *core = vdec_core;
3483 char *pbuf = buf;
3484
3485 if (list_empty(&core->connected_vdec_list))
3486 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3487 else {
3488 struct vdec_s *vdec;
3489 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3490 pbuf += vdec_input_dump_blocks(&vdec->input,
3491 pbuf, PAGE_SIZE - (pbuf - buf));
3492 }
3493 }
3494
3495 return pbuf - buf;
3496}
3497static ssize_t dump_vdec_chunks_show(struct class *class,
3498 struct class_attribute *attr, char *buf)
3499{
3500 struct vdec_core_s *core = vdec_core;
3501 char *pbuf = buf;
3502
3503 if (list_empty(&core->connected_vdec_list))
3504 pbuf += sprintf(pbuf, "connected vdec list empty\n");
3505 else {
3506 struct vdec_s *vdec;
3507 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3508 pbuf += vdec_input_dump_chunks(&vdec->input,
3509 pbuf, PAGE_SIZE - (pbuf - buf));
3510 }
3511 }
3512
3513 return pbuf - buf;
3514}
3515
fe96802b
NQ
3516static ssize_t dump_decoder_state_show(struct class *class,
3517 struct class_attribute *attr, char *buf)
3518{
3519 char *pbuf = buf;
3520 struct vdec_s *vdec;
3521 struct vdec_core_s *core = vdec_core;
3522 unsigned long flags = vdec_core_lock(vdec_core);
3523
3524 if (list_empty(&core->connected_vdec_list)) {
3525 pbuf += sprintf(pbuf, "No vdec.\n");
3526 } else {
3527 list_for_each_entry(vdec,
3528 &core->connected_vdec_list, list) {
3529 if ((vdec->status == VDEC_STATUS_CONNECTED
3530 || vdec->status == VDEC_STATUS_ACTIVE)
3531 && vdec->dump_state)
3532 vdec->dump_state(vdec);
3533 }
3534 }
3535 vdec_core_unlock(vdec_core, flags);
3536
3537 return pbuf - buf;
3538}
d481db31 3539
d481db31 3540
fe96802b 3541
b9164398
NQ
3542static struct class_attribute vdec_class_attrs[] = {
3543 __ATTR_RO(amrisc_regs),
3544 __ATTR_RO(dump_trace),
3545 __ATTR_RO(clock_level),
3546 __ATTR(poweron_clock_level, S_IRUGO | S_IWUSR | S_IWGRP,
3547 show_poweron_clock_level, store_poweron_clock_level),
3548 __ATTR(dump_risc_mem, S_IRUGO | S_IWUSR | S_IWGRP,
3549 dump_risc_mem_show, dump_risc_mem_store),
3550 __ATTR(keep_vdec_mem, S_IRUGO | S_IWUSR | S_IWGRP,
3551 show_keep_vdec_mem, store_keep_vdec_mem),
3552 __ATTR_RO(core),
fe96802b
NQ
3553 __ATTR_RO(vdec_status),
3554 __ATTR_RO(dump_vdec_blocks),
3555 __ATTR_RO(dump_vdec_chunks),
d481db31 3556 __ATTR_RO(dump_decoder_state),
a6c89e96
NQ
3557#ifdef VDEC_DEBUG_SUPPORT
3558 __ATTR(debug, S_IRUGO | S_IWUSR | S_IWGRP,
3559 show_debug, store_debug),
3560#endif
b9164398
NQ
3561 __ATTR_NULL
3562};
3563
3564static struct class vdec_class = {
3565 .name = "vdec",
3566 .class_attrs = vdec_class_attrs,
3567 };
3568
b9164398
NQ
3569struct device *get_vdec_device(void)
3570{
3571 return &vdec_core->vdec_core_platform_device->dev;
3572}
3573EXPORT_SYMBOL(get_vdec_device);
3574
3575static int vdec_probe(struct platform_device *pdev)
3576{
3577 s32 i, r;
3578
3579 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
3580 sizeof(struct vdec_core_s), GFP_KERNEL);
3581 if (vdec_core == NULL) {
3582 pr_err("vdec core allocation failed.\n");
3583 return -ENOMEM;
3584 }
3585
3586 atomic_set(&vdec_core->vdec_nr, 0);
3587 sema_init(&vdec_core->sem, 1);
3588
3589 r = class_register(&vdec_class);
3590 if (r) {
3591 pr_info("vdec class create fail.\n");
3592 return r;
3593 }
3594
3595 vdec_core->vdec_core_platform_device = pdev;
3596
3597 platform_set_drvdata(pdev, vdec_core);
3598
3599 for (i = 0; i < VDEC_IRQ_MAX; i++) {
3600 vdec_core->isr_context[i].index = i;
3601 vdec_core->isr_context[i].irq = -1;
3602 }
3603
a6c89e96
NQ
3604 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
3605 IRQF_ONESHOT, "vdec-0", NULL);
3606 if (r < 0) {
3607 pr_err("vdec interrupt request failed\n");
3608 return r;
3609 }
3610
b9164398
NQ
3611 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
3612 IRQF_ONESHOT, "vdec-1", NULL);
3613 if (r < 0) {
3614 pr_err("vdec interrupt request failed\n");
3615 return r;
3616 }
a6c89e96
NQ
3617#if 0
3618 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A) {
3619 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
3620 IRQF_ONESHOT, "vdec-hevc_back", NULL);
3621 if (r < 0) {
3622 pr_err("vdec interrupt request failed\n");
3623 return r;
3624 }
3625 }
3626#endif
b9164398
NQ
3627 r = of_reserved_mem_device_init(&pdev->dev);
3628 if (r == 0)
3629 pr_info("vdec_probe done\n");
3630
3631 vdec_core->cma_dev = &pdev->dev;
3632
3633 if (get_cpu_type() < MESON_CPU_MAJOR_ID_M8) {
3634 /* default to 250MHz */
3635 vdec_clock_hi_enable();
3636 }
3637
3638 if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
3639 /* set vdec dmc request to urgent */
3640 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
3641 }
b9164398
NQ
3642 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
3643 spin_lock_init(&vdec_core->lock);
fe96802b 3644 ida_init(&vdec_core->ida);
b9164398
NQ
3645 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
3646 "vdec-core");
3647
a6c89e96
NQ
3648 vdec_core->vdec_core_wq = create_singlethread_workqueue("threadvdec");
3649
b9164398
NQ
3650 return 0;
3651}
3652
3653static int vdec_remove(struct platform_device *pdev)
3654{
3655 int i;
3656
3657 for (i = 0; i < VDEC_IRQ_MAX; i++) {
3658 if (vdec_core->isr_context[i].irq >= 0) {
3659 free_irq(vdec_core->isr_context[i].irq,
3660 &vdec_core->isr_context[i]);
3661 vdec_core->isr_context[i].irq = -1;
3662 vdec_core->isr_context[i].dev_isr = NULL;
3663 vdec_core->isr_context[i].dev_threaded_isr = NULL;
3664 vdec_core->isr_context[i].dev_id = NULL;
3665 }
3666 }
3667
3668 kthread_stop(vdec_core->thread);
3669
fe96802b 3670 destroy_workqueue(vdec_core->vdec_core_wq);
b9164398
NQ
3671 class_unregister(&vdec_class);
3672
3673 return 0;
3674}
3675
3676static const struct of_device_id amlogic_vdec_dt_match[] = {
3677 {
3678 .compatible = "amlogic, vdec",
3679 },
3680 {},
3681};
3682
fe96802b 3683static struct mconfig vdec_configs[] = {
fe96802b
NQ
3684 MC_PU32("debug_trace_num", &debug_trace_num),
3685 MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
3686 MC_PU32("clk_config", &clk_config),
3687 MC_PI32("step_mode", &step_mode),
3688 MC_PI32("poweron_clock_level", &poweron_clock_level),
3689};
3690static struct mconfig_node vdec_node;
3691
b9164398
NQ
3692static struct platform_driver vdec_driver = {
3693 .probe = vdec_probe,
3694 .remove = vdec_remove,
3695 .driver = {
3696 .name = "vdec",
3697 .of_match_table = amlogic_vdec_dt_match,
3698 }
3699};
3700
3701int vdec_module_init(void)
3702{
3703 if (platform_driver_register(&vdec_driver)) {
3704 pr_info("failed to register vdec module\n");
3705 return -ENODEV;
3706 }
fe96802b
NQ
3707 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
3708 "vdec", vdec_configs, CONFIG_FOR_RW);
b9164398
NQ
3709 return 0;
3710}
3711EXPORT_SYMBOL(vdec_module_init);
3712
3713void vdec_module_exit(void)
3714{
3715 platform_driver_unregister(&vdec_driver);
3716}
3717EXPORT_SYMBOL(vdec_module_exit);
3718
3719#if 0
3720static int __init vdec_module_init(void)
3721{
3722 if (platform_driver_register(&vdec_driver)) {
3723 pr_info("failed to register vdec module\n");
3724 return -ENODEV;
3725 }
fe96802b
NQ
3726 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
3727 "vdec", vdec_configs, CONFIG_FOR_RW);
b9164398
NQ
3728 return 0;
3729}
3730
3731static void __exit vdec_module_exit(void)
3732{
3733 platform_driver_unregister(&vdec_driver);
3734}
3735#endif
3736
3737static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
3738{
b9164398
NQ
3739 vdec_core->cma_dev = dev;
3740
3741 return 0;
3742}
3743
3744static const struct reserved_mem_ops rmem_vdec_ops = {
3745 .device_init = vdec_mem_device_init,
3746};
3747
3748static int __init vdec_mem_setup(struct reserved_mem *rmem)
3749{
3750 rmem->ops = &rmem_vdec_ops;
3751 pr_info("vdec: reserved mem setup\n");
3752
3753 return 0;
3754}
3755
3756RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
a6c89e96
NQ
3757/*
3758uint force_hevc_clock_cntl;
3759EXPORT_SYMBOL(force_hevc_clock_cntl);
3760
3761module_param(force_hevc_clock_cntl, uint, 0664);
3762*/
b9164398
NQ
3763module_param(debug_trace_num, uint, 0664);
3764module_param(hevc_max_reset_count, int, 0664);
3765module_param(clk_config, uint, 0664);
3766module_param(step_mode, int, 0664);
a6c89e96 3767module_param(debugflags, int, 0664);
fe96802b 3768
b9164398
NQ
3769/*
3770*module_init(vdec_module_init);
3771*module_exit(vdec_module_exit);
3772*/
fe96802b
NQ
3773#define CREATE_TRACE_POINTS
3774#include "vdec_trace.h"
b9164398
NQ
3775MODULE_DESCRIPTION("AMLOGIC vdec driver");
3776MODULE_LICENSE("GPL");
3777MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");