1/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/fb.h>
20#include <linux/msm_mdp.h>
21#include <linux/interrupt.h>
22#include <linux/wait.h>
23#include <linux/clk.h>
24#include <linux/file.h>
25#include <linux/major.h>
26
27#include <mach/msm_iomap.h>
28#include <mach/msm_fb.h>
29#include <linux/platform_device.h>
30
31#include "mdp_hw.h"
32
33struct class *mdp_class;
34
35#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)
36
37static uint16_t mdp_default_ccs[] = {
38 0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
39 0x010, 0x080, 0x080
40};
41
42static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
43static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
44static struct msmfb_callback *dma_callback;
45static struct clk *clk;
46static unsigned int mdp_irq_mask;
47static DEFINE_SPINLOCK(mdp_lock);
48DEFINE_MUTEX(mdp_mutex);
49
50static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
51{
52 unsigned long irq_flags;
53 int ret = 0;
54
55 BUG_ON(!mask);
56
57 spin_lock_irqsave(&mdp_lock, irq_flags);
58 /* if the mask bits are already set return an error, this interrupt
59 * is already enabled */
60 if (mdp_irq_mask & mask) {
61 printk(KERN_ERR "mdp irq already on already on %x %x\n",
62 mdp_irq_mask, mask);
63 ret = -1;
64 }
65 /* if the mdp irq is not already enabled enable it */
66 if (!mdp_irq_mask) {
67 if (clk)
68 clk_enable(clk);
69 enable_irq(mdp->irq);
70 }
71
72 /* update the irq mask to reflect the fact that the interrupt is
73 * enabled */
74 mdp_irq_mask |= mask;
75 spin_unlock_irqrestore(&mdp_lock, irq_flags);
76 return ret;
77}
78
79static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
80{
81 /* this interrupt is already disabled! */
82 if (!(mdp_irq_mask & mask)) {
83 printk(KERN_ERR "mdp irq already off %x %x\n",
84 mdp_irq_mask, mask);
85 return -1;
86 }
87 /* update the irq mask to reflect the fact that the interrupt is
88 * disabled */
89 mdp_irq_mask &= ~(mask);
90 /* if no one is waiting on the interrupt, disable it */
91 if (!mdp_irq_mask) {
92 disable_irq(mdp->irq);
93 if (clk)
94 clk_disable(clk);
95 }
96 return 0;
97}
98
99static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
100{
101 unsigned long irq_flags;
102 int ret;
103
104 spin_lock_irqsave(&mdp_lock, irq_flags);
105 ret = locked_disable_mdp_irq(mdp, mask);
106 spin_unlock_irqrestore(&mdp_lock, irq_flags);
107 return ret;
108}
109
110static irqreturn_t mdp_isr(int irq, void *data)
111{
112 uint32_t status;
113 unsigned long irq_flags;
114 struct mdp_info *mdp = data;
115
116 spin_lock_irqsave(&mdp_lock, irq_flags);
117
118 status = mdp_readl(mdp, MDP_INTR_STATUS);
119 mdp_writel(mdp, status, MDP_INTR_CLEAR);
120
121 status &= mdp_irq_mask;
122 if (status & DL0_DMA2_TERM_DONE) {
123 if (dma_callback) {
124 dma_callback->func(dma_callback);
125 dma_callback = NULL;
126 }
127 wake_up(&mdp_dma2_waitqueue);
128 }
129
130 if (status & DL0_ROI_DONE)
131 wake_up(&mdp_ppp_waitqueue);
132
133 if (status)
134 locked_disable_mdp_irq(mdp, status);
135
136 spin_unlock_irqrestore(&mdp_lock, irq_flags);
137 return IRQ_HANDLED;
138}
139
140static uint32_t mdp_check_mask(uint32_t mask)
141{
142 uint32_t ret;
143 unsigned long irq_flags;
144
145 spin_lock_irqsave(&mdp_lock, irq_flags);
146 ret = mdp_irq_mask & mask;
147 spin_unlock_irqrestore(&mdp_lock, irq_flags);
148 return ret;
149}
150
151static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
152{
153 int ret = 0;
154 unsigned long irq_flags;
155
156 wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
157
158 spin_lock_irqsave(&mdp_lock, irq_flags);
159 if (mdp_irq_mask & mask) {
160 locked_disable_mdp_irq(mdp, mask);
161 printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
162 mask);
163 ret = -ETIMEDOUT;
164 }
165 spin_unlock_irqrestore(&mdp_lock, irq_flags);
166
167 return ret;
168}
169
170void mdp_dma_wait(struct mdp_device *mdp_dev)
171{
172#define MDP_MAX_TIMEOUTS 20
173 static int timeout_count;
174 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
175
176 if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
177 timeout_count++;
178 else
179 timeout_count = 0;
180
181 if (timeout_count > MDP_MAX_TIMEOUTS) {
182 printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
183 MDP_MAX_TIMEOUTS);
184 BUG();
185 }
186}
187
188static int mdp_ppp_wait(struct mdp_info *mdp)
189{
190 return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
191}
192
193void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
194 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
195 struct msmfb_callback *callback)
196{
197 uint32_t dma2_cfg;
198 uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */
199
200 if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
201 printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
202 return;
203 }
204
205 dma_callback = callback;
206
207 dma2_cfg = DMA_PACK_TIGHT |
208 DMA_PACK_ALIGN_LSB |
209 DMA_PACK_PATTERN_RGB |
210 DMA_OUT_SEL_AHB |
211 DMA_IBUF_NONCONTIGUOUS;
212
213 dma2_cfg |= DMA_IBUF_FORMAT_RGB565;
214
215 dma2_cfg |= DMA_OUT_SEL_MDDI;
216
217 dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
218
219 dma2_cfg |= DMA_DITHER_EN;
220
221 /* setup size, address, and stride */
222 mdp_writel(mdp, (height << 16) | (width),
223 MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
224 mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
225 mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);
226
227 /* 666 18BPP */
228 dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
229
230 /* set y & x offset and MDDI transaction parameters */
231 mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
232 mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
233 mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
234 MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);
235
236 mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);
237
238 /* start DMA2 */
239 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
240}
241
242void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
243 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
244 struct msmfb_callback *callback, int interface)
245{
246 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
247
248 if (interface == MSM_MDDI_PMDH_INTERFACE) {
249 mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
250 callback);
251 }
252}
253
254int get_img(struct mdp_img *img, struct fb_info *info,
255 unsigned long *start, unsigned long *len,
256 struct file **filep)
257{
258 int put_needed, ret = 0;
259 struct file *file;
260 unsigned long vstart;
261
d480ace0
PM
262 file = fget_light(img->memory_id, &put_needed);
263 if (file == NULL)
264 return -1;
265
266 if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
267 *start = info->fix.smem_start;
268 *len = info->fix.smem_len;
269 } else
270 ret = -1;
271 fput_light(file, put_needed);
272
273 return ret;
274}
275
276void put_img(struct file *src_file, struct file *dst_file)
277{
d480ace0
PM
278}
279
280int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
281 struct mdp_blit_req *req)
282{
283 int ret;
284 unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
285 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
286 struct file *src_file = 0, *dst_file = 0;
287
288 /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
289 if (unlikely(req->src_rect.h == 0 ||
290 req->src_rect.w == 0)) {
291 printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
292 return -EINVAL;
293 }
294 if (unlikely(req->dst_rect.h == 0 ||
295 req->dst_rect.w == 0))
296 return -EINVAL;
297
298 /* do this first so that if this fails, the caller can always
299 * safely call put_img */
300 if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
301 printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
302 "memory\n");
303 return -EINVAL;
304 }
305
306 if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
307 printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
308 "memory\n");
d480ace0
PM
309 return -EINVAL;
310 }
311 mutex_lock(&mdp_mutex);
312
313 /* transp_masking unimplemented */
314 req->transp_mask = MDP_TRANSP_NOP;
315 if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
316 req->alpha != MDP_ALPHA_NOP ||
317 HAS_ALPHA(req->src.format)) &&
318 (req->flags & MDP_ROT_90 &&
319 req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
320 int i;
321 unsigned int tiles = req->dst_rect.h / 16;
322 unsigned int remainder = req->dst_rect.h % 16;
323 req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
324 req->dst_rect.h = 16;
325 for (i = 0; i < tiles; i++) {
326 enable_mdp_irq(mdp, DL0_ROI_DONE);
327 ret = mdp_ppp_blit(mdp, req, src_file, src_start,
328 src_len, dst_file, dst_start,
329 dst_len);
330 if (ret)
331 goto err_bad_blit;
332 ret = mdp_ppp_wait(mdp);
333 if (ret)
334 goto err_wait_failed;
335 req->dst_rect.y += 16;
336 req->src_rect.x += req->src_rect.w;
337 }
338 if (!remainder)
339 goto end;
340 req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
341 req->dst_rect.h = remainder;
342 }
343 enable_mdp_irq(mdp, DL0_ROI_DONE);
344 ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
345 dst_start,
346 dst_len);
347 if (ret)
348 goto err_bad_blit;
349 ret = mdp_ppp_wait(mdp);
350 if (ret)
351 goto err_wait_failed;
352end:
353 put_img(src_file, dst_file);
354 mutex_unlock(&mdp_mutex);
355 return 0;
356err_bad_blit:
357 disable_mdp_irq(mdp, DL0_ROI_DONE);
358err_wait_failed:
359 put_img(src_file, dst_file);
360 mutex_unlock(&mdp_mutex);
361 return ret;
362}
363
364void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
365{
366 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
367
368 disp_id &= 0xf;
369 mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
370}
371
372int register_mdp_client(struct class_interface *cint)
373{
374 if (!mdp_class) {
375 pr_err("mdp: no mdp_class when registering mdp client\n");
376 return -ENODEV;
377 }
378 cint->class = mdp_class;
379 return class_interface_register(cint);
380}
381
382#include "mdp_csc_table.h"
383#include "mdp_scale_tables.h"
384
385int mdp_probe(struct platform_device *pdev)
386{
387 struct resource *resource;
388 int ret;
389 int n;
390 struct mdp_info *mdp;
391
392 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
393 if (!resource) {
394 pr_err("mdp: can not get mdp mem resource!\n");
395 return -ENOMEM;
396 }
397
398 mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
399 if (!mdp)
400 return -ENOMEM;
401
402 mdp->irq = platform_get_irq(pdev, 0);
403 if (mdp->irq < 0) {
404 pr_err("mdp: can not get mdp irq\n");
405 ret = mdp->irq;
406 goto error_get_irq;
407 }
408
409 mdp->base = ioremap(resource->start,
410 resource->end - resource->start);
411 if (mdp->base == 0) {
412 printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
413 ret = -ENOMEM;
414 goto error_ioremap;
415 }
416
417 mdp->mdp_dev.dma = mdp_dma;
418 mdp->mdp_dev.dma_wait = mdp_dma_wait;
419 mdp->mdp_dev.blit = mdp_blit;
420 mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
421
422 clk = clk_get(&pdev->dev, "mdp_clk");
423 if (IS_ERR(clk)) {
424 printk(KERN_INFO "mdp: failed to get mdp clk");
425 return PTR_ERR(clk);
426 }
427
428 ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
429 if (ret)
430 goto error_request_irq;
431 disable_irq(mdp->irq);
432 mdp_irq_mask = 0;
433
434 /* debug interface write access */
435 mdp_writel(mdp, 1, 0x60);
436
437 mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
438 mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
439
440 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
441 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
442
443 for (n = 0; n < ARRAY_SIZE(csc_table); n++)
444 mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
445
446 /* clear up unused fg/main registers */
447 /* comp.plane 2&3 ystride */
448 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
449
450 /* unpacked pattern */
451 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
452 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
453 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
454 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
455 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
456 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
457 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
458 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
459 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
460
461 /* comp.plane 2 & 3 */
462 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
463 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
464
465 /* clear unused bg registers */
466 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
467 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
468 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
469 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
470 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
471
472 for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
473 mdp_writel(mdp, mdp_upscale_table[n].val,
474 mdp_upscale_table[n].reg);
475
476 for (n = 0; n < 9; n++)
477 mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
478 mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
479 mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
480 mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
481
482 /* register mdp device */
483 mdp->mdp_dev.dev.parent = &pdev->dev;
484 mdp->mdp_dev.dev.class = mdp_class;
d480ace0
PM
485
486 /* if you can remove the platform device you'd have to implement
487 * this:
488 mdp_dev.release = mdp_class; */
489
490 ret = device_register(&mdp->mdp_dev.dev);
491 if (ret)
492 goto error_device_register;
493 return 0;
494
495error_device_register:
496 free_irq(mdp->irq, mdp);
497error_request_irq:
498 iounmap(mdp->base);
499error_get_irq:
500error_ioremap:
501 kfree(mdp);
502 return ret;
503}
504
505static struct platform_driver msm_mdp_driver = {
506 .probe = mdp_probe,
507 .driver = {.name = "msm_mdp"},
508};
509
510static int __init mdp_init(void)
511{
512 mdp_class = class_create(THIS_MODULE, "msm_mdp");
513 if (IS_ERR(mdp_class)) {
514 printk(KERN_ERR "Error creating mdp class\n");
515 return PTR_ERR(mdp_class);
516 }
517 return platform_driver_register(&msm_mdp_driver);
518}
519
520subsys_initcall(mdp_init);