/* Imported from drivers/video/msm/mdp.c
 * (android_kernel_alcatel_ttab, mt8127 tree; commit d480ace0 — include
 * cleanup: update gfp.h and slab.h includes to prepare for breaking
 * implicit slab.h inclusion from percpu.h)
 */
1/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/fb.h>
20#include <linux/msm_mdp.h>
21#include <linux/interrupt.h>
22#include <linux/wait.h>
23#include <linux/clk.h>
24#include <linux/file.h>
d480ace0 25#include <linux/major.h>
5a0e3ad6 26#include <linux/slab.h>
d480ace0
PM
27
28#include <mach/msm_iomap.h>
29#include <mach/msm_fb.h>
30#include <linux/platform_device.h>
31
32#include "mdp_hw.h"
33
/* sysfs class under which MDP devices register; clients attach to it
 * via register_mdp_client(). Created in mdp_init(). */
struct class *mdp_class;

/* Base offset of the register window used for the "debug access"
 * register pokes throughout this file. */
#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* Default color conversion coefficients: a 3x3 fixed-point matrix
 * (first 9 entries) followed by 3 offsets; loaded into hardware by
 * mdp_probe(). */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

/* waiters for DMA2 / PPP(ROI) completions, woken from mdp_isr() */
static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
/* one-shot completion callback for the in-flight DMA2 transfer */
static struct msmfb_callback *dma_callback;
/* MDP core clock; gated off whenever no interrupt source is armed */
static struct clk *clk;
/* interrupt sources currently enabled; guarded by mdp_lock */
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);
/* serializes blit (PPP) operations in mdp_blit() */
DEFINE_MUTEX(mdp_mutex);
50
51static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
52{
53 unsigned long irq_flags;
54 int ret = 0;
55
56 BUG_ON(!mask);
57
58 spin_lock_irqsave(&mdp_lock, irq_flags);
59 /* if the mask bits are already set return an error, this interrupt
60 * is already enabled */
61 if (mdp_irq_mask & mask) {
62 printk(KERN_ERR "mdp irq already on already on %x %x\n",
63 mdp_irq_mask, mask);
64 ret = -1;
65 }
66 /* if the mdp irq is not already enabled enable it */
67 if (!mdp_irq_mask) {
68 if (clk)
69 clk_enable(clk);
70 enable_irq(mdp->irq);
71 }
72
73 /* update the irq mask to reflect the fact that the interrupt is
74 * enabled */
75 mdp_irq_mask |= mask;
76 spin_unlock_irqrestore(&mdp_lock, irq_flags);
77 return ret;
78}
79
80static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
81{
82 /* this interrupt is already disabled! */
83 if (!(mdp_irq_mask & mask)) {
84 printk(KERN_ERR "mdp irq already off %x %x\n",
85 mdp_irq_mask, mask);
86 return -1;
87 }
88 /* update the irq mask to reflect the fact that the interrupt is
89 * disabled */
90 mdp_irq_mask &= ~(mask);
91 /* if no one is waiting on the interrupt, disable it */
92 if (!mdp_irq_mask) {
93 disable_irq(mdp->irq);
94 if (clk)
95 clk_disable(clk);
96 }
97 return 0;
98}
99
100static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
101{
102 unsigned long irq_flags;
103 int ret;
104
105 spin_lock_irqsave(&mdp_lock, irq_flags);
106 ret = locked_disable_mdp_irq(mdp, mask);
107 spin_unlock_irqrestore(&mdp_lock, irq_flags);
108 return ret;
109}
110
/* MDP interrupt handler.
 *
 * Reads and acks the raw interrupt status, then services only the
 * sources software currently has armed (mdp_irq_mask):
 *  - DMA2 completion fires the registered one-shot callback (if any)
 *    and wakes waiters on mdp_dma2_waitqueue;
 *  - PPP ROI-done wakes waiters on mdp_ppp_waitqueue.
 * Serviced sources are disabled again under mdp_lock, so each
 * enable_mdp_irq() arms exactly one completion.
 */
static irqreturn_t mdp_isr(int irq, void *data)
{
	uint32_t status;
	unsigned long irq_flags;
	struct mdp_info *mdp = data;

	spin_lock_irqsave(&mdp_lock, irq_flags);

	/* ack everything pending in hardware before dispatching */
	status = mdp_readl(mdp, MDP_INTR_STATUS);
	mdp_writel(mdp, status, MDP_INTR_CLEAR);

	/* only act on sources software actually armed */
	status &= mdp_irq_mask;
	if (status & DL0_DMA2_TERM_DONE) {
		if (dma_callback) {
			dma_callback->func(dma_callback);
			dma_callback = NULL;
		}
		wake_up(&mdp_dma2_waitqueue);
	}

	if (status & DL0_ROI_DONE)
		wake_up(&mdp_ppp_waitqueue);

	/* one-shot semantics: serviced sources stay off until re-armed */
	if (status)
		locked_disable_mdp_irq(mdp, status);

	spin_unlock_irqrestore(&mdp_lock, irq_flags);
	return IRQ_HANDLED;
}
140
141static uint32_t mdp_check_mask(uint32_t mask)
142{
143 uint32_t ret;
144 unsigned long irq_flags;
145
146 spin_lock_irqsave(&mdp_lock, irq_flags);
147 ret = mdp_irq_mask & mask;
148 spin_unlock_irqrestore(&mdp_lock, irq_flags);
149 return ret;
150}
151
152static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
153{
154 int ret = 0;
155 unsigned long irq_flags;
156
157 wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
158
159 spin_lock_irqsave(&mdp_lock, irq_flags);
160 if (mdp_irq_mask & mask) {
161 locked_disable_mdp_irq(mdp, mask);
162 printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
163 mask);
164 ret = -ETIMEDOUT;
165 }
166 spin_unlock_irqrestore(&mdp_lock, irq_flags);
167
168 return ret;
169}
170
171void mdp_dma_wait(struct mdp_device *mdp_dev)
172{
173#define MDP_MAX_TIMEOUTS 20
174 static int timeout_count;
175 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
176
177 if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
178 timeout_count++;
179 else
180 timeout_count = 0;
181
182 if (timeout_count > MDP_MAX_TIMEOUTS) {
183 printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
184 MDP_MAX_TIMEOUTS);
185 BUG();
186 }
187}
188
/* Wait for the PPP (blit) engine's ROI-done completion; returns 0 or
 * -ETIMEDOUT from mdp_wait(). */
static int mdp_ppp_wait(struct mdp_info *mdp)
{
	return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
}
193
/* Arm and start a one-shot DMA2 transfer pushing an RGB565 frame
 * region out over the primary MDDI LCD, dithered down to 18bpp (666).
 *
 * @addr/@stride describe the source frame in memory, @width/@height
 * the region size, and @x/@y its offset on the panel.  @callback (may
 * be NULL) is invoked from mdp_isr() when the transfer completes.
 *
 * If a DMA2 transfer is already armed the frame is dropped with an
 * error message.  Registers are poked through the command debug window
 * (MDP_CMD_DEBUG_ACCESS_BASE); the final write to +0x0044 starts DMA2.
 */
void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
		     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
		     struct msmfb_callback *callback)
{
	uint32_t dma2_cfg;
	uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */

	if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
		printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
		return;
	}

	/* consumed (and cleared) by mdp_isr() on completion */
	dma_callback = callback;

	dma2_cfg = DMA_PACK_TIGHT |
		DMA_PACK_ALIGN_LSB |
		DMA_PACK_PATTERN_RGB |
		DMA_OUT_SEL_AHB |
		DMA_IBUF_NONCONTIGUOUS;

	dma2_cfg |= DMA_IBUF_FORMAT_RGB565;

	dma2_cfg |= DMA_OUT_SEL_MDDI;

	dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;

	dma2_cfg |= DMA_DITHER_EN;

	/* setup size, address, and stride */
	mdp_writel(mdp, (height << 16) | (width),
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
	mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
	mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);

	/* 666 18BPP */
	dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;

	/* set y & x offset and MDDI transaction parameters */
	mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
	mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
	mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);

	/* config must land before the kick below */
	mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);

	/* start DMA2 */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
}
242
243void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
244 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
245 struct msmfb_callback *callback, int interface)
246{
247 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
248
249 if (interface == MSM_MDDI_PMDH_INTERFACE) {
250 mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
251 callback);
252 }
253}
254
255int get_img(struct mdp_img *img, struct fb_info *info,
256 unsigned long *start, unsigned long *len,
257 struct file **filep)
258{
259 int put_needed, ret = 0;
260 struct file *file;
261 unsigned long vstart;
262
d480ace0
PM
263 file = fget_light(img->memory_id, &put_needed);
264 if (file == NULL)
265 return -1;
266
267 if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
268 *start = info->fix.smem_start;
269 *len = info->fix.smem_len;
270 } else
271 ret = -1;
272 fput_light(file, put_needed);
273
274 return ret;
275}
276
/* Release references taken by get_img().  Intentionally empty: the
 * current get_img() drops its file reference (fput_light) before
 * returning and never stores into *filep, so there is nothing to put
 * here.  Kept so mdp_blit's cleanup paths stay symmetric. */
void put_img(struct file *src_file, struct file *dst_file)
{
}
280
/* PPP blit entry point.
 *
 * Validates the request, resolves src/dst images to framebuffer
 * memory, then performs the blit under mdp_mutex, waiting for the
 * ROI-done interrupt after each hardware pass.  Returns 0 on success
 * or a negative errno.
 *
 * NOTE(review): the enable_mdp_irq() return values below are ignored —
 * presumably ROI_DONE can never already be armed while mdp_mutex is
 * held; confirm.
 */
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
	     struct mdp_blit_req *req)
{
	int ret;
	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
	struct file *src_file = 0, *dst_file = 0;

	/* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
	if (unlikely(req->src_rect.h == 0 ||
		     req->src_rect.w == 0)) {
		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 ||
		     req->dst_rect.w == 0))
		return -EINVAL;

	/* do this first so that if this fails, the caller can always
	 * safely call put_img */
	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
				"memory\n");
		return -EINVAL;
	}

	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
				"memory\n");
		return -EINVAL;
	}
	mutex_lock(&mdp_mutex);

	/* transp_masking unimplemented */
	req->transp_mask = MDP_TRANSP_NOP;
	/* Narrow (w <= 16) 90-degree-rotated blits with alpha trip a
	 * hardware bug, so split the destination into 16-pixel-high
	 * tiles and blit each tile separately.  Since transp_mask was
	 * just forced to NOP above, in practice only the alpha
	 * conditions can trigger this path. */
	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
		      req->alpha != MDP_ALPHA_NOP ||
		      HAS_ALPHA(req->src.format)) &&
		     (req->flags & MDP_ROT_90 &&
		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
		int i;
		unsigned int tiles = req->dst_rect.h / 16;
		unsigned int remainder = req->dst_rect.h % 16;
		/* scale src width down to one tile's worth; must happen
		 * before dst_rect.h is overwritten (order matters) */
		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = 16;
		for (i = 0; i < tiles; i++) {
			enable_mdp_irq(mdp, DL0_ROI_DONE);
			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
					   src_len, dst_file, dst_start,
					   dst_len);
			if (ret)
				goto err_bad_blit;
			ret = mdp_ppp_wait(mdp);
			if (ret)
				goto err_wait_failed;
			/* advance both rects to the next tile */
			req->dst_rect.y += 16;
			req->src_rect.x += req->src_rect.w;
		}
		if (!remainder)
			goto end;
		/* final partial tile (dst_rect.h is still 16 here) */
		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = remainder;
	}
	enable_mdp_irq(mdp, DL0_ROI_DONE);
	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
			   dst_start,
			   dst_len);
	if (ret)
		goto err_bad_blit;
	ret = mdp_ppp_wait(mdp);
	if (ret)
		goto err_wait_failed;
end:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return 0;
err_bad_blit:
	/* blit never started, so the armed IRQ must be torn down here;
	 * on wait failure mdp_wait() already disabled it */
	disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return ret;
}
364
365void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
366{
367 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
368
369 disp_id &= 0xf;
370 mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
371}
372
373int register_mdp_client(struct class_interface *cint)
374{
375 if (!mdp_class) {
376 pr_err("mdp: no mdp_class when registering mdp client\n");
377 return -ENODEV;
378 }
379 cint->class = mdp_class;
380 return class_interface_register(cint);
381}
382
383#include "mdp_csc_table.h"
384#include "mdp_scale_tables.h"
385
386int mdp_probe(struct platform_device *pdev)
387{
388 struct resource *resource;
389 int ret;
390 int n;
391 struct mdp_info *mdp;
392
393 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394 if (!resource) {
395 pr_err("mdp: can not get mdp mem resource!\n");
396 return -ENOMEM;
397 }
398
399 mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
400 if (!mdp)
401 return -ENOMEM;
402
403 mdp->irq = platform_get_irq(pdev, 0);
404 if (mdp->irq < 0) {
405 pr_err("mdp: can not get mdp irq\n");
406 ret = mdp->irq;
407 goto error_get_irq;
408 }
409
410 mdp->base = ioremap(resource->start,
411 resource->end - resource->start);
412 if (mdp->base == 0) {
413 printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
414 ret = -ENOMEM;
415 goto error_ioremap;
416 }
417
418 mdp->mdp_dev.dma = mdp_dma;
419 mdp->mdp_dev.dma_wait = mdp_dma_wait;
420 mdp->mdp_dev.blit = mdp_blit;
421 mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
422
423 clk = clk_get(&pdev->dev, "mdp_clk");
424 if (IS_ERR(clk)) {
425 printk(KERN_INFO "mdp: failed to get mdp clk");
426 return PTR_ERR(clk);
427 }
428
429 ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
430 if (ret)
431 goto error_request_irq;
432 disable_irq(mdp->irq);
433 mdp_irq_mask = 0;
434
435 /* debug interface write access */
436 mdp_writel(mdp, 1, 0x60);
437
438 mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
439 mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
440
441 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
442 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
443
444 for (n = 0; n < ARRAY_SIZE(csc_table); n++)
445 mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
446
447 /* clear up unused fg/main registers */
448 /* comp.plane 2&3 ystride */
449 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
450
451 /* unpacked pattern */
452 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
453 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
454 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
455 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
456 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
457 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
458 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
459 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
460 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
461
462 /* comp.plane 2 & 3 */
463 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
464 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
465
466 /* clear unused bg registers */
467 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
468 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
469 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
470 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
471 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
472
473 for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
474 mdp_writel(mdp, mdp_upscale_table[n].val,
475 mdp_upscale_table[n].reg);
476
477 for (n = 0; n < 9; n++)
478 mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
479 mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
480 mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
481 mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
482
483 /* register mdp device */
484 mdp->mdp_dev.dev.parent = &pdev->dev;
485 mdp->mdp_dev.dev.class = mdp_class;
d480ace0
PM
486
487 /* if you can remove the platform device you'd have to implement
488 * this:
489 mdp_dev.release = mdp_class; */
490
491 ret = device_register(&mdp->mdp_dev.dev);
492 if (ret)
493 goto error_device_register;
494 return 0;
495
496error_device_register:
497 free_irq(mdp->irq, mdp);
498error_request_irq:
499 iounmap(mdp->base);
500error_get_irq:
501error_ioremap:
502 kfree(mdp);
503 return ret;
504}
505
/* Platform driver matched against the "msm_mdp" platform device
 * registered by board code; no remove hook — the MDP core stays up
 * for the life of the system. */
static struct platform_driver msm_mdp_driver = {
	.probe = mdp_probe,
	.driver = {.name = "msm_mdp"},
};
510
511static int __init mdp_init(void)
512{
513 mdp_class = class_create(THIS_MODULE, "msm_mdp");
514 if (IS_ERR(mdp_class)) {
515 printk(KERN_ERR "Error creating mdp class\n");
516 return PTR_ERR(mdp_class);
517 }
518 return platform_driver_register(&msm_mdp_driver);
519}
520
521subsys_initcall(mdp_init);