Commit | Line | Data |
---|---|---|
9d200153 SM |
1 | /* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved. |
2 | * | |
3 | * This program is free software; you can redistribute it and/or modify | |
4 | * it under the terms of the GNU General Public License version 2 and | |
5 | * only version 2 as published by the Free Software Foundation. | |
6 | * | |
7 | * This program is distributed in the hope that it will be useful, | |
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | * GNU General Public License for more details. | |
11 | * | |
12 | * You should have received a copy of the GNU General Public License | |
13 | * along with this program; if not, write to the Free Software | |
14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
15 | * 02110-1301, USA. | |
16 | */ | |
17 | ||
18 | #include <linux/module.h> | |
19 | #include <linux/kernel.h> | |
20 | #include <linux/sched.h> | |
21 | #include <linux/time.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/hrtimer.h> | |
26 | #include <linux/clk.h> | |
27 | #include <mach/hardware.h> | |
28 | #include <linux/io.h> | |
29 | #include <linux/debugfs.h> | |
30 | #include <linux/delay.h> | |
31 | #include <linux/mutex.h> | |
32 | ||
33 | #include <asm/system.h> | |
34 | #include <asm/mach-types.h> | |
35 | #include <linux/semaphore.h> | |
36 | #include <linux/uaccess.h> | |
37 | ||
38 | #include "mdp.h" | |
39 | #include "msm_fb.h" | |
40 | #ifdef CONFIG_FB_MSM_MDP40 | |
41 | #include "mdp4.h" | |
42 | #endif | |
43 | ||
/* MDP core and pixel clocks; gated on/off by mdp_pipe_ctrl() */
static struct clk *mdp_clk;
static struct clk *mdp_pclk;

/* Signalled from the ISR when a PPP (blit) operation completes */
struct completion mdp_ppp_comp;
struct semaphore mdp_ppp_mutex;
struct semaphore mdp_pipe_ctrl_mutex;

unsigned long mdp_timer_duration = (HZ);	/* 1 sec */
/* unsigned long mdp_mdp_timer_duration=0; */

/* TRUE while a caller sleeps in mdp_pipe_kickoff() waiting for PPP */
boolean mdp_ppp_waiting = FALSE;
uint32 mdp_tv_underflow_cnt;	/* TV encoder underrun counter */
uint32 mdp_lcdc_underflow_cnt;	/* LCDC underflow counter */

boolean mdp_current_clk_on = FALSE;	/* are the MDP clocks currently on? */
boolean mdp_is_in_isr = FALSE;

/*
 * legacy mdp_in_processing is only for DMA2-MDDI
 * this applies to DMA2 block only
 */
uint32 mdp_in_processing = FALSE;

/* Interrupt sources of interest; mirrored into MDP_INTR_ENABLE */
#ifdef CONFIG_FB_MSM_MDP40
uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
#else
uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
#endif

/* Per-block debug flags; non-zero enables timing prints for that block */
MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];

/* Per-block power reference counts, updated under mdp_spin_lock */
int32 mdp_block_power_cnt[MDP_MAX_BLOCK];

spinlock_t mdp_spin_lock;
struct workqueue_struct *mdp_dma_wq;	/*mdp dma wq */
struct workqueue_struct *mdp_vsync_wq;	/*mdp vsync wq */

static struct workqueue_struct *mdp_pipe_ctrl_wq;	/* mdp mdp pipe ctrl wq */
static struct delayed_work mdp_pipe_ctrl_worker;

/* DMA channel state; exported (non-static) on MDP4 for the mdp4_* code */
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
struct mdp_dma_data dma_s_data;
struct mdp_dma_data dma_e_data;
#else
static struct mdp_dma_data dma2_data;
static struct mdp_dma_data dma_s_data;
static struct mdp_dma_data dma_e_data;
#endif
static struct mdp_dma_data dma3_data;

extern ktime_t mdp_dma2_last_update_time;

extern uint32 mdp_dma2_update_time_in_usec;
extern int mdp_lcd_rd_cnt_offset_slow;
extern int mdp_lcd_rd_cnt_offset_fast;
extern int mdp_usec_diff_threshold;

#ifdef CONFIG_FB_MSM_LCDC
extern int mdp_lcdc_pclk_clk_rate;
extern int mdp_lcdc_pad_pclk_clk_rate;
extern int first_pixel_start_x;
extern int first_pixel_start_y;
#endif

#ifdef MSM_FB_ENABLE_DBGFS
struct dentry *mdp_dir;	/* debugfs directory for the MDP tunables */
#endif

#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
#else
#define mdp_suspend NULL
#endif

struct timeval mdp_dma2_timeval;	/* DMA2 kickoff timestamp (debug only) */
struct timeval mdp_ppp_timeval;	/* PPP kickoff timestamp (debug only) */

#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend early_suspend;
#endif

#ifndef CONFIG_FB_MSM_MDP22
/* Serializes mdp_lut_push/mdp_lut_push_i between updater and DMA kickoff */
DEFINE_MUTEX(mdp_lut_push_sem);
/* Index (0 or 1) of the LUT bank to program next; the other bank is live */
static int mdp_lut_i;
129 | static int mdp_lut_hw_update(struct fb_cmap *cmap) | |
130 | { | |
131 | int i; | |
132 | u16 *c[3]; | |
133 | u16 r, g, b; | |
134 | ||
135 | c[0] = cmap->green; | |
136 | c[1] = cmap->blue; | |
137 | c[2] = cmap->red; | |
138 | ||
139 | for (i = 0; i < cmap->len; i++) { | |
140 | if (copy_from_user(&r, cmap->red++, sizeof(r)) || | |
141 | copy_from_user(&g, cmap->green++, sizeof(g)) || | |
142 | copy_from_user(&b, cmap->blue++, sizeof(b))) | |
143 | return -EFAULT; | |
144 | ||
145 | #ifdef CONFIG_FB_MSM_MDP40 | |
146 | MDP_OUTP(MDP_BASE + 0x94800 + | |
147 | #else | |
148 | MDP_OUTP(MDP_BASE + 0x93800 + | |
149 | #endif | |
150 | (0x400*mdp_lut_i) + cmap->start*4 + i*4, | |
151 | ((g & 0xff) | | |
152 | ((b & 0xff) << 8) | | |
153 | ((r & 0xff) << 16))); | |
154 | } | |
155 | ||
156 | return 0; | |
157 | } | |
158 | ||
/* Set when a freshly-programmed LUT bank must be latched on next DMA start */
static int mdp_lut_push;
static int mdp_lut_push_i;	/* bank index to latch */
161 | static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap) | |
162 | { | |
163 | int ret; | |
164 | ||
165 | mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); | |
166 | ret = mdp_lut_hw_update(cmap); | |
167 | mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); | |
168 | ||
169 | if (ret) | |
170 | return ret; | |
171 | ||
172 | mutex_lock(&mdp_lut_push_sem); | |
173 | mdp_lut_push = 1; | |
174 | mdp_lut_push_i = mdp_lut_i; | |
175 | mutex_unlock(&mdp_lut_push_sem); | |
176 | ||
177 | mdp_lut_i = (mdp_lut_i + 1)%2; | |
178 | ||
179 | return 0; | |
180 | } | |
181 | ||
182 | static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap) | |
183 | { | |
184 | int ret; | |
185 | ||
186 | mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); | |
187 | ret = mdp_lut_hw_update(cmap); | |
188 | ||
189 | if (ret) { | |
190 | mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); | |
191 | return ret; | |
192 | } | |
193 | ||
194 | MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17); | |
195 | mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); | |
196 | mdp_lut_i = (mdp_lut_i + 1)%2; | |
197 | ||
198 | return 0; | |
199 | } | |
200 | ||
/* Scratch buffers the ISR fills with the hardware histogram results */
#define MDP_HIST_MAX_BIN 32
static __u32 mdp_hist_r[MDP_HIST_MAX_BIN];
static __u32 mdp_hist_g[MDP_HIST_MAX_BIN];
static __u32 mdp_hist_b[MDP_HIST_MAX_BIN];

/* In-flight histogram request; the completion fires from the ISR */
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_histogram mdp_hist;
struct completion mdp_hist_comp;
#else
static struct mdp_histogram mdp_hist;
static struct completion mdp_hist_comp;
#endif
213 | ||
214 | static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist) | |
215 | { | |
216 | int ret = 0; | |
217 | ||
218 | if (!hist->frame_cnt || (hist->bin_cnt == 0) || | |
219 | (hist->bin_cnt > MDP_HIST_MAX_BIN)) | |
220 | return -EINVAL; | |
221 | ||
222 | INIT_COMPLETION(mdp_hist_comp); | |
223 | ||
224 | mdp_hist.bin_cnt = hist->bin_cnt; | |
225 | mdp_hist.r = (hist->r) ? mdp_hist_r : 0; | |
226 | mdp_hist.g = (hist->g) ? mdp_hist_g : 0; | |
227 | mdp_hist.b = (hist->b) ? mdp_hist_b : 0; | |
228 | ||
229 | #ifdef CONFIG_FB_MSM_MDP40 | |
230 | MDP_OUTP(MDP_BASE + 0x95004, hist->frame_cnt); | |
231 | MDP_OUTP(MDP_BASE + 0x95000, 1); | |
232 | #else | |
233 | MDP_OUTP(MDP_BASE + 0x94004, hist->frame_cnt); | |
234 | MDP_OUTP(MDP_BASE + 0x94000, 1); | |
235 | #endif | |
236 | wait_for_completion_killable(&mdp_hist_comp); | |
237 | ||
238 | if (hist->r) { | |
239 | ret = copy_to_user(hist->r, mdp_hist.r, hist->bin_cnt*4); | |
240 | if (ret) | |
241 | goto hist_err; | |
242 | } | |
243 | if (hist->g) { | |
244 | ret = copy_to_user(hist->g, mdp_hist.g, hist->bin_cnt*4); | |
245 | if (ret) | |
246 | goto hist_err; | |
247 | } | |
248 | if (hist->b) { | |
249 | ret = copy_to_user(hist->b, mdp_hist.b, hist->bin_cnt*4); | |
250 | if (ret) | |
251 | goto hist_err; | |
252 | } | |
253 | return 0; | |
254 | ||
255 | hist_err: | |
256 | printk(KERN_ERR "%s: invalid hist buffer\n", __func__); | |
257 | return ret; | |
258 | } | |
259 | #endif | |
260 | ||
261 | /* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */ | |
262 | ||
263 | int mdp_ppp_pipe_wait(void) | |
264 | { | |
265 | int ret = 1; | |
266 | ||
267 | /* wait 5 seconds for the operation to complete before declaring | |
268 | the MDP hung */ | |
269 | ||
270 | if (mdp_ppp_waiting == TRUE) { | |
271 | ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp, | |
272 | 5 * HZ); | |
273 | ||
274 | if (!ret) | |
275 | printk(KERN_ERR "%s: Timed out waiting for the MDP.\n", | |
276 | __func__); | |
277 | } | |
278 | ||
279 | return ret; | |
280 | } | |
281 | ||
/* Guards mdp_irq_mask and mdp_irq_enabled below */
static DEFINE_SPINLOCK(mdp_lock);
static int mdp_irq_mask;	/* bitmask of "term" users holding the IRQ on */
static int mdp_irq_enabled;	/* is INT_MDP currently enabled? */
285 | ||
286 | void mdp_enable_irq(uint32 term) | |
287 | { | |
288 | unsigned long irq_flags; | |
289 | ||
290 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
291 | if (mdp_irq_mask & term) { | |
292 | printk(KERN_ERR "MDP IRQ term-0x%x is already set\n", term); | |
293 | } else { | |
294 | mdp_irq_mask |= term; | |
295 | if (mdp_irq_mask && !mdp_irq_enabled) { | |
296 | mdp_irq_enabled = 1; | |
297 | enable_irq(INT_MDP); | |
298 | } | |
299 | } | |
300 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
301 | } | |
302 | ||
303 | void mdp_disable_irq(uint32 term) | |
304 | { | |
305 | unsigned long irq_flags; | |
306 | ||
307 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
308 | if (!(mdp_irq_mask & term)) { | |
309 | printk(KERN_ERR "MDP IRQ term-0x%x is not set\n", term); | |
310 | } else { | |
311 | mdp_irq_mask &= ~term; | |
312 | if (!mdp_irq_mask && mdp_irq_enabled) { | |
313 | mdp_irq_enabled = 0; | |
314 | disable_irq(INT_MDP); | |
315 | } | |
316 | } | |
317 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
318 | } | |
319 | ||
320 | void mdp_disable_irq_nolock(uint32 term) | |
321 | { | |
322 | ||
323 | if (!(mdp_irq_mask & term)) { | |
324 | printk(KERN_ERR "MDP IRQ term-0x%x is not set\n", term); | |
325 | } else { | |
326 | mdp_irq_mask &= ~term; | |
327 | if (!mdp_irq_mask && mdp_irq_enabled) { | |
328 | mdp_irq_enabled = 0; | |
329 | disable_irq(INT_MDP); | |
330 | } | |
331 | } | |
332 | } | |
333 | ||
/*
 * mdp_pipe_kickoff() - start a hardware operation on the given engine.
 *
 * "term" selects the engine (PPP blit, DMA2/S/E, overlay pipes).  The
 * PPP path is synchronous: it powers the block on, kicks the engine and
 * sleeps until the ISR completes it.  All other engines are just kicked
 * and complete through their own interrupt paths.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{

	/* make sure all register programming has landed before the kick */
	dmb();	/* memory barrier */

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* latch a freshly-programmed LUT bank before starting DMA */
		if (mdp_lut_push) {
			mutex_lock(&mdp_lut_push_sem);
			mdp_lut_push = 0;
			MDP_OUTP(MDP_BASE + 0x90070,
					(mdp_lut_push_i << 10) | 0x17);
			mutex_unlock(&mdp_lut_push_sem);
		}
#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
	}
#endif
}
412 | ||
/* Delayed worker: ask the master block to drop MDP power/clocks. */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
417 | ||
/*
 * mdp_pipe_ctrl() - reference-counted power management for MDP blocks.
 *
 * POWER_ON increments the block's counter; POWER_OFF decrements it.
 * When every block's counter is zero the MDP clocks are turned off:
 * immediately for the master block, otherwise via the delayed
 * mdp_pipe_ctrl_worker.  The first block to come back on re-enables
 * the clocks.
 *
 * @isr: TRUE when called from interrupt context; the clock work (which
 *       may sleep) is then deferred to mdp_pipe_ctrl_wq.
 */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		   boolean isr)
{
	boolean mdp_all_blocks_off = TRUE;
	int i;
	unsigned long flag;

	/* update the per-block refcount under the spinlock */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (MDP_BLOCK_POWER_ON == state) {
		mdp_block_power_cnt[block]++;

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = TRUE;
	} else {
		mdp_block_power_cnt[block]--;

		if (mdp_block_power_cnt[block] < 0) {
			/*
			 * Master has to serve a request to power off MDP always
			 * It also has a timer to power off. So, in case of
			 * timer expires first and DMA2 finishes later,
			 * master has to power off two times
			 * There shouldn't be multiple power-off request for
			 * other blocks
			 */
			if (block != MDP_MASTER_BLOCK) {
				MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
				multiple power-off request\n", block);
			}
			mdp_block_power_cnt[block] = 0;
		}

		if (MDP_DMA2_BLOCK == block)
			mdp_in_processing = FALSE;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/*
	 * If it's in isr, we send our request to workqueue.
	 * Otherwise, processing happens in the current context
	 */
	if (isr) {
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (mdp_block_power_cnt[i] > 0)
				mdp_all_blocks_off = FALSE;
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			/* send workqueue to turn off mdp power */
			queue_delayed_work(mdp_pipe_ctrl_wq,
					   &mdp_pipe_ctrl_worker,
					   mdp_timer_duration);
		}
	} else {
		down(&mdp_pipe_ctrl_mutex);
		/* checking all blocks power state */
		for (i = 0; i < MDP_MAX_BLOCK; i++) {
			if (mdp_block_power_cnt[i] > 0)
				mdp_all_blocks_off = FALSE;
		}

		/*
		 * find out whether a delayable work item is currently
		 * pending
		 */

		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
			/*
			 * try to cancel the current work if it fails to
			 * stop (which means del_timer can't delete it
			 * from the list, it's about to expire and run),
			 * we have to let it run. queue_delayed_work won't
			 * accept the next job which is same as
			 * queue_delayed_work(mdp_timer_duration = 0)
			 */
			cancel_delayed_work(&mdp_pipe_ctrl_worker);
		}

		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
			if (block == MDP_MASTER_BLOCK) {
				mdp_current_clk_on = FALSE;
				/* turn off MDP clks */
				if (mdp_clk != NULL) {
					clk_disable(mdp_clk);
					MSM_FB_DEBUG("MDP CLK OFF\n");
				}
				if (mdp_pclk != NULL) {
					clk_disable(mdp_pclk);
					MSM_FB_DEBUG("MDP PCLK OFF\n");
				}
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdp_pipe_ctrl_wq,
						   &mdp_pipe_ctrl_worker,
						   mdp_timer_duration);
			}
		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
			mdp_current_clk_on = TRUE;
			/* turn on MDP clks */
			if (mdp_clk != NULL) {
				clk_enable(mdp_clk);
				MSM_FB_DEBUG("MDP CLK ON\n");
			}
			if (mdp_pclk != NULL) {
				clk_enable(mdp_pclk);
				MSM_FB_DEBUG("MDP PCLK ON\n");
			}
		}
		up(&mdp_pipe_ctrl_mutex);
	}
}
530 | ||
#ifndef CONFIG_FB_MSM_MDP40
/*
 * mdp_isr() - MDP interrupt handler (pre-MDP4 targets).
 *
 * Loops reading and clearing MDP_INTR_STATUS until no enabled source is
 * pending, completing whichever waiters (DMA, PPP, histogram, TV-out)
 * the hardware has finished serving.
 */
irqreturn_t mdp_isr(int irq, void *ptr)
{
	uint32 mdp_interrupt = 0;
	struct mdp_dma_data *dma;

	mdp_is_in_isr = TRUE;
	do {
		mdp_interrupt = inp32(MDP_INTR_STATUS);
		outp32(MDP_INTR_CLEAR, mdp_interrupt);

		/* only act on sources we have enabled */
		mdp_interrupt &= mdp_intr_mask;

		if (mdp_interrupt & TV_ENC_UNDERRUN) {
			mdp_interrupt &= ~(TV_ENC_UNDERRUN);
			mdp_tv_underflow_cnt++;
		}

		if (!mdp_interrupt)
			break;

		/* DMA3 TV-Out Start */
		if (mdp_interrupt & TV_OUT_DMA3_START) {
			/* let's disable TV out interrupt */
			mdp_intr_mask &= ~TV_OUT_DMA3_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma3_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}
#ifndef CONFIG_FB_MSM_MDP22
		/* histogram done: snapshot HW bins and wake the waiter */
		if (mdp_interrupt & MDP_HIST_DONE) {
			outp32(MDP_BASE + 0x94018, 0x3);
			outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
			if (mdp_hist.r)
				memcpy(mdp_hist.r, MDP_BASE + 0x94100,
						mdp_hist.bin_cnt*4);
			if (mdp_hist.g)
				memcpy(mdp_hist.g, MDP_BASE + 0x94200,
						mdp_hist.bin_cnt*4);
			if (mdp_hist.b)
				memcpy(mdp_hist.b, MDP_BASE + 0x94300,
						mdp_hist.bin_cnt*4);
			complete(&mdp_hist_comp);
		}

		/* LCDC UnderFlow */
		if (mdp_interrupt & LCDC_UNDERFLOW) {
			mdp_lcdc_underflow_cnt++;
		}
		/* LCDC Frame Start */
		if (mdp_interrupt & LCDC_FRAME_START) {
			/* let's disable LCDC interrupt */
			mdp_intr_mask &= ~LCDC_FRAME_START;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);

			dma = &dma2_data;
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}
		}

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_S_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
					TRUE);
			complete(&dma->comp);
		}
#endif

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_P_DONE) {
			struct timeval now;
			ktime_t now_k;

			/* turn the kickoff timestamp into an elapsed time */
			now_k = ktime_get_real();
			mdp_dma2_last_update_time.tv.sec =
				now_k.tv.sec - mdp_dma2_last_update_time.tv.sec;
			mdp_dma2_last_update_time.tv.nsec =
				now_k.tv.nsec - mdp_dma2_last_update_time.tv.nsec;

			if (mdp_debug[MDP_DMA2_BLOCK]) {
				jiffies_to_timeval(jiffies, &now);
				mdp_dma2_timeval.tv_usec =
				    now.tv_usec - mdp_dma2_timeval.tv_usec;
			}

			dma = &dma2_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
				      TRUE);
			complete(&dma->comp);
		}
		/* PPP Complete */
		if (mdp_interrupt & MDP_PPP_DONE) {
#ifdef CONFIG_MDP_PPP_ASYNC_OP
			mdp_ppp_djob_done();
#else
			mdp_pipe_ctrl(MDP_PPP_BLOCK,
				MDP_BLOCK_POWER_OFF, TRUE);
			if (mdp_ppp_waiting) {
				mdp_ppp_waiting = FALSE;
				complete(&mdp_ppp_comp);
			}
#endif
		}
	} while (1);

	mdp_is_in_isr = FALSE;

	return IRQ_HANDLED;
}
#endif
650 | ||
/*
 * mdp_drv_init() - one-time driver state initialization.
 *
 * Clears the debug flags, creates the driver workqueues, initializes
 * the completions/semaphores and per-DMA-channel state, zeroes the
 * block power counters and (optionally) creates the debugfs tunables.
 */
static void mdp_drv_init(void)
{
	int i;

	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_debug[i] = 0;
	}

	/* initialize spin lock and workqueue */
	spin_lock_init(&mdp_spin_lock);
	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
			  mdp_pipe_ctrl_workqueue_handler);
#ifdef CONFIG_MDP_PPP_ASYNC_OP
	mdp_ppp_dq_init();
#endif

	/* initialize semaphore */
	init_completion(&mdp_ppp_comp);
	init_MUTEX(&mdp_ppp_mutex);
	init_MUTEX(&mdp_pipe_ctrl_mutex);

	dma2_data.busy = FALSE;
	dma2_data.waiting = FALSE;
	init_completion(&dma2_data.comp);
	init_MUTEX(&dma2_data.mutex);
	mutex_init(&dma2_data.ov_mutex);

	dma3_data.busy = FALSE;
	dma3_data.waiting = FALSE;
	init_completion(&dma3_data.comp);
	init_MUTEX(&dma3_data.mutex);

	dma_s_data.busy = FALSE;
	dma_s_data.waiting = FALSE;
	init_completion(&dma_s_data.comp);
	init_MUTEX(&dma_s_data.mutex);

	/* NOTE(review): unlike the other channels, dma_e_data.mutex is
	 * never initialized here -- confirm whether it is ever taken. */
	dma_e_data.busy = FALSE;
	dma_e_data.waiting = FALSE;
	init_completion(&dma_e_data.comp);

#ifndef CONFIG_FB_MSM_MDP22
	init_completion(&mdp_hist_comp);
#endif

	/* initializing mdp power block counter to 0 */
	for (i = 0; i < MDP_MAX_BLOCK; i++) {
		mdp_block_power_cnt[i] = 0;
	}

#ifdef MSM_FB_ENABLE_DBGFS
	{
		struct dentry *root;
		char sub_name[] = "mdp";

		root = msm_fb_get_debugfs_root();
		if (root != NULL) {
			mdp_dir = debugfs_create_dir(sub_name, root);

			if (mdp_dir) {
				msm_fb_debugfs_file_create(mdp_dir,
					"dma2_update_time_in_usec",
					(u32 *) &mdp_dma2_update_time_in_usec);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_slow",
					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
				msm_fb_debugfs_file_create(mdp_dir,
					"vs_rdcnt_fast",
					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_usec_diff_threshold",
					(u32 *) &mdp_usec_diff_threshold);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_current_clk_on",
					(u32 *) &mdp_current_clk_on);
#ifdef CONFIG_FB_MSM_LCDC
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_x",
					(u32 *) &first_pixel_start_x);
				msm_fb_debugfs_file_create(mdp_dir,
					"lcdc_start_y",
					(u32 *) &first_pixel_start_y);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_lcdc_pclk_clk_rate",
					(u32 *) &mdp_lcdc_pclk_clk_rate);
				msm_fb_debugfs_file_create(mdp_dir,
					"mdp_lcdc_pad_pclk_clk_rate",
					(u32 *) &mdp_lcdc_pad_pclk_clk_rate);
#endif
			}
		}
	}
#endif
}
748 | ||
static int mdp_probe(struct platform_device *pdev);
static int mdp_remove(struct platform_device *pdev);

/* Platform driver for the "mdp" device registered in platform.c */
static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	/* mdp_suspend is #defined to NULL unless CONFIG_PM (see above) */
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
	},
};
768 | ||
/* Panel-off hook: shut the panel chain down, then the HW vsync clock. */
static int mdp_off(struct platform_device *pdev)
{
#ifdef MDP_HW_VSYNC
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
#endif
	int rc = panel_next_off(pdev);

#ifdef MDP_HW_VSYNC
	mdp_hw_vsync_clk_disable(mfd);
#endif

	return rc;
}
785 | ||
/* Panel-on hook: enable the HW vsync clock first, then the panel chain. */
static int mdp_on(struct platform_device *pdev)
{
#ifdef MDP_HW_VSYNC
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);

	mdp_hw_vsync_clk_enable(mfd);
#endif

	return panel_next_on(pdev);
}
802 | ||
803 | static int mdp_irq_clk_setup(void) | |
804 | { | |
805 | int ret; | |
806 | ||
807 | #ifdef CONFIG_FB_MSM_MDP40 | |
808 | ret = request_irq(INT_MDP, mdp4_isr, IRQF_DISABLED, "MDP", 0); | |
809 | #else | |
810 | ret = request_irq(INT_MDP, mdp_isr, IRQF_DISABLED, "MDP", 0); | |
811 | #endif | |
812 | if (ret) { | |
813 | printk(KERN_ERR "mdp request_irq() failed!\n"); | |
814 | return ret; | |
815 | } | |
816 | disable_irq(INT_MDP); | |
817 | ||
818 | mdp_clk = clk_get(NULL, "mdp_clk"); | |
819 | ||
820 | if (IS_ERR(mdp_clk)) { | |
821 | ret = PTR_ERR(mdp_clk); | |
822 | printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret); | |
823 | free_irq(INT_MDP, 0); | |
824 | return ret; | |
825 | } | |
826 | ||
827 | mdp_pclk = clk_get(NULL, "mdp_pclk"); | |
828 | if (IS_ERR(mdp_pclk)) | |
829 | mdp_pclk = NULL; | |
830 | ||
831 | ||
832 | #ifdef CONFIG_FB_MSM_MDP40 | |
833 | /* | |
834 | * mdp_clk should greater than mdp_pclk always | |
835 | */ | |
836 | clk_set_rate(mdp_clk, 122880000); /* 122.88 Mhz */ | |
837 | printk(KERN_INFO "mdp_clk: mdp_clk=%d mdp_pclk=%d\n", | |
838 | (int)clk_get_rate(mdp_clk), (int)clk_get_rate(mdp_pclk)); | |
839 | #endif | |
840 | ||
841 | return 0; | |
842 | } | |
843 | ||
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
/* set once the MDP register window, IRQ and clocks have been claimed */
static int mdp_resource_initialized;
static struct msm_panel_common_pdata *mdp_pdata;
848 | ||
/*
 * mdp_probe() - platform probe for the MDP core and the panel devices.
 *
 * The first invocation (pdev->id == 0 with resources) maps the MDP
 * register window, sets up IRQ/clocks and initializes the hardware.
 * Subsequent invocations describe panels: a child "msm_fb" device is
 * allocated and the mfd callbacks (dma function, LUT/histogram hooks,
 * on/off chain) are wired up according to the panel type.
 */
static int mdp_probe(struct platform_device *pdev)
{
	struct platform_device *msm_fb_dev = NULL;
	struct msm_fb_data_type *mfd;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;
	resource_size_t size;
#ifdef CONFIG_FB_MSM_MDP40
	int intf, if_no;
#else
	unsigned long flag;
#endif

	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
		mdp_pdata = pdev->dev.platform_data;

		size = resource_size(&pdev->resource[0]);
		msm_mdp_base = ioremap(pdev->resource[0].start, size);

		MSM_FB_INFO("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
			(int)pdev->resource[0].start, (int)msm_mdp_base);

		if (unlikely(!msm_mdp_base))
			return -ENOMEM;

		/* NOTE(review): debug printks below lack a KERN_ level */
		printk("irq clk setup\n");
		rc = mdp_irq_clk_setup();
		printk("irq clk setup done\n");
		if (rc)
			return rc;

		/* initializing mdp hw */
#ifdef CONFIG_FB_MSM_MDP40
		mdp4_hw_init();
#else
		mdp_hw_init();
#endif

		mdp_resource_initialized = 1;
		return 0;
	}

	if (!mdp_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
	if (!msm_fb_dev)
		return -ENOMEM;

	/* link to the latest pdev */
	mfd->pdev = msm_fb_dev;

	/* add panel data */
	if (platform_device_add_data
	    (msm_fb_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
		rc = -ENOMEM;
		goto mdp_probe_err;
	}
	/* data chain */
	pdata = msm_fb_dev->dev.platform_data;
	pdata->on = mdp_on;
	pdata->off = mdp_off;
	pdata->next = pdev;

	switch (mfd->panel.type) {
	case EXT_MDDI_PANEL:
	case MDDI_PANEL:
	case EBI2_PANEL:
		/* software-refresh panels: updates pushed by workers */
		INIT_WORK(&mfd->dma_update_worker,
			  mdp_lcd_update_workqueue_handler);
		INIT_WORK(&mfd->vsync_resync_worker,
			  mdp_vsync_resync_workqueue_handler);
		mfd->hw_refresh = FALSE;

		if (mfd->panel.type == EXT_MDDI_PANEL) {
			/* 15 fps -> 66 msec */
			mfd->refresh_timer_duration = (66 * HZ / 1000);
		} else {
			/* 24 fps -> 42 msec */
			mfd->refresh_timer_duration = (42 * HZ / 1000);
		}

#ifdef CONFIG_FB_MSM_MDP22
		mfd->dma_fnc = mdp_dma2_update;
		mfd->dma = &dma2_data;
#else
		if (mfd->panel_info.pdest == DISPLAY_1) {
#ifdef CONFIG_FB_MSM_OVERLAY
			mfd->dma_fnc = mdp4_mddi_overlay;
#else
			mfd->dma_fnc = mdp_dma2_update;
#endif
			mfd->dma = &dma2_data;
			mfd->lut_update = mdp_lut_update_nonlcdc;
			mfd->do_histogram = mdp_do_histogram;
		} else {
			mfd->dma_fnc = mdp_dma_s_update;
			mfd->dma = &dma_s_data;
		}
#endif
		if (mdp_pdata)
			mfd->vsync_gpio = mdp_pdata->gpio;
		else
			mfd->vsync_gpio = -1;

#ifdef CONFIG_FB_MSM_MDP40
		/* route the panel through the matching MDP4 interface */
		if (mfd->panel.type == EBI2_PANEL)
			intf = EBI2_INTF;
		else
			intf = MDDI_INTF;

		if (mfd->panel_info.pdest == DISPLAY_1)
			if_no = PRIMARY_INTF_SEL;
		else
			if_no = SECONDARY_INTF_SEL;

		mdp4_display_intf_sel(if_no, intf);
#endif
		mdp_config_vsync(mfd);
		break;

	case HDMI_PANEL:
	case LCDC_PANEL:
		/* hardware-refresh panels driven by the LCDC timing block */
		pdata->on = mdp_lcdc_on;
		pdata->off = mdp_lcdc_off;
		mfd->hw_refresh = TRUE;
		mfd->cursor_update = mdp_hw_cursor_update;
#ifndef CONFIG_FB_MSM_MDP22
		mfd->lut_update = mdp_lut_update_lcdc;
		mfd->do_histogram = mdp_do_histogram;
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
		mfd->dma_fnc = mdp4_lcdc_overlay;
#else
		mfd->dma_fnc = mdp_lcdc_update;
#endif

#ifdef CONFIG_FB_MSM_MDP40
		if (mfd->panel.type == HDMI_PANEL) {
			mfd->dma = &dma_e_data;
			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
		} else {
			mfd->dma = &dma2_data;
			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
		}
#else
		mfd->dma = &dma2_data;
		/* drop DMA_P-done from the intr mask for hw-refresh panels */
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_intr_mask &= ~MDP_DMA_P_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
#endif
		break;

	case TV_PANEL:
		pdata->on = mdp_dma3_on;
		pdata->off = mdp_dma3_off;
		mfd->hw_refresh = TRUE;
		mfd->dma_fnc = mdp_dma3_update;
		mfd->dma = &dma3_data;
		break;

	default:
		printk(KERN_ERR "mdp_probe: unknown device type!\n");
		rc = -ENODEV;
		goto mdp_probe_err;
	}

	/* set driver data */
	platform_set_drvdata(msm_fb_dev, mfd);

	rc = platform_device_add(msm_fb_dev);
	if (rc) {
		goto mdp_probe_err;
	}

	pdev_list[pdev_list_cnt++] = pdev;
	return 0;

mdp_probe_err:
	platform_device_put(msm_fb_dev);
	return rc;
}
1045 | ||
1046 | static void mdp_suspend_sub(void) | |
1047 | { | |
1048 | /* cancel pipe ctrl worker */ | |
1049 | cancel_delayed_work(&mdp_pipe_ctrl_worker); | |
1050 | ||
1051 | /* for workder can't be cancelled... */ | |
1052 | flush_workqueue(mdp_pipe_ctrl_wq); | |
1053 | ||
1054 | /* let's wait for PPP completion */ | |
1055 | while (mdp_block_power_cnt[MDP_PPP_BLOCK] > 0) ; | |
1056 | ||
1057 | /* try to power down */ | |
1058 | mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); | |
1059 | } | |
1060 | ||
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
/* Legacy PM suspend hook, used when earlysuspend is not available. */
static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	mdp_suspend_sub();
	return 0;
}
#endif
1068 | ||
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Earlysuspend hook: performs the same work as mdp_suspend(). */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
}
#endif
1075 | ||
/* Unmap the MDP register window on driver removal. */
static int mdp_remove(struct platform_device *pdev)
{
	iounmap(msm_mdp_base);
	return 0;
}
1081 | ||
/* Register the earlysuspend handler (if built) and the platform driver. */
static int mdp_register_driver(void)
{
#ifdef CONFIG_HAS_EARLYSUSPEND
	/* run just before the framebuffer itself is suspended */
	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	early_suspend.suspend = mdp_early_suspend;
	register_early_suspend(&early_suspend);
#endif

	return platform_driver_register(&mdp_driver);
}
1092 | ||
1093 | static int __init mdp_driver_init(void) | |
1094 | { | |
1095 | int ret; | |
1096 | ||
1097 | mdp_drv_init(); | |
1098 | ||
1099 | ret = mdp_register_driver(); | |
1100 | if (ret) { | |
1101 | printk(KERN_ERR "mdp_register_driver() failed!\n"); | |
1102 | return ret; | |
1103 | } | |
1104 | ||
1105 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDP40) | |
1106 | mdp4_debugfs_init(); | |
1107 | #endif | |
1108 | ||
1109 | return 0; | |
1110 | ||
1111 | } | |
1112 | ||
1113 | module_init(mdp_driver_init); |