drivers/staging/imx-drm/ipu-v3/ipu-common.c
/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_device.h>

#include "imx-ipu-v3.h"
#include "ipu-prv.h"

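/*
 * Register access helpers for the IPU common (CM) and IDMAC register
 * blocks mapped in ipu_probe().
 */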
static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
        return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
        writel(value, ipu->cm_reg + offset);
}

static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
{
        return readl(ipu->idmac_reg + offset);
}

static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
                unsigned offset)
{
        writel(value, ipu->idmac_reg + offset);
}

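/*
 * Request an update of the DP synchronous flow parameters by setting
 * bit 3 in the IPU_SRM_PRI2 register.
 */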
void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
        u32 val;

        val = ipu_cm_read(ipu, IPU_SRM_PRI2);
        val |= 0x8;
        ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);

struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;

        return ipu->cpmem_base + channel->num;
}
EXPORT_SYMBOL_GPL(ipu_get_cpmem);

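/*
 * Raise the IDMAC arbitration priority of a channel; on IPUv3EX the
 * ID field in the channel parameter memory is set as well.
 */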
void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        struct ipu_ch_param __iomem *p = ipu_get_cpmem(channel);
        u32 val;

        if (ipu->ipu_type == IPUV3EX)
                ipu_ch_param_write_field(p, IPU_FIELD_ID, 1);

        val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(channel->num));
        val |= 1 << (channel->num % 32);
        ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(channel->num));
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);

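/*
 * The IPU_FIELD_* constants pack a CPMEM field descriptor as
 * ((word * 160 + bit) << 8) | size, each parameter word being 160 bits
 * wide. The two helpers below decode that descriptor and handle fields
 * that straddle a 32-bit data register.
 */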
void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v)
{
        u32 bit = (wbs >> 8) % 160;
        u32 size = wbs & 0xff;
        u32 word = (wbs >> 8) / 160;
        u32 i = bit / 32;
        u32 ofs = bit % 32;
        u32 mask = (1 << size) - 1;
        u32 val;

        pr_debug("%s %d %d %d\n", __func__, word, bit, size);

        val = readl(&base->word[word].data[i]);
        val &= ~(mask << ofs);
        val |= v << ofs;
        writel(val, &base->word[word].data[i]);

        if ((bit + size - 1) / 32 > i) {
                val = readl(&base->word[word].data[i + 1]);
                val &= ~(mask >> (ofs ? (32 - ofs) : 0));
                val |= v >> (ofs ? (32 - ofs) : 0);
                writel(val, &base->word[word].data[i + 1]);
        }
}
EXPORT_SYMBOL_GPL(ipu_ch_param_write_field);

u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs)
{
        u32 bit = (wbs >> 8) % 160;
        u32 size = wbs & 0xff;
        u32 word = (wbs >> 8) / 160;
        u32 i = bit / 32;
        u32 ofs = bit % 32;
        u32 mask = (1 << size) - 1;
        u32 val = 0;

        pr_debug("%s %d %d %d\n", __func__, word, bit, size);

        val = (readl(&base->word[word].data[i]) >> ofs) & mask;

        if ((bit + size - 1) / 32 > i) {
                u32 tmp;
                tmp = readl(&base->word[word].data[i + 1]);
                tmp &= mask >> (ofs ? (32 - ofs) : 0);
                val |= tmp << (ofs ? (32 - ofs) : 0);
        }

        return val;
}
EXPORT_SYMBOL_GPL(ipu_ch_param_read_field);

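/*
 * Program an interleaved RGB format. struct ipu_rgb gives each
 * component's offset from the LSB of the pixel; the value written to
 * the corresponding OFS field is recomputed from the other end, i.e.
 * bits_per_pixel - length - offset.
 */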
int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *p,
                struct ipu_rgb *rgb)
{
        int bpp = 0, npb = 0, ro, go, bo, to;

        ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
        go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
        bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
        to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;

        ipu_ch_param_write_field(p, IPU_FIELD_WID0, rgb->red.length - 1);
        ipu_ch_param_write_field(p, IPU_FIELD_OFS0, ro);
        ipu_ch_param_write_field(p, IPU_FIELD_WID1, rgb->green.length - 1);
        ipu_ch_param_write_field(p, IPU_FIELD_OFS1, go);
        ipu_ch_param_write_field(p, IPU_FIELD_WID2, rgb->blue.length - 1);
        ipu_ch_param_write_field(p, IPU_FIELD_OFS2, bo);

        if (rgb->transp.length) {
                ipu_ch_param_write_field(p, IPU_FIELD_WID3,
                                rgb->transp.length - 1);
                ipu_ch_param_write_field(p, IPU_FIELD_OFS3, to);
        } else {
                ipu_ch_param_write_field(p, IPU_FIELD_WID3, 7);
                ipu_ch_param_write_field(p, IPU_FIELD_OFS3,
                                rgb->bits_per_pixel);
        }

        switch (rgb->bits_per_pixel) {
        case 32:
                bpp = 0;
                npb = 15;
                break;
        case 24:
                bpp = 1;
                npb = 19;
                break;
        case 16:
                bpp = 3;
                npb = 31;
                break;
        case 8:
                bpp = 5;
                npb = 63;
                break;
        default:
                return -EINVAL;
        }
        ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
        ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
        ipu_ch_param_write_field(p, IPU_FIELD_PFS, 7); /* rgb mode */

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);

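/*
 * Pass pixels through untouched ("raw mode"); only the per-pixel width
 * and the burst size are programmed.
 */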
int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
                int width)
{
        int bpp = 0, npb = 0;

        switch (width) {
        case 32:
                bpp = 0;
                npb = 15;
                break;
        case 24:
                bpp = 1;
                npb = 19;
                break;
        case 16:
                bpp = 3;
                npb = 31;
                break;
        case 8:
                bpp = 5;
                npb = 63;
                break;
        default:
                return -EINVAL;
        }

        ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
        ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
        ipu_ch_param_write_field(p, IPU_FIELD_PFS, 6); /* raw mode */

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);

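/* Program an interleaved 4:2:2 YUV format (UYVY or YUYV). */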
void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
                u32 pixel_format)
{
        switch (pixel_format) {
        case V4L2_PIX_FMT_UYVY:
                ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3);   /* bits/pixel */
                ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0xA); /* pix format */
                ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31);  /* burst size */
                break;
        case V4L2_PIX_FMT_YUYV:
                ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3);   /* bits/pixel */
                ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0x8); /* pix format */
                ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31);  /* burst size */
                break;
        }
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);

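/*
 * Program a planar YUV 4:2:0 layout with explicit U and V base offsets
 * (in bytes from the buffer start; the hardware fields are written in
 * units of 8 bytes). For YVU420 the two chroma offsets are swapped.
 */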
void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
                u32 pixel_format, int stride, int u_offset, int v_offset)
{
        switch (pixel_format) {
        case V4L2_PIX_FMT_YUV420:
                ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
                ipu_ch_param_write_field(p, IPU_FIELD_UBO, u_offset / 8);
                ipu_ch_param_write_field(p, IPU_FIELD_VBO, v_offset / 8);
                break;
        case V4L2_PIX_FMT_YVU420:
                ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
                ipu_ch_param_write_field(p, IPU_FIELD_UBO, v_offset / 8);
                ipu_ch_param_write_field(p, IPU_FIELD_VBO, u_offset / 8);
                break;
        }
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);

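/*
 * Convenience wrapper that derives the U and V plane offsets of a
 * contiguous YUV 4:2:0 buffer from its stride and height.
 */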
void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
                int stride, int height)
{
        int u_offset, v_offset;
        int uv_stride = 0;

        switch (pixel_format) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
                uv_stride = stride / 2;
                u_offset = stride * height;
                v_offset = u_offset + (uv_stride * height / 2);
                ipu_cpmem_set_yuv_planar_full(p, pixel_format, stride,
                                u_offset, v_offset);
                break;
        }
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);

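/* Default RGB component layouts used by ipu_cpmem_set_fmt(). */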
static struct ipu_rgb def_rgb_32 = {
        .red    = { .offset = 16, .length = 8, },
        .green  = { .offset =  8, .length = 8, },
        .blue   = { .offset =  0, .length = 8, },
        .transp = { .offset = 24, .length = 8, },
        .bits_per_pixel = 32,
};

static struct ipu_rgb def_bgr_32 = {
        .red    = { .offset = 16, .length = 8, },
        .green  = { .offset =  8, .length = 8, },
        .blue   = { .offset =  0, .length = 8, },
        .transp = { .offset = 24, .length = 8, },
        .bits_per_pixel = 32,
};

static struct ipu_rgb def_rgb_24 = {
        .red    = { .offset =  0, .length = 8, },
        .green  = { .offset =  8, .length = 8, },
        .blue   = { .offset = 16, .length = 8, },
        .transp = { .offset =  0, .length = 0, },
        .bits_per_pixel = 24,
};

static struct ipu_rgb def_bgr_24 = {
        .red    = { .offset = 16, .length = 8, },
        .green  = { .offset =  8, .length = 8, },
        .blue   = { .offset =  0, .length = 8, },
        .transp = { .offset =  0, .length = 0, },
        .bits_per_pixel = 24,
};

static struct ipu_rgb def_rgb_16 = {
        .red    = { .offset = 11, .length = 5, },
        .green  = { .offset =  5, .length = 6, },
        .blue   = { .offset =  0, .length = 5, },
        .transp = { .offset =  0, .length = 0, },
        .bits_per_pixel = 16,
};

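/*
 * Byte offset of the sample at (x, y) within a contiguous YUV 4:2:0
 * buffer: the Y plane is followed by the quarter-sized U and V planes.
 */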
#define Y_OFFSET(pix, x, y)     ((x) + pix->width * (y))
#define U_OFFSET(pix, x, y)     ((pix->width * pix->height) + \
                                 (pix->width * (y) / 4) + (x) / 2)
#define V_OFFSET(pix, x, y)     ((pix->width * pix->height) + \
                                 (pix->width * pix->height / 4) + \
                                 (pix->width * (y) / 4) + (x) / 2)

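/*
 * Program pixel format, bits per pixel and burst size for one of the
 * pixel formats known to this driver.
 */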
int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat)
{
        switch (pixelformat) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
                /* pix format */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 2);
                /* burst size */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 63);
                break;
        case V4L2_PIX_FMT_UYVY:
                /* bits/pixel */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
                /* pix format */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0xA);
                /* burst size */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
                break;
        case V4L2_PIX_FMT_YUYV:
                /* bits/pixel */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
                /* pix format */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0x8);
                /* burst size */
                ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
                break;
        case V4L2_PIX_FMT_RGB32:
                ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
                break;
        case V4L2_PIX_FMT_RGB565:
                ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
                break;
        case V4L2_PIX_FMT_BGR32:
                ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
                break;
        case V4L2_PIX_FMT_RGB24:
                ipu_cpmem_set_format_rgb(cpmem, &def_rgb_24);
                break;
        case V4L2_PIX_FMT_BGR24:
                ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);

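/*
 * Program resolution, stride, format and buffer address for an
 * ipu_image, taking its crop rectangle (image->rect) into account.
 */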
int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
                struct ipu_image *image)
{
        struct v4l2_pix_format *pix = &image->pix;
        int y_offset, u_offset, v_offset;

        pr_debug("%s: resolution: %dx%d stride: %d\n",
                        __func__, pix->width, pix->height,
                        pix->bytesperline);

        ipu_cpmem_set_resolution(cpmem, image->rect.width,
                        image->rect.height);
        ipu_cpmem_set_stride(cpmem, pix->bytesperline);

        ipu_cpmem_set_fmt(cpmem, pix->pixelformat);

        switch (pix->pixelformat) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
                y_offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
                u_offset = U_OFFSET(pix, image->rect.left,
                                image->rect.top) - y_offset;
                v_offset = V_OFFSET(pix, image->rect.left,
                                image->rect.top) - y_offset;

                ipu_cpmem_set_yuv_planar_full(cpmem, pix->pixelformat,
                                pix->bytesperline, u_offset, v_offset);
                ipu_cpmem_set_buffer(cpmem, 0, image->phys + y_offset);
                break;
        case V4L2_PIX_FMT_UYVY:
        case V4L2_PIX_FMT_YUYV:
                ipu_cpmem_set_buffer(cpmem, 0, image->phys +
                                image->rect.left * 2 +
                                image->rect.top * image->pix.bytesperline);
                break;
        case V4L2_PIX_FMT_RGB32:
        case V4L2_PIX_FMT_BGR32:
                ipu_cpmem_set_buffer(cpmem, 0, image->phys +
                                image->rect.left * 4 +
                                image->rect.top * image->pix.bytesperline);
                break;
        case V4L2_PIX_FMT_RGB565:
                ipu_cpmem_set_buffer(cpmem, 0, image->phys +
                                image->rect.left * 2 +
                                image->rect.top * image->pix.bytesperline);
                break;
        case V4L2_PIX_FMT_RGB24:
        case V4L2_PIX_FMT_BGR24:
                ipu_cpmem_set_buffer(cpmem, 0, image->phys +
                                image->rect.left * 3 +
                                image->rect.top * image->pix.bytesperline);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
        switch (pixelformat) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
        case V4L2_PIX_FMT_UYVY:
        case V4L2_PIX_FMT_YUYV:
                return IPUV3_COLORSPACE_YUV;
        case V4L2_PIX_FMT_RGB32:
        case V4L2_PIX_FMT_BGR32:
        case V4L2_PIX_FMT_RGB24:
        case V4L2_PIX_FMT_BGR24:
        case V4L2_PIX_FMT_RGB565:
                return IPUV3_COLORSPACE_RGB;
        default:
                return IPUV3_COLORSPACE_UNKNOWN;
        }
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

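/*
 * Reserve one of the 64 IDMAC channels. Returns an ERR_PTR if the
 * channel number is out of range or the channel is already in use.
 */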
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
        struct ipuv3_channel *channel;

        dev_dbg(ipu->dev, "%s %d\n", __func__, num);

        if (num > 63)
                return ERR_PTR(-ENODEV);

        mutex_lock(&ipu->channel_lock);

        channel = &ipu->channel[num];

        if (channel->busy) {
                channel = ERR_PTR(-EBUSY);
                goto out;
        }

        channel->busy = 1;
        channel->num = num;

out:
        mutex_unlock(&ipu->channel_lock);

        return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;

        dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

        mutex_lock(&ipu->channel_lock);

        channel->busy = 0;

        mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);

#define idma_mask(ch) (1 << (ch & 0x1f))

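/* Enable or disable double buffering for an IDMAC channel. */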
void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
                bool doublebuffer)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->lock, flags);

        reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
        if (doublebuffer)
                reg |= idma_mask(channel->num);
        else
                reg &= ~idma_mask(channel->num);
        ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);

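/*
 * Enable the IPU submodules selected by @mask in IPU_CONF; the DI
 * counters are released when the corresponding display interface is
 * enabled.
 */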
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
        unsigned long lock_flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, lock_flags);

        val = ipu_cm_read(ipu, IPU_DISP_GEN);

        if (mask & IPU_CONF_DI0_EN)
                val |= IPU_DI0_COUNTER_RELEASE;
        if (mask & IPU_CONF_DI1_EN)
                val |= IPU_DI1_COUNTER_RELEASE;

        ipu_cm_write(ipu, val, IPU_DISP_GEN);

        val = ipu_cm_read(ipu, IPU_CONF);
        val |= mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        spin_unlock_irqrestore(&ipu->lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);

int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
        unsigned long lock_flags;
        u32 val;

        spin_lock_irqsave(&ipu->lock, lock_flags);

        val = ipu_cm_read(ipu, IPU_CONF);
        val &= ~mask;
        ipu_cm_write(ipu, val, IPU_CONF);

        val = ipu_cm_read(ipu, IPU_DISP_GEN);

        if (mask & IPU_CONF_DI0_EN)
                val &= ~IPU_DI0_COUNTER_RELEASE;
        if (mask & IPU_CONF_DI1_EN)
                val &= ~IPU_DI1_COUNTER_RELEASE;

        ipu_cm_write(ipu, val, IPU_DISP_GEN);

        spin_unlock_irqrestore(&ipu->lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);

void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
        struct ipu_soc *ipu = channel->ipu;
        unsigned int chno = channel->num;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        /* Mark buffer as ready. */
        if (buf_num == 0)
                ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
        else
                ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

        spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);

int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&ipu->lock, flags);

        val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
        val |= idma_mask(channel->num);
        ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);

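/*
 * Disable an IDMAC channel: wait up to 50 ms for it to become idle,
 * then clear its enable bit, its buffer-ready flags and its
 * double-buffer mode.
 */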
int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
        struct ipu_soc *ipu = channel->ipu;
        u32 val;
        unsigned long flags;
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(50);
        while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
                        idma_mask(channel->num)) {
                if (time_after(jiffies, timeout)) {
                        dev_warn(ipu->dev, "disabling busy idmac channel %d\n",
                                        channel->num);
                        break;
                }
                cpu_relax();
        }

        spin_lock_irqsave(&ipu->lock, flags);

        /* Disable DMA channel(s) */
        val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
        val &= ~idma_mask(channel->num);
        ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

        /* Set channel buffers NOT to be ready */
        ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

        if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
                        idma_mask(channel->num)) {
                ipu_cm_write(ipu, idma_mask(channel->num),
                                IPU_CHA_BUF0_RDY(channel->num));
        }

        if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
                        idma_mask(channel->num)) {
                ipu_cm_write(ipu, idma_mask(channel->num),
                                IPU_CHA_BUF1_RDY(channel->num));
        }

        ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

        /* Reset the double buffer */
        val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
        val &= ~idma_mask(channel->num);
        ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

        spin_unlock_irqrestore(&ipu->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);

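/* Reset the IPU memories and wait for the hardware to clear the request. */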
static int ipu_reset(struct ipu_soc *ipu)
{
        unsigned long timeout;

        ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

        timeout = jiffies + msecs_to_jiffies(1000);
        while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
                if (time_after(jiffies, timeout))
                        return -ETIME;
                cpu_relax();
        }

        return 0;
}

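/*
 * Per-SoC register layout; all offsets are relative to the start of the
 * IPU register window described by the platform memory resource.
 */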
struct ipu_devtype {
        const char *name;
        unsigned long cm_ofs;
        unsigned long cpmem_ofs;
        unsigned long srm_ofs;
        unsigned long tpm_ofs;
        unsigned long disp0_ofs;
        unsigned long disp1_ofs;
        unsigned long dc_tmpl_ofs;
        unsigned long vdi_ofs;
        enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
        .name = "IPUv3EX",
        .cm_ofs = 0x1e000000,
        .cpmem_ofs = 0x1f000000,
        .srm_ofs = 0x1f040000,
        .tpm_ofs = 0x1f060000,
        .disp0_ofs = 0x1e040000,
        .disp1_ofs = 0x1e048000,
        .dc_tmpl_ofs = 0x1f080000,
        .vdi_ofs = 0x1e068000,
        .type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
        .name = "IPUv3M",
        .cm_ofs = 0x06000000,
        .cpmem_ofs = 0x07000000,
        .srm_ofs = 0x07040000,
        .tpm_ofs = 0x07060000,
        .disp0_ofs = 0x06040000,
        .disp1_ofs = 0x06048000,
        .dc_tmpl_ofs = 0x07080000,
        .vdi_ofs = 0x06068000,
        .type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
        .name = "IPUv3H",
        .cm_ofs = 0x00200000,
        .cpmem_ofs = 0x00300000,
        .srm_ofs = 0x00340000,
        .tpm_ofs = 0x00360000,
        .disp0_ofs = 0x00240000,
        .disp1_ofs = 0x00248000,
        .dc_tmpl_ofs = 0x00380000,
        .vdi_ofs = 0x00268000,
        .type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
        { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
        { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
        { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
                struct platform_device *pdev, unsigned long ipu_base,
                struct clk *ipu_clk)
{
        char *unit;
        int ret;
        struct device *dev = &pdev->dev;
        const struct ipu_devtype *devtype = ipu->devtype;

        ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
                        IPU_CONF_DI0_EN, ipu_clk);
        if (ret) {
                unit = "di0";
                goto err_di_0;
        }

        ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
                        IPU_CONF_DI1_EN, ipu_clk);
        if (ret) {
                unit = "di1";
                goto err_di_1;
        }

        ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
                        IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
        if (ret) {
                unit = "dc_template";
                goto err_dc;
        }

        ret = ipu_dmfc_init(ipu, dev, ipu_base +
                        devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
        if (ret) {
                unit = "dmfc";
                goto err_dmfc;
        }

        ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
        if (ret) {
                unit = "dp";
                goto err_dp;
        }

        return 0;

err_dp:
        ipu_dmfc_exit(ipu);
err_dmfc:
        ipu_dc_exit(ipu);
err_dc:
        ipu_di_exit(ipu, 1);
err_di_1:
        ipu_di_exit(ipu, 0);
err_di_0:
        dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
        return ret;
}

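/*
 * Demultiplex the IPU interrupt status registers onto the linear IRQ
 * range allocated in ipu_irq_init().
 */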
static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
        unsigned long status;
        int i, bit, irq_base;

        for (i = 0; i < num_regs; i++) {

                status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

                irq_base = ipu->irq_start + regs[i] * 32;
                for_each_set_bit(bit, &status, 32)
                        generic_handle_irq(irq_base + bit);
        }
}

static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14 };
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);

        ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

        chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 4, 5, 8, 9 };
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);

        ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

        chained_irq_exit(chip, desc);
}

static void ipu_ack_irq(struct irq_data *d)
{
        struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq - ipu->irq_start;

        ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32));
}

static void ipu_unmask_irq(struct irq_data *d)
{
        struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq - ipu->irq_start;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->lock, flags);

        reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
        reg |= 1 << (irq % 32);
        ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));

        spin_unlock_irqrestore(&ipu->lock, flags);
}

static void ipu_mask_irq(struct irq_data *d)
{
        struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq - ipu->irq_start;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->lock, flags);

        reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
        reg &= ~(1 << (irq % 32));
        ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));

        spin_unlock_irqrestore(&ipu->lock, flags);
}

static struct irq_chip ipu_irq_chip = {
        .name = "IPU",
        .irq_ack = ipu_ack_irq,
        .irq_mask = ipu_mask_irq,
        .irq_unmask = ipu_unmask_irq,
};

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
                enum ipu_channel_irq irq_type)
{
        return ipu->irq_start + irq_type + channel->num;
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);

static void ipu_submodules_exit(struct ipu_soc *ipu)
{
        ipu_dp_exit(ipu);
        ipu_dmfc_exit(ipu);
        ipu_dc_exit(ipu);
        ipu_di_exit(ipu, 1);
        ipu_di_exit(ipu, 0);
}

static int platform_remove_devices_fn(struct device *dev, void *unused)
{
        struct platform_device *pdev = to_platform_device(dev);

        platform_device_unregister(pdev);

        return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
        device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

struct ipu_platform_reg {
        struct ipu_client_platformdata pdata;
        const char *name;
};

static const struct ipu_platform_reg client_reg[] = {
        {
                .pdata = {
                        .di = 0,
                        .dc = 5,
                        .dp = IPU_DP_FLOW_SYNC_BG,
                        .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
                        .dma[1] = -EINVAL,
                },
                .name = "imx-ipuv3-crtc",
        }, {
                .pdata = {
                        .di = 1,
                        .dc = 1,
                        .dp = -EINVAL,
                        .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
                        .dma[1] = -EINVAL,
                },
                .name = "imx-ipuv3-crtc",
        },
};

static int ipu_client_id;

static int ipu_add_subdevice_pdata(struct device *dev,
                const struct ipu_platform_reg *reg)
{
        struct platform_device *pdev;

        pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
                        &reg->pdata, sizeof(struct ipu_platform_reg));

        return pdev ? 0 : -EINVAL;
}

static int ipu_add_client_devices(struct ipu_soc *ipu)
{
        int ret;
        int i;

        for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
                const struct ipu_platform_reg *reg = &client_reg[i];
                ret = ipu_add_subdevice_pdata(ipu->dev, reg);
                if (ret)
                        goto err_register;
        }

        return 0;

err_register:
        platform_device_unregister_children(to_platform_device(ipu->dev));

        return ret;
}

static int ipu_irq_init(struct ipu_soc *ipu)
{
        int i;

        ipu->irq_start = irq_alloc_descs(-1, 0, IPU_NUM_IRQS, 0);
        if (ipu->irq_start < 0)
                return ipu->irq_start;

        for (i = ipu->irq_start; i < ipu->irq_start + IPU_NUM_IRQS; i++) {
                irq_set_chip_and_handler(i, &ipu_irq_chip, handle_level_irq);
                set_irq_flags(i, IRQF_VALID);
                irq_set_chip_data(i, ipu);
        }

        irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
        irq_set_handler_data(ipu->irq_sync, ipu);
        irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
        irq_set_handler_data(ipu->irq_err, ipu);

        return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
        int i;

        irq_set_chained_handler(ipu->irq_err, NULL);
        irq_set_handler_data(ipu->irq_err, NULL);
        irq_set_chained_handler(ipu->irq_sync, NULL);
        irq_set_handler_data(ipu->irq_sync, NULL);

        for (i = ipu->irq_start; i < ipu->irq_start + IPU_NUM_IRQS; i++) {
                set_irq_flags(i, 0);
                irq_set_chip(i, NULL);
                irq_set_chip_data(i, NULL);
        }

        irq_free_descs(ipu->irq_start, IPU_NUM_IRQS);
}

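/*
 * Map the register blocks, reset the IPU, initialize the submodules and
 * register the client devices.
 */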
static int ipu_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id =
                        of_match_device(imx_ipu_dt_ids, &pdev->dev);
        struct ipu_soc *ipu;
        struct resource *res;
        unsigned long ipu_base;
        int i, ret, irq_sync, irq_err;
        const struct ipu_devtype *devtype;

        devtype = of_id->data;

        irq_sync = platform_get_irq(pdev, 0);
        irq_err = platform_get_irq(pdev, 1);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
                        irq_sync, irq_err);

        if (!res || irq_sync < 0 || irq_err < 0)
                return -ENODEV;

        ipu_base = res->start;

        ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
        if (!ipu)
                return -ENOMEM;

        for (i = 0; i < 64; i++)
                ipu->channel[i].ipu = ipu;
        ipu->devtype = devtype;
        ipu->ipu_type = devtype->type;

        spin_lock_init(&ipu->lock);
        mutex_init(&ipu->channel_lock);

        dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
                        ipu_base + devtype->cm_ofs);
        dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
        dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
                        ipu_base + devtype->cpmem_ofs);
        dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
                        ipu_base + devtype->disp0_ofs);
        dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
                        ipu_base + devtype->disp1_ofs);
        dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
                        ipu_base + devtype->srm_ofs);
        dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
                        ipu_base + devtype->tpm_ofs);
        dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
        dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
        dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
                        ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
        dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
                        ipu_base + devtype->vdi_ofs);

        ipu->cm_reg = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cm_ofs, PAGE_SIZE);
        ipu->idmac_reg = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
                        PAGE_SIZE);
        ipu->cpmem_base = devm_ioremap(&pdev->dev,
                        ipu_base + devtype->cpmem_ofs, PAGE_SIZE);

        if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base) {
                ret = -ENOMEM;
                goto failed_ioremap;
        }

        ipu->clk = devm_clk_get(&pdev->dev, "bus");
        if (IS_ERR(ipu->clk)) {
                ret = PTR_ERR(ipu->clk);
                dev_err(&pdev->dev, "clk_get failed with %d\n", ret);
                goto failed_clk_get;
        }

        platform_set_drvdata(pdev, ipu);

        clk_prepare_enable(ipu->clk);

        ipu->dev = &pdev->dev;
        ipu->irq_sync = irq_sync;
        ipu->irq_err = irq_err;

        ret = ipu_irq_init(ipu);
        if (ret)
                goto out_failed_irq;

        ret = ipu_reset(ipu);
        if (ret)
                goto out_failed_reset;

        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
                        IPU_DISP_GEN);

        ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
        if (ret)
                goto failed_submodules_init;

        ret = ipu_add_client_devices(ipu);
        if (ret) {
                dev_err(&pdev->dev, "adding client devices failed with %d\n",
                                ret);
                goto failed_add_clients;
        }

        dev_info(&pdev->dev, "%s probed\n", devtype->name);

        return 0;

failed_add_clients:
        ipu_submodules_exit(ipu);
failed_submodules_init:
        ipu_irq_exit(ipu);
out_failed_reset:
out_failed_irq:
        clk_disable_unprepare(ipu->clk);
failed_clk_get:
failed_ioremap:
        return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
        struct ipu_soc *ipu = platform_get_drvdata(pdev);

        platform_device_unregister_children(pdev);
        ipu_submodules_exit(ipu);
        ipu_irq_exit(ipu);

        clk_disable_unprepare(ipu->clk);

        return 0;
}

static struct platform_driver imx_ipu_driver = {
        .driver = {
                .name = "imx-ipuv3",
                .of_match_table = imx_ipu_dt_ids,
        },
        .probe = ipu_probe,
        .remove = ipu_remove,
};

module_platform_driver(imx_ipu_driver);

MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");