1 /*
2 * udlfb.c -- Framebuffer driver for DisplayLink USB controller
3 *
4 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
5 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
6 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License v2. See the file COPYING in the main directory of this archive for
10 * more details.
11 *
12 * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
13 * usb-skeleton by GregKH.
14 *
15 * Device-specific portions based on information from Displaylink, with work
16 * from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
17 */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/usb.h>
23 #include <linux/uaccess.h>
24 #include <linux/mm.h>
25 #include <linux/fb.h>
26 #include <linux/vmalloc.h>
27 #include <linux/slab.h>
28
29 #include "udlfb.h"
30
31 static struct fb_fix_screeninfo dlfb_fix = {
32 .id = "udlfb",
33 .type = FB_TYPE_PACKED_PIXELS,
34 .visual = FB_VISUAL_TRUECOLOR,
35 .xpanstep = 0,
36 .ypanstep = 0,
37 .ywrapstep = 0,
38 .accel = FB_ACCEL_NONE,
39 };
40
41 static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
42 #ifdef FBINFO_VIRTFB
43 FBINFO_VIRTFB |
44 #endif
45 FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
46 FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
47
48 /*
49 * There are many DisplayLink-based products, all with unique PIDs. We are able
50 * to support all volume products (circa 2009) with a single driver, so we match
51 * globally on VID. TODO: Probe() needs to detect when we might be running
52 * "future" chips, and bail on those, so a compatible driver can match.
53 */
54 static struct usb_device_id id_table[] = {
55 {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
56 {},
57 };
58 MODULE_DEVICE_TABLE(usb, id_table);
59
60 #ifndef CONFIG_FB_DEFERRED_IO
61 #warning message "kernel FB_DEFERRED_IO option to support generic fbdev apps"
62 #endif
63
64 #ifndef CONFIG_FB_SYS_IMAGEBLIT
65 #ifndef CONFIG_FB_SYS_IMAGEBLIT_MODULE
66 #warning message "FB_SYS_* in kernel or module option to support fb console"
67 #endif
68 #endif
69
70 #ifndef CONFIG_FB_MODE_HELPERS
71 #warning message "kernel FB_MODE_HELPERS required. Expect build break"
72 #endif
73
74 /* dlfb keeps a list of urbs for efficient bulk transfers */
75 static void dlfb_urb_completion(struct urb *urb);
76 static struct urb *dlfb_get_urb(struct dlfb_data *dev);
77 static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len);
78 static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size);
79 static void dlfb_free_urb_list(struct dlfb_data *dev);
80
81 /* other symbols with dependents */
82 #ifdef CONFIG_FB_DEFERRED_IO
83 static struct fb_deferred_io dlfb_defio;
84 #endif
85
86 /*
87 * All DisplayLink bulk operations start with 0xAF, followed by a specific code.
88 * All operations are written to buffers which later get sent to the device
89 */
90 static char *dlfb_set_register(char *buf, u8 reg, u8 val)
91 {
92 *buf++ = 0xAF;
93 *buf++ = 0x20;
94 *buf++ = reg;
95 *buf++ = val;
96 return buf;
97 }
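/*
 * For example, dlfb_set_register(buf, 0xFF, 0x00) appends the four bytes
 * AF 20 FF 00 ("write 0x00 to register 0xFF") and returns a pointer just
 * past them; that is exactly the sequence dlfb_vidreg_lock() below emits.
 */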
98
99 static char *dlfb_vidreg_lock(char *buf)
100 {
101 return dlfb_set_register(buf, 0xFF, 0x00);
102 }
103
104 static char *dlfb_vidreg_unlock(char *buf)
105 {
106 return dlfb_set_register(buf, 0xFF, 0xFF);
107 }
108
109 /*
110 * On/Off for driving the DisplayLink framebuffer to the display
111 */
112 static char *dlfb_enable_hvsync(char *buf, bool enable)
113 {
114 if (enable)
115 return dlfb_set_register(buf, 0x1F, 0x00);
116 else
117 return dlfb_set_register(buf, 0x1F, 0x01);
118 }
119
120 static char *dlfb_set_color_depth(char *buf, u8 selection)
121 {
122 return dlfb_set_register(buf, 0x00, selection);
123 }
124
125 static char *dlfb_set_base16bpp(char *wrptr, u32 base)
126 {
127 /* the base pointer is 24 bits wide; 0x20 holds the high byte. */
128 wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
129 wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
130 return dlfb_set_register(wrptr, 0x22, base);
131 }
132
133 /*
134 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
135 * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
136 */
137 static char *dlfb_set_base8bpp(char *wrptr, u32 base)
138 {
139 wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
140 wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
141 return dlfb_set_register(wrptr, 0x28, base);
142 }
143
144 static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
145 {
146 wrptr = dlfb_set_register(wrptr, reg, value >> 8);
147 return dlfb_set_register(wrptr, reg+1, value);
148 }
149
150 /*
151 * This is kind of weird because the controller takes some
152 * register values in a different byte order than other registers.
153 */
154 static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
155 {
156 wrptr = dlfb_set_register(wrptr, reg, value);
157 return dlfb_set_register(wrptr, reg+1, value >> 8);
158 }
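/*
 * Concretely: dlfb_set_register_16(buf, 0x0F, 0x1234) writes 0x12 to
 * register 0x0F and 0x34 to register 0x10, while dlfb_set_register_16be()
 * writes the same value low byte first (0x34, then 0x12).
 */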
159
160 /*
161 * LFSR is linear feedback shift register. The reason we have this is
162 * because the display controller needs to minimize the clock depth of
163 * various counters used in the display path. So this code reverses the
164 * provided value into the lfsr16 value by counting backwards to get
165 * the value that needs to be set in the hardware comparator to get the
166 * same actual count. This makes sense once you read above a couple of
167 * times and think about it from a hardware perspective.
168 */
169 static u16 dlfb_lfsr16(u16 actual_count)
170 {
171 u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
172
173 while (actual_count--) {
174 lv = ((lv << 1) |
175 (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
176 & 0xFFFF;
177 }
178
179 return (u16) lv;
180 }
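/*
 * A couple of sample values, worked out by hand from the loop above: the
 * first two shifts feed in zero bits, so dlfb_lfsr16(1) == 0xFFFE and
 * dlfb_lfsr16(2) == 0xFFFC, while dlfb_lfsr16(0) returns the 0xFFFF seed.
 */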
181
182 /*
183 * This does LFSR conversion on the value that is to be written.
184 * See LFSR explanation above for more detail.
185 */
186 static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
187 {
188 return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
189 }
190
191 /*
192 * This takes a standard fbdev screeninfo struct and all of its monitor mode
193 * details and converts them into the DisplayLink equivalent register commands.
194 */
195 static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
196 {
197 u16 xds, yds;
198 u16 xde, yde;
199 u16 yec;
200
201 /* x display start */
202 xds = var->left_margin + var->hsync_len;
203 wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
204 /* x display end */
205 xde = xds + var->xres;
206 wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);
207
208 /* y display start */
209 yds = var->upper_margin + var->vsync_len;
210 wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
211 /* y display end */
212 yde = yds + var->yres;
213 wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);
214
215 /* x end count is active + blanking - 1 */
216 wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
217 xde + var->right_margin - 1);
218
219 /* libdlo hardcodes hsync start to 1 */
220 wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);
221
222 /* hsync end is width of sync pulse + 1 */
223 wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);
224
225 /* hpixels is active pixels */
226 wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);
227
228 /* yendcount is vertical active + vertical blanking */
229 yec = var->yres + var->upper_margin + var->lower_margin +
230 var->vsync_len;
231 wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);
232
233 /* libdlo hardcodes vsync start to 0 */
234 wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);
235
236 /* vsync end is width of vsync pulse */
237 wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);
238
239 /* vpixels is active pixels */
240 wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);
241
242 /* convert pixclock (picoseconds per pixel) to a 5 kHz multiple: pclk5k = 1E12 / (pixclock * 5000) */
243 wrptr = dlfb_set_register_16be(wrptr, 0x1B,
244 200*1000*1000/var->pixclock);
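	/*
	 * Worked example: a 65 MHz pixel clock arrives here as var->pixclock
	 * = 10^12 / 65000000 ~= 15384 ps, and 200000000 / 15384 = 13000,
	 * i.e. 13000 units of 5 kHz = 65 MHz, as intended.
	 */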
245
246 return wrptr;
247 }
248
249 /*
250 * This takes a standard fbdev screeninfo struct that was fetched or prepared
251 * and then generates the appropriate command sequence that then drives the
252 * display controller.
253 */
254 static int dlfb_set_video_mode(struct dlfb_data *dev,
255 struct fb_var_screeninfo *var)
256 {
257 char *buf;
258 char *wrptr;
259 int retval = 0;
260 int writesize;
261 struct urb *urb;
262
263 if (!atomic_read(&dev->usb_active))
264 return -EPERM;
265
266 urb = dlfb_get_urb(dev);
267 if (!urb)
268 return -ENOMEM;
269 buf = (char *) urb->transfer_buffer;
270
271 /*
272 * This first section has to do with setting the base address on the
273 * controller associated with the display. There are 2 base
274 * pointers; currently, we only use the 16 bpp segment.
275 */
276 wrptr = dlfb_vidreg_lock(buf);
277 wrptr = dlfb_set_color_depth(wrptr, 0x00);
278 /* set base for 16bpp segment to 0 */
279 wrptr = dlfb_set_base16bpp(wrptr, 0);
280 /* set base for 8bpp segment to end of fb */
281 wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);
282
283 wrptr = dlfb_set_vid_cmds(wrptr, var);
284 wrptr = dlfb_enable_hvsync(wrptr, true);
285 wrptr = dlfb_vidreg_unlock(wrptr);
286
287 writesize = wrptr - buf;
288
289 retval = dlfb_submit_urb(dev, urb, writesize);
290
291 return retval;
292 }
293
294 static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
295 {
296 unsigned long start = vma->vm_start;
297 unsigned long size = vma->vm_end - vma->vm_start;
298 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
299 unsigned long page, pos;
300 struct dlfb_data *dev = info->par;
301
302 dl_notice("MMAP: %lu %u\n", offset + size, info->fix.smem_len);
303
304 if (offset + size > info->fix.smem_len)
305 return -EINVAL;
306
307 pos = (unsigned long)info->fix.smem_start + offset;
308
309 while (size > 0) {
310 page = vmalloc_to_pfn((void *)pos);
311 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
312 return -EAGAIN;
313
314 start += PAGE_SIZE;
315 pos += PAGE_SIZE;
316 if (size > PAGE_SIZE)
317 size -= PAGE_SIZE;
318 else
319 size = 0;
320 }
321
322 vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
323 return 0;
324
325 }
326
327 /*
328 * Trims identical data from front and back of line
329 * Sets new front buffer address and width
330 * And returns byte count of identical pixels
331 * Assumes CPU natural alignment (unsigned long)
332 * for back and front buffer ptrs and width
333 */
334 static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
335 {
336 int j, k;
337 const unsigned long *back = (const unsigned long *) bback;
338 const unsigned long *front = (const unsigned long *) *bfront;
339 const int width = *width_bytes / sizeof(unsigned long);
340 int identical = width;
341 int start = width;
342 int end = width;
343
344 prefetch((void *) front);
345 prefetch((void *) back);
346
347 for (j = 0; j < width; j++) {
348 if (back[j] != front[j]) {
349 start = j;
350 break;
351 }
352 }
353
354 for (k = width - 1; k > j; k--) {
355 if (back[k] != front[k]) {
356 end = k+1;
357 break;
358 }
359 }
360
361 identical = start + (width - end);
362 *bfront = (u8 *) &front[start];
363 *width_bytes = (end - start) * sizeof(unsigned long);
364
365 return identical * sizeof(unsigned long);
366 }
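/*
 * Example, assuming an 8-byte unsigned long: for a 32-byte line where only
 * longs 1 and 2 differ from the back buffer, start == 1 and end == 3, so the
 * caller's front pointer advances 8 bytes, width_bytes becomes 16, and
 * 16 bytes (longs 0 and 3) are reported back as identical.
 */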
367
368 /*
369 Render a command stream for an encoded horizontal line segment of pixels.
370
371 A command buffer holds several commands.
372 It always begins with a fresh command header
373 (the protocol doesn't require this, but we enforce it to allow
374 multiple buffers to be potentially encoded and sent in parallel).
375 A single command encodes one contiguous horizontal line of pixels
376
377 The function relies on the client to do all allocation, so that
378 rendering can be done directly to output buffers (e.g. USB URBs).
379 The function fills the supplied command buffer, providing information
380 on where it left off, so the client may call in again with additional
381 buffers if the line will take several buffers to complete.
382
383 A single command can transmit a maximum of 256 pixels,
384 regardless of the compression ratio (protocol design limit).
385 To the hardware, 0 for a size byte means 256
386
387 Rather than 256 pixel commands which are either rl or raw encoded,
388 the rlx command simply assumes alternating raw and rl spans within one cmd.
389 This has a slightly larger header overhead, but produces more even results.
390 It also processes all data (read and write) in a single pass.
391 Performance benchmarks of common cases show it having just slightly better
392 compression than 256 pixel raw -or- rle commands, with similar CPU consumption.
393 But for very rl-friendly data, it will not compress quite as well.
394 */
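/*
 * Illustrative wire format, inferred from the code below rather than from
 * DisplayLink documentation: on a little-endian host, encoding the five
 * 16-bit pixels A A A A B at device address 0x001000 yields
 *   AF 6B 00 10 00 05 01 <A be16> 03 01 <B be16>
 * i.e. header, 24-bit address, total pixel count 5, a raw span of one pixel,
 * a repeat count of 3 more A's, then a second raw span holding B.
 */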
395 static void dlfb_compress_hline(
396 const uint16_t **pixel_start_ptr,
397 const uint16_t *const pixel_end,
398 uint32_t *device_address_ptr,
399 uint8_t **command_buffer_ptr,
400 const uint8_t *const cmd_buffer_end)
401 {
402 const uint16_t *pixel = *pixel_start_ptr;
403 uint32_t dev_addr = *device_address_ptr;
404 uint8_t *cmd = *command_buffer_ptr;
405 const int bpp = 2;
406
407 while ((pixel_end > pixel) &&
408 (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
409 uint8_t *raw_pixels_count_byte = 0;
410 uint8_t *cmd_pixels_count_byte = 0;
411 const uint16_t *raw_pixel_start = 0;
412 const uint16_t *cmd_pixel_start, *cmd_pixel_end = 0;
413 const uint32_t be_dev_addr = cpu_to_be32(dev_addr);
414
415 prefetchw((void *) cmd); /* pull in one cache line at least */
416
417 *cmd++ = 0xAF;
418 *cmd++ = 0x6B;
419 *cmd++ = (uint8_t) ((be_dev_addr >> 8) & 0xFF);
420 *cmd++ = (uint8_t) ((be_dev_addr >> 16) & 0xFF);
421 *cmd++ = (uint8_t) ((be_dev_addr >> 24) & 0xFF);
422
423 cmd_pixels_count_byte = cmd++; /* we'll know this later */
424 cmd_pixel_start = pixel;
425
426 raw_pixels_count_byte = cmd++; /* we'll know this later */
427 raw_pixel_start = pixel;
428
429 cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
430 min((int)(pixel_end - pixel),
431 (int)(cmd_buffer_end - cmd) / bpp));
432
433 prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
434
435 while (pixel < cmd_pixel_end) {
436 const uint16_t * const repeating_pixel = pixel;
437
438 *(uint16_t *)cmd = cpu_to_be16p(pixel);
439 cmd += 2;
440 pixel++;
441
442 if (unlikely((pixel < cmd_pixel_end) &&
443 (*pixel == *repeating_pixel))) {
444 /* go back and fill in raw pixel count */
445 *raw_pixels_count_byte = ((repeating_pixel -
446 raw_pixel_start) + 1) & 0xFF;
447
448 while ((pixel < cmd_pixel_end)
449 && (*pixel == *repeating_pixel)) {
450 pixel++;
451 }
452
453 /* immediately after raw data is repeat byte */
454 *cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;
455
456 /* Then start another raw pixel span */
457 raw_pixel_start = pixel;
458 raw_pixels_count_byte = cmd++;
459 }
460 }
461
462 if (pixel > raw_pixel_start) {
463 /* finalize last RAW span */
464 *raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
465 }
466
467 *cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
468 dev_addr += (pixel - cmd_pixel_start) * bpp;
469 }
470
471 if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
472 /* Fill leftover bytes with no-ops */
473 if (cmd_buffer_end > cmd)
474 memset(cmd, 0xAF, cmd_buffer_end - cmd);
475 cmd = (uint8_t *) cmd_buffer_end;
476 }
477
478 *command_buffer_ptr = cmd;
479 *pixel_start_ptr = pixel;
480 *device_address_ptr = dev_addr;
481
482 return;
483 }
484
485 /*
486 * There are 3 copies of every pixel: The front buffer that the fbdev
487 * client renders to, the actual framebuffer across the USB bus in hardware
488 * (that we can only write to, slowly, and can never read), and (optionally)
489 * our shadow copy that tracks what's been sent to that hardware buffer.
490 */
491 static void dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr,
492 const char *front, char **urb_buf_ptr,
493 u32 byte_offset, u32 byte_width,
494 int *ident_ptr, int *sent_ptr)
495 {
496 const u8 *line_start, *line_end, *next_pixel;
497 u32 dev_addr = dev->base16 + byte_offset;
498 struct urb *urb = *urb_ptr;
499 u8 *cmd = *urb_buf_ptr;
500 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
501
502 line_start = (u8 *) (front + byte_offset);
503 next_pixel = line_start;
504 line_end = next_pixel + byte_width;
505
506 if (dev->backing_buffer) {
507 int offset;
508 const u8 *back_start = (u8 *) (dev->backing_buffer
509 + byte_offset);
510
511 *ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
512 &byte_width);
513
514 offset = next_pixel - line_start;
515 line_end = next_pixel + byte_width;
516 dev_addr += offset;
517 back_start += offset;
518 line_start += offset;
519
520 memcpy((char *)back_start, (char *) line_start,
521 byte_width);
522 }
523
524 while (next_pixel < line_end) {
525
526 dlfb_compress_hline((const uint16_t **) &next_pixel,
527 (const uint16_t *) line_end, &dev_addr,
528 (u8 **) &cmd, (u8 *) cmd_end);
529
530 if (cmd >= cmd_end) {
531 int len = cmd - (u8 *) urb->transfer_buffer;
532 if (dlfb_submit_urb(dev, urb, len))
533 return; /* lost pixels is set */
534 *sent_ptr += len;
535 urb = dlfb_get_urb(dev);
536 if (!urb)
537 return; /* lost_pixels is set */
538 *urb_ptr = urb;
539 cmd = urb->transfer_buffer;
540 cmd_end = &cmd[urb->transfer_buffer_length];
541 }
542 }
543
544 *urb_buf_ptr = cmd;
545 }
546
547 int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
548 int width, int height, char *data)
549 {
550 int i, ret;
551 char *cmd;
552 cycles_t start_cycles, end_cycles;
553 int bytes_sent = 0;
554 int bytes_identical = 0;
555 struct urb *urb;
556 int aligned_x;
557
558 start_cycles = get_cycles();
559
560 aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
561 width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
562 x = aligned_x;
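	/*
	 * Assuming DL_ALIGN_DOWN/_UP round to the given multiple (as the
	 * names suggest), a damage rect starting at x == 13 with width 10 on
	 * a 64-bit machine becomes x == 8, width == 16, so every rendered
	 * line starts and ends on an unsigned long boundary, which is what
	 * dlfb_trim_hline() expects.
	 */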
563
564 if ((width <= 0) ||
565 (x + width > dev->info->var.xres) ||
566 (y + height > dev->info->var.yres))
567 return -EINVAL;
568
569 if (!atomic_read(&dev->usb_active))
570 return 0;
571
572 urb = dlfb_get_urb(dev);
573 if (!urb)
574 return 0;
575 cmd = urb->transfer_buffer;
576
577 for (i = y; i < y + height ; i++) {
578 const int line_offset = dev->info->fix.line_length * i;
579 const int byte_offset = line_offset + (x * BPP);
580
581 dlfb_render_hline(dev, &urb, (char *) dev->info->fix.smem_start,
582 &cmd, byte_offset, width * BPP,
583 &bytes_identical, &bytes_sent);
584 }
585
586 if (cmd > (char *) urb->transfer_buffer) {
587 /* Send partial buffer remaining before exiting */
588 int len = cmd - (char *) urb->transfer_buffer;
589 ret = dlfb_submit_urb(dev, urb, len);
590 bytes_sent += len;
591 } else
592 dlfb_urb_completion(urb);
593
594 atomic_add(bytes_sent, &dev->bytes_sent);
595 atomic_add(bytes_identical, &dev->bytes_identical);
596 atomic_add(width*height*2, &dev->bytes_rendered);
597 end_cycles = get_cycles();
598 atomic_add(((unsigned int) ((end_cycles - start_cycles)
599 >> 10)), /* Kcycles */
600 &dev->cpu_kcycles_used);
601
602 return 0;
603 }
604
605 /* hardware has native COPY command (see libdlo), but not worth it for fbcon */
606 static void dlfb_ops_copyarea(struct fb_info *info,
607 const struct fb_copyarea *area)
608 {
609
610 struct dlfb_data *dev = info->par;
611
612 #if defined CONFIG_FB_SYS_COPYAREA || defined CONFIG_FB_SYS_COPYAREA_MODULE
613
614 sys_copyarea(info, area);
615
616 dlfb_handle_damage(dev, area->dx, area->dy,
617 area->width, area->height, info->screen_base);
618 #endif
619 atomic_inc(&dev->copy_count);
620
621 }
622
623 static void dlfb_ops_imageblit(struct fb_info *info,
624 const struct fb_image *image)
625 {
626 struct dlfb_data *dev = info->par;
627
628 #if defined CONFIG_FB_SYS_IMAGEBLIT || defined CONFIG_FB_SYS_IMAGEBLIT_MODULE
629
630 sys_imageblit(info, image);
631
632 dlfb_handle_damage(dev, image->dx, image->dy,
633 image->width, image->height, info->screen_base);
634
635 #endif
636
637 atomic_inc(&dev->blit_count);
638 }
639
640 static void dlfb_ops_fillrect(struct fb_info *info,
641 const struct fb_fillrect *rect)
642 {
643 struct dlfb_data *dev = info->par;
644
645 #if defined CONFIG_FB_SYS_FILLRECT || defined CONFIG_FB_SYS_FILLRECT_MODULE
646
647 sys_fillrect(info, rect);
648
649 dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width,
650 rect->height, info->screen_base);
651 #endif
652
653 atomic_inc(&dev->fill_count);
654
655 }
656
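/*
 * Reads the EDID one byte at a time via vendor control requests
 * (bRequest 0x02, bmRequestType 0xC0 = USB_DIR_IN | USB_TYPE_VENDOR);
 * the byte of interest comes back in rbuf[1]. The zero timeout means
 * each read may block indefinitely.
 */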
657 static void dlfb_get_edid(struct dlfb_data *dev)
658 {
659 int i;
660 int ret;
661 char rbuf[2];
662
663 for (i = 0; i < sizeof(dev->edid); i++) {
664 ret = usb_control_msg(dev->udev,
665 usb_rcvctrlpipe(dev->udev, 0), (0x02),
666 (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
667 0);
668 dev->edid[i] = rbuf[1];
669 }
670 }
671
672 static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
673 unsigned long arg)
674 {
675
676 struct dlfb_data *dev = info->par;
677 struct dloarea *area = NULL;
678
679 if (!atomic_read(&dev->usb_active))
680 return 0;
681
682 /* TODO: Update X server to get this from sysfs instead */
683 if (cmd == DLFB_IOCTL_RETURN_EDID) {
684 char *edid = (char *)arg;
685 dlfb_get_edid(dev);
686 if (copy_to_user(edid, dev->edid, sizeof(dev->edid)))
687 return -EFAULT;
688 return 0;
689 }
690
691 /* TODO: Help propose a standard fb.h ioctl to report mmap damage */
692 if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
693
694 area = (struct dloarea *)arg;
695
696 if (area->x < 0)
697 area->x = 0;
698
699 if (area->x > info->var.xres)
700 area->x = info->var.xres;
701
702 if (area->y < 0)
703 area->y = 0;
704
705 if (area->y > info->var.yres)
706 area->y = info->var.yres;
707
708 atomic_set(&dev->use_defio, 0);
709
710 dlfb_handle_damage(dev, area->x, area->y, area->w, area->h,
711 info->screen_base);
712 atomic_inc(&dev->damage_count);
713 }
714
715 return 0;
716 }
717
718 /* taken from vesafb */
719 static int
720 dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
721 unsigned blue, unsigned transp, struct fb_info *info)
722 {
723 int err = 0;
724
725 if (regno >= info->cmap.len)
726 return 1;
727
728 if (regno < 16) {
729 if (info->var.red.offset == 10) {
730 /* 1:5:5:5 */
731 ((u32 *) (info->pseudo_palette))[regno] =
732 ((red & 0xf800) >> 1) |
733 ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
734 } else {
735 /* 0:5:6:5 */
736 ((u32 *) (info->pseudo_palette))[regno] =
737 ((red & 0xf800)) |
738 ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
739 }
740 }
741
742 return err;
743 }
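/*
 * For the common 5:6:5 case, the packing above turns, for instance, 16-bit
 * red == 0xffff, green == 0x0000, blue == 0xffff into the pseudo-palette
 * entry 0xf81f (magenta).
 */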
744
745 /*
746 * It's common for several clients to have the framebuffer open simultaneously,
747 * e.g. both fbcon and X. Makes things interesting.
748 */
749 static int dlfb_ops_open(struct fb_info *info, int user)
750 {
751 struct dlfb_data *dev = info->par;
752
753 /* if (user == 0)
754 * We could special case kernel mode clients (fbcon) here
755 */
756
757 mutex_lock(&dev->fb_open_lock);
758
759 dev->fb_count++;
760
761 #ifdef CONFIG_FB_DEFERRED_IO
762 if ((atomic_read(&dev->use_defio)) && (info->fbdefio == NULL)) {
763 /* enable defio */
764 info->fbdefio = &dlfb_defio;
765 fb_deferred_io_init(info);
766 }
767 #endif
768
769 dl_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
770 info->node, user, info, dev->fb_count);
771
772 mutex_unlock(&dev->fb_open_lock);
773
774 return 0;
775 }
776
777 static int dlfb_ops_release(struct fb_info *info, int user)
778 {
779 struct dlfb_data *dev = info->par;
780
781 mutex_lock(&dev->fb_open_lock);
782
783 dev->fb_count--;
784
785 #ifdef CONFIG_FB_DEFERRED_IO
786 if ((dev->fb_count == 0) && (info->fbdefio)) {
787 fb_deferred_io_cleanup(info);
788 info->fbdefio = NULL;
789 info->fbops->fb_mmap = dlfb_ops_mmap;
790 }
791 #endif
792
793 dl_notice("release /dev/fb%d user=%d count=%d\n",
794 info->node, user, dev->fb_count);
795
796 mutex_unlock(&dev->fb_open_lock);
797
798 return 0;
799 }
800
801 /*
802 * Called when all client interfaces to start transactions have been disabled,
803 * and all references to our device instance (dlfb_data) are released.
804 * Every transaction must have a reference, so we know we are fully spun down
805 */
806 static void dlfb_delete(struct kref *kref)
807 {
808 struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);
809
810 if (dev->backing_buffer)
811 vfree(dev->backing_buffer);
812
813 mutex_destroy(&dev->fb_open_lock);
814
815 kfree(dev);
816 }
817
818 /*
819 * Called by fbdev as last part of unregister_framebuffer() process
820 * No new clients can open connections. Deallocate everything associated with fb_info.
821 */
822 static void dlfb_ops_destroy(struct fb_info *info)
823 {
824 struct dlfb_data *dev = info->par;
825
826 if (info->cmap.len != 0)
827 fb_dealloc_cmap(&info->cmap);
828 if (info->monspecs.modedb)
829 fb_destroy_modedb(info->monspecs.modedb);
830 if (info->screen_base)
831 vfree(info->screen_base);
832
833 fb_destroy_modelist(&info->modelist);
834
835 framebuffer_release(info);
836
837 /* ref taken before register_framebuffer() for dlfb_data clients */
838 kref_put(&dev->kref, dlfb_delete);
839 }
840
841 /*
842 * Check whether a video mode is supported by the DisplayLink chip
843 * We start from the monitor's modes, so we don't need to filter those here
844 */
845 static int dlfb_is_valid_mode(struct fb_videomode *mode,
846 struct fb_info *info)
847 {
848 struct dlfb_data *dev = info->par;
849
850 if (mode->xres * mode->yres > dev->sku_pixel_limit)
851 return 0;
852
853 return 1;
854 }
855
856 static void dlfb_var_color_format(struct fb_var_screeninfo *var)
857 {
858 const struct fb_bitfield red = { 11, 5, 0 };
859 const struct fb_bitfield green = { 5, 6, 0 };
860 const struct fb_bitfield blue = { 0, 5, 0 };
861
862 var->bits_per_pixel = 16;
863 var->red = red;
864 var->green = green;
865 var->blue = blue;
866 }
867
868 static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
869 struct fb_info *info)
870 {
871 struct fb_videomode mode;
872
873 /* TODO: support dynamically changing framebuffer size */
874 if ((var->xres * var->yres * 2) > info->fix.smem_len)
875 return -EINVAL;
876
877 /* set device-specific elements of var unrelated to mode */
878 dlfb_var_color_format(var);
879
880 fb_var_to_videomode(&mode, var);
881
882 if (!dlfb_is_valid_mode(&mode, info))
883 return -EINVAL;
884
885 return 0;
886 }
887
888 static int dlfb_ops_set_par(struct fb_info *info)
889 {
890 struct dlfb_data *dev = info->par;
891
892 dl_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres);
893
894 return dlfb_set_video_mode(dev, &info->var);
895 }
896
897 static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
898 {
899 struct dlfb_data *dev = info->par;
900 char *bufptr;
901 struct urb *urb;
902
903 urb = dlfb_get_urb(dev);
904 if (!urb)
905 return 0;
906 bufptr = (char *) urb->transfer_buffer;
907
908 /* overloading usb_active. UNBLANK can conflict with teardown */
909
910 bufptr = dlfb_vidreg_lock(bufptr);
911 if (blank_mode != FB_BLANK_UNBLANK) {
912 atomic_set(&dev->usb_active, 0);
913 bufptr = dlfb_enable_hvsync(bufptr, false);
914 } else {
915 atomic_set(&dev->usb_active, 1);
916 bufptr = dlfb_enable_hvsync(bufptr, true);
917 }
918 bufptr = dlfb_vidreg_unlock(bufptr);
919
920 dlfb_submit_urb(dev, urb, bufptr - (char *) urb->transfer_buffer);
921
922 return 0;
923 }
924
925 static struct fb_ops dlfb_ops = {
926 .owner = THIS_MODULE,
927 .fb_setcolreg = dlfb_ops_setcolreg,
928 .fb_fillrect = dlfb_ops_fillrect,
929 .fb_copyarea = dlfb_ops_copyarea,
930 .fb_imageblit = dlfb_ops_imageblit,
931 .fb_mmap = dlfb_ops_mmap,
932 .fb_ioctl = dlfb_ops_ioctl,
933 .fb_open = dlfb_ops_open,
934 .fb_release = dlfb_ops_release,
935 .fb_blank = dlfb_ops_blank,
936 .fb_check_var = dlfb_ops_check_var,
937 .fb_set_par = dlfb_ops_set_par,
938 };
939
940 /*
941 * Calls dlfb_get_edid() to query the EDID of attached monitor via usb cmds
942 * Then parses EDID into three places used by various parts of fbdev:
943 * fb_var_screeninfo contains the timing of the monitor's preferred mode
944 * fb_info.monspecs is full parsed EDID info, including monspecs.modedb
945 * fb_info.modelist is a linked list of all monitor & VESA modes which work
946 *
947 * If EDID is not readable/valid, then modelist is all VESA modes,
948 * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
949 * Returns 0 if EDID parses successfully
950 */
951 static int dlfb_parse_edid(struct dlfb_data *dev,
952 struct fb_var_screeninfo *var,
953 struct fb_info *info)
954 {
955 int i;
956 const struct fb_videomode *default_vmode = NULL;
957 int result = 0;
958
959 fb_destroy_modelist(&info->modelist);
960 memset(&info->monspecs, 0, sizeof(info->monspecs));
961
962 dlfb_get_edid(dev);
963 fb_edid_to_monspecs(dev->edid, &info->monspecs);
964
965 if (info->monspecs.modedb_len > 0) {
966
967 for (i = 0; i < info->monspecs.modedb_len; i++) {
968 if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
969 fb_add_videomode(&info->monspecs.modedb[i],
970 &info->modelist);
971 }
972
973 default_vmode = fb_find_best_display(&info->monspecs,
974 &info->modelist);
975 } else {
976 struct fb_videomode fb_vmode = {0};
977
978 dl_err("Unable to get valid EDID from device/display\n");
979 result = 1;
980
981 /*
982 * Add the standard VESA modes to our modelist
983 * Since we don't have EDID, there may be modes that
984 * overspec the monitor and/or have an incorrect aspect ratio, etc.
985 * But at least the user has a chance to choose
986 */
987 for (i = 0; i < VESA_MODEDB_SIZE; i++) {
988 if (dlfb_is_valid_mode((struct fb_videomode *)
989 &vesa_modes[i], info))
990 fb_add_videomode(&vesa_modes[i],
991 &info->modelist);
992 }
993
994 /*
995 * default to resolution safe for projectors
996 * (since they are most common case without EDID)
997 */
998 fb_vmode.xres = 800;
999 fb_vmode.yres = 600;
1000 fb_vmode.refresh = 60;
1001 default_vmode = fb_find_nearest_mode(&fb_vmode,
1002 &info->modelist);
1003 }
1004
1005 fb_videomode_to_var(var, default_vmode);
1006 dlfb_var_color_format(var);
1007
1008 return result;
1009 }
1010
1011 static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
1012 struct device_attribute *a, char *buf) {
1013 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1014 struct dlfb_data *dev = fb_info->par;
1015 return snprintf(buf, PAGE_SIZE, "%u\n",
1016 atomic_read(&dev->bytes_rendered));
1017 }
1018
1019 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
1020 struct device_attribute *a, char *buf) {
1021 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1022 struct dlfb_data *dev = fb_info->par;
1023 return snprintf(buf, PAGE_SIZE, "%u\n",
1024 atomic_read(&dev->bytes_identical));
1025 }
1026
1027 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
1028 struct device_attribute *a, char *buf) {
1029 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1030 struct dlfb_data *dev = fb_info->par;
1031 return snprintf(buf, PAGE_SIZE, "%u\n",
1032 atomic_read(&dev->bytes_sent));
1033 }
1034
1035 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
1036 struct device_attribute *a, char *buf) {
1037 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1038 struct dlfb_data *dev = fb_info->par;
1039 return snprintf(buf, PAGE_SIZE, "%u\n",
1040 atomic_read(&dev->cpu_kcycles_used));
1041 }
1042
1043 static ssize_t metrics_misc_show(struct device *fbdev,
1044 struct device_attribute *a, char *buf) {
1045 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1046 struct dlfb_data *dev = fb_info->par;
1047 return snprintf(buf, PAGE_SIZE,
1048 "Calls to\ndamage: %u\nblit: %u\n"
1049 "defio faults: %u\ncopy: %u\n"
1050 "fill: %u\n\n"
1051 "active framebuffer clients: %d\n"
1052 "urbs available %d(%d)\n"
1053 "Shadow framebuffer in use? %s\n"
1054 "Any lost pixels? %s\n",
1055 atomic_read(&dev->damage_count),
1056 atomic_read(&dev->blit_count),
1057 atomic_read(&dev->defio_fault_count),
1058 atomic_read(&dev->copy_count),
1059 atomic_read(&dev->fill_count),
1060 dev->fb_count,
1061 dev->urbs.available, dev->urbs.limit_sem.count,
1062 (dev->backing_buffer) ? "yes" : "no",
1063 atomic_read(&dev->lost_pixels) ? "yes" : "no");
1064 }
1065
1066 static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *a,
1067 char *buf, loff_t off, size_t count) {
1068 struct device *fbdev = container_of(kobj, struct device, kobj);
1069 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1070 struct dlfb_data *dev = fb_info->par;
1071 char *edid = &dev->edid[0];
1072 const size_t size = sizeof(dev->edid);
1073
1074 if (dlfb_parse_edid(dev, &fb_info->var, fb_info))
1075 return 0;
1076
1077 if (off >= size)
1078 return 0;
1079
1080 if (off + count > size)
1081 count = size - off;
1082 memcpy(buf, edid + off, count);
1083
1084 return count;
1085 }
1086
1087
1088 static ssize_t metrics_reset_store(struct device *fbdev,
1089 struct device_attribute *attr,
1090 const char *buf, size_t count)
1091 {
1092 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1093 struct dlfb_data *dev = fb_info->par;
1094
1095 atomic_set(&dev->bytes_rendered, 0);
1096 atomic_set(&dev->bytes_identical, 0);
1097 atomic_set(&dev->bytes_sent, 0);
1098 atomic_set(&dev->cpu_kcycles_used, 0);
1099 atomic_set(&dev->blit_count, 0);
1100 atomic_set(&dev->copy_count, 0);
1101 atomic_set(&dev->fill_count, 0);
1102 atomic_set(&dev->defio_fault_count, 0);
1103 atomic_set(&dev->damage_count, 0);
1104
1105 return count;
1106 }
1107
1108 static ssize_t use_defio_show(struct device *fbdev,
1109 struct device_attribute *a, char *buf) {
1110 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1111 struct dlfb_data *dev = fb_info->par;
1112 return snprintf(buf, PAGE_SIZE, "%d\n",
1113 atomic_read(&dev->use_defio));
1114 }
1115
1116 static ssize_t use_defio_store(struct device *fbdev,
1117 struct device_attribute *attr,
1118 const char *buf, size_t count)
1119 {
1120 struct fb_info *fb_info = dev_get_drvdata(fbdev);
1121 struct dlfb_data *dev = fb_info->par;
1122
1123 if (count > 0) {
1124 if (buf[0] == '0')
1125 atomic_set(&dev->use_defio, 0);
1126 if (buf[0] == '1')
1127 atomic_set(&dev->use_defio, 1);
1128 }
1129 return count;
1130 }
1131
1132 static struct bin_attribute edid_attr = {
1133 .attr.name = "edid",
1134 .attr.mode = 0444,
1135 .size = 128,
1136 .read = edid_show,
1137 };
1138
1139 static struct device_attribute fb_device_attrs[] = {
1140 __ATTR_RO(metrics_bytes_rendered),
1141 __ATTR_RO(metrics_bytes_identical),
1142 __ATTR_RO(metrics_bytes_sent),
1143 __ATTR_RO(metrics_cpu_kcycles_used),
1144 __ATTR_RO(metrics_misc),
1145 __ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store),
1146 __ATTR_RW(use_defio),
1147 };
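/*
 * These attributes are created against the fbdev class device in probe(),
 * so they typically appear under /sys/class/graphics/fbN/ (exact path may
 * vary by kernel/distro), e.g.:
 *   cat /sys/class/graphics/fb0/metrics_misc
 *   echo 0 > /sys/class/graphics/fb0/use_defio
 * The latter is the same flag the DLFB_IOCTL_REPORT_DAMAGE path clears.
 */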
1148
1149 #ifdef CONFIG_FB_DEFERRED_IO
1150 static void dlfb_dpy_deferred_io(struct fb_info *info,
1151 struct list_head *pagelist)
1152 {
1153 struct page *cur;
1154 struct fb_deferred_io *fbdefio = info->fbdefio;
1155 struct dlfb_data *dev = info->par;
1156 struct urb *urb;
1157 char *cmd;
1158 cycles_t start_cycles, end_cycles;
1159 int bytes_sent = 0;
1160 int bytes_identical = 0;
1161 int bytes_rendered = 0;
1162 int fault_count = 0;
1163
1164 if (!atomic_read(&dev->use_defio))
1165 return;
1166
1167 if (!atomic_read(&dev->usb_active))
1168 return;
1169
1170 start_cycles = get_cycles();
1171
1172 urb = dlfb_get_urb(dev);
1173 if (!urb)
1174 return;
1175 cmd = urb->transfer_buffer;
1176
1177 /* walk the written page list and render each to device */
1178 list_for_each_entry(cur, &fbdefio->pagelist, lru) {
1179 dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start,
1180 &cmd, cur->index << PAGE_SHIFT,
1181 PAGE_SIZE, &bytes_identical, &bytes_sent);
1182 bytes_rendered += PAGE_SIZE;
1183 fault_count++;
1184 }
1185
1186 if (cmd > (char *) urb->transfer_buffer) {
1187 /* Send partial buffer remaining before exiting */
1188 int len = cmd - (char *) urb->transfer_buffer;
1189 dlfb_submit_urb(dev, urb, len);
1190 bytes_sent += len;
1191 } else
1192 dlfb_urb_completion(urb);
1193
1194 atomic_add(fault_count, &dev->defio_fault_count);
1195 atomic_add(bytes_sent, &dev->bytes_sent);
1196 atomic_add(bytes_identical, &dev->bytes_identical);
1197 atomic_add(bytes_rendered, &dev->bytes_rendered);
1198 end_cycles = get_cycles();
1199 atomic_add(((unsigned int) ((end_cycles - start_cycles)
1200 >> 10)), /* Kcycles */
1201 &dev->cpu_kcycles_used);
1202 }
1203
1204 static struct fb_deferred_io dlfb_defio = {
1205 .delay = 5,
1206 .deferred_io = dlfb_dpy_deferred_io,
1207 };
1208
1209 #endif
1210
1211 /*
1212 * This is necessary before we can communicate with the display controller.
1213 */
1214 static int dlfb_select_std_channel(struct dlfb_data *dev)
1215 {
1216 int ret;
1217 u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
1218 0x1C, 0x88, 0x5E, 0x15,
1219 0x60, 0xFE, 0xC6, 0x97,
1220 0x16, 0x3D, 0x47, 0xF2 };
1221
1222 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1223 NR_USB_REQUEST_CHANNEL,
1224 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
1225 set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
1226 return ret;
1227 }
1228
1229
1230 static int dlfb_usb_probe(struct usb_interface *interface,
1231 const struct usb_device_id *id)
1232 {
1233 struct usb_device *usbdev;
1234 struct dlfb_data *dev;
1235 struct fb_info *info;
1236 int videomemorysize;
1237 int i;
1238 unsigned char *videomemory;
1239 int retval = -ENOMEM;
1240 struct fb_var_screeninfo *var;
1241 int registered = 0;
1242 u16 *pix_framebuffer;
1243
1244 /* usb initialization */
1245
1246 usbdev = interface_to_usbdev(interface);
1247
1248 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1249 if (dev == NULL) {
1250 err("dlfb_usb_probe: failed alloc of dev struct\n");
1251 goto error;
1252 }
1253
1254 /* we need to wait for both usb and fbdev to spin down on disconnect */
1255 kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
1256 kref_get(&dev->kref); /* matching kref_put in .fb_destroy function*/
1257
1258 dev->udev = usbdev;
1259 dev->gdev = &usbdev->dev; /* our generic struct device * */
1260 usb_set_intfdata(interface, dev);
1261
1262 if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
1263 retval = -ENOMEM;
1264 dl_err("dlfb_alloc_urb_list failed\n");
1265 goto error;
1266 }
1267
1268 mutex_init(&dev->fb_open_lock);
1269
1270 /* We don't register a new USB class. Our client interface is fbdev */
1271
1272 /* allocates framebuffer driver structure, not framebuffer memory */
1273 info = framebuffer_alloc(0, &usbdev->dev);
1274 if (!info) {
1275 retval = -ENOMEM;
1276 dl_err("framebuffer_alloc failed\n");
1277 goto error;
1278 }
1279 dev->info = info;
1280 info->par = dev;
1281 info->pseudo_palette = dev->pseudo_palette;
1282 info->fbops = &dlfb_ops;
1283
1284 var = &info->var;
1285
1286 /* TODO set limit based on actual SKU detection */
1287 dev->sku_pixel_limit = 2048 * 1152;
1288
1289 INIT_LIST_HEAD(&info->modelist);
1290 dlfb_parse_edid(dev, var, info);
1291
1292 /*
1293 * ok, now that we've got the size info, we can alloc our framebuffer.
1294 */
1295 info->fix = dlfb_fix;
1296 info->fix.line_length = var->xres * (var->bits_per_pixel / 8);
1297 videomemorysize = info->fix.line_length * var->yres;
1298
1299 /*
1300 * The big chunk of system memory we use as a virtual framebuffer.
1301 * TODO: Handle fbcon cursor code calling blit in interrupt context
1302 */
1303 videomemory = vmalloc(videomemorysize);
1304 if (!videomemory) {
1305 retval = -ENOMEM;
1306 dl_err("Virtual framebuffer alloc failed\n");
1307 goto error;
1308 }
1309
1310 info->screen_base = videomemory;
1311 info->fix.smem_len = PAGE_ALIGN(videomemorysize);
1312 info->fix.smem_start = (unsigned long) videomemory;
1313 info->flags = udlfb_info_flags;
1314
1315
1316 /*
1317 * Second framebuffer copy, mirroring the state of the framebuffer
1318 * on the physical USB device. We can function without this.
1319 * But with imperfect damage info we may end up sending pixels over USB
1320 * that were, in fact, unchanged -- wasting limited USB bandwidth
1321 */
1322 dev->backing_buffer = vmalloc(videomemorysize);
1323 if (!dev->backing_buffer)
1324 dl_warn("No shadow/backing buffer allocated\n");
1325 else
1326 memset(dev->backing_buffer, 0, videomemorysize);
1327
1328 retval = fb_alloc_cmap(&info->cmap, 256, 0);
1329 if (retval < 0) {
1330 dl_err("fb_alloc_cmap failed %x\n", retval);
1331 goto error;
1332 }
1333
1334 /* ready to begin using device */
1335
1336 #ifdef CONFIG_FB_DEFERRED_IO
1337 atomic_set(&dev->use_defio, 1);
1338 #endif
1339 atomic_set(&dev->usb_active, 1);
1340 dlfb_select_std_channel(dev);
1341
1342 dlfb_ops_check_var(var, info);
1343 dlfb_ops_set_par(info);
1344
1345 /* paint greenscreen */
1346 pix_framebuffer = (u16 *) videomemory;
1347 for (i = 0; i < videomemorysize / 2; i++)
1348 pix_framebuffer[i] = 0x37e6;
1349
1350 dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres,
1351 videomemory);
1352
1353 retval = register_framebuffer(info);
1354 if (retval < 0) {
1355 dl_err("register_framebuffer failed %d\n", retval);
1356 goto error;
1357 }
1358 registered = 1;
1359
1360 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
1361 device_create_file(info->dev, &fb_device_attrs[i]);
1362
1363 device_create_bin_file(info->dev, &edid_attr);
1364
1365 dl_err("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
1366 " Using %dK framebuffer memory\n", info->node,
1367 var->xres, var->yres,
1368 ((dev->backing_buffer) ?
1369 videomemorysize * 2 : videomemorysize) >> 10);
1370 return 0;
1371
1372 error:
1373 if (dev) {
1374 if (registered) {
1375 unregister_framebuffer(info);
1376 dlfb_ops_destroy(info);
1377 } else
1378 kref_put(&dev->kref, dlfb_delete);
1379
1380 if (dev->urbs.count > 0)
1381 dlfb_free_urb_list(dev);
1382 kref_put(&dev->kref, dlfb_delete); /* last ref from kref_init */
1383
1384 /* dev has been deallocated. Do not dereference */
1385 }
1386
1387 return retval;
1388 }
1389
1390 static void dlfb_usb_disconnect(struct usb_interface *interface)
1391 {
1392 struct dlfb_data *dev;
1393 struct fb_info *info;
1394 int i;
1395
1396 dev = usb_get_intfdata(interface);
1397 info = dev->info;
1398
1399 /* when non-active we'll update virtual framebuffer, but no new urbs */
1400 atomic_set(&dev->usb_active, 0);
1401
1402 usb_set_intfdata(interface, NULL);
1403
1404 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
1405 device_remove_file(info->dev, &fb_device_attrs[i]);
1406
1407 device_remove_bin_file(info->dev, &edid_attr);
1408
1409 /* this function will wait for all in-flight urbs to complete */
1410 dlfb_free_urb_list(dev);
1411
1412 if (info) {
1413 dl_notice("Detaching /dev/fb%d\n", info->node);
1414 unregister_framebuffer(info);
1415 dlfb_ops_destroy(info);
1416 }
1417
1418 /* release reference taken by kref_init in probe() */
1419 kref_put(&dev->kref, dlfb_delete);
1420
1421 /* consider dlfb_data freed */
1422
1423 return;
1424 }
1425
1426 static struct usb_driver dlfb_driver = {
1427 .name = "udlfb",
1428 .probe = dlfb_usb_probe,
1429 .disconnect = dlfb_usb_disconnect,
1430 .id_table = id_table,
1431 };
1432
1433 static int __init dlfb_module_init(void)
1434 {
1435 int res;
1436
1437 res = usb_register(&dlfb_driver);
1438 if (res)
1439 err("usb_register failed. Error number %d", res);
1440
1441 printk("VMODES initialized\n");
1442
1443 return res;
1444 }
1445
1446 static void __exit dlfb_module_exit(void)
1447 {
1448 usb_deregister(&dlfb_driver);
1449 }
1450
1451 module_init(dlfb_module_init);
1452 module_exit(dlfb_module_exit);
1453
1454 static void dlfb_urb_completion(struct urb *urb)
1455 {
1456 struct urb_node *unode = urb->context;
1457 struct dlfb_data *dev = unode->dev;
1458 unsigned long flags;
1459
1460 /* sync/async unlink faults aren't errors */
1461 if (urb->status) {
1462 if (!(urb->status == -ENOENT ||
1463 urb->status == -ECONNRESET ||
1464 urb->status == -ESHUTDOWN)) {
1465 dl_err("%s - nonzero write bulk status received: %d\n",
1466 __func__, urb->status);
1467 atomic_set(&dev->lost_pixels, 1);
1468 }
1469 }
1470
1471 urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */
1472
1473 spin_lock_irqsave(&dev->urbs.lock, flags);
1474 list_add_tail(&unode->entry, &dev->urbs.list);
1475 dev->urbs.available++;
1476 spin_unlock_irqrestore(&dev->urbs.lock, flags);
1477
1478 up(&dev->urbs.limit_sem);
1479 }
1480
1481 static void dlfb_free_urb_list(struct dlfb_data *dev)
1482 {
1483 int count = dev->urbs.count;
1484 struct list_head *node;
1485 struct urb_node *unode;
1486 struct urb *urb;
1487 int ret;
1488 unsigned long flags;
1489
1490 dl_notice("Waiting for completes and freeing all render urbs\n");
1491
1492 /* keep waiting and freeing, until we've got 'em all */
1493 while (count--) {
1494 /* Timeout means a memory leak and/or fault */
1495 ret = down_timeout(&dev->urbs.limit_sem, FREE_URB_TIMEOUT);
1496 if (ret) {
1497 BUG_ON(ret);
1498 break;
1499 }
1500 spin_lock_irqsave(&dev->urbs.lock, flags);
1501
1502 node = dev->urbs.list.next; /* have reserved one with sem */
1503 list_del_init(node);
1504
1505 spin_unlock_irqrestore(&dev->urbs.lock, flags);
1506
1507 unode = list_entry(node, struct urb_node, entry);
1508 urb = unode->urb;
1509
1510 /* Free each separately allocated piece */
1511 usb_buffer_free(urb->dev, dev->urbs.size,
1512 urb->transfer_buffer, urb->transfer_dma);
1513 usb_free_urb(urb);
1514 kfree(node);
1515 }
1516
1517 kref_put(&dev->kref, dlfb_delete);
1518
1519 }
1520
1521 static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
1522 {
1523 int i = 0;
1524 struct urb *urb;
1525 struct urb_node *unode;
1526 char *buf;
1527
1528 spin_lock_init(&dev->urbs.lock);
1529
1530 dev->urbs.size = size;
1531 INIT_LIST_HEAD(&dev->urbs.list);
1532
1533 while (i < count) {
1534 unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
1535 if (!unode)
1536 break;
1537 unode->dev = dev;
1538
1539 urb = usb_alloc_urb(0, GFP_KERNEL);
1540 if (!urb) {
1541 kfree(unode);
1542 break;
1543 }
1544 unode->urb = urb;
1545
1546 buf = usb_buffer_alloc(dev->udev, MAX_TRANSFER, GFP_KERNEL,
1547 &urb->transfer_dma);
1548 if (!buf) {
1549 kfree(unode);
1550 usb_free_urb(urb);
1551 break;
1552 }
1553
1554 /* urb->transfer_buffer_length set to actual before submit */
1555 usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
1556 buf, size, dlfb_urb_completion, unode);
1557 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1558
1559 list_add_tail(&unode->entry, &dev->urbs.list);
1560
1561 i++;
1562 }
1563
1564 sema_init(&dev->urbs.limit_sem, i);
1565 dev->urbs.count = i;
1566 dev->urbs.available = i;
1567
1568 kref_get(&dev->kref); /* released in free_render_urbs() */
1569
1570 dl_notice("allocated %d %d byte urbs \n", i, (int) size);
1571
1572 return i;
1573 }
1574
1575 static struct urb *dlfb_get_urb(struct dlfb_data *dev)
1576 {
1577 int ret = 0;
1578 struct list_head *entry;
1579 struct urb_node *unode;
1580 struct urb *urb = NULL;
1581 unsigned long flags;
1582
1583 /* Wait for an in-flight buffer to complete and get re-queued */
1584 ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
1585 if (ret) {
1586 atomic_set(&dev->lost_pixels, 1);
1587 dl_err("wait for urb interrupted: %x\n", ret);
1588 goto error;
1589 }
1590
1591 spin_lock_irqsave(&dev->urbs.lock, flags);
1592
1593 BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
1594 entry = dev->urbs.list.next;
1595 list_del_init(entry);
1596 dev->urbs.available--;
1597
1598 spin_unlock_irqrestore(&dev->urbs.lock, flags);
1599
1600 unode = list_entry(entry, struct urb_node, entry);
1601 urb = unode->urb;
1602
1603 error:
1604 return urb;
1605 }
1606
1607 static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
1608 {
1609 int ret;
1610
1611 BUG_ON(len > dev->urbs.size);
1612
1613 urb->transfer_buffer_length = len; /* set to actual payload len */
1614 ret = usb_submit_urb(urb, GFP_KERNEL);
1615 if (ret) {
1616 dlfb_urb_completion(urb); /* because no one else will */
1617 atomic_set(&dev->lost_pixels, 1);
1618 dl_err("usb_submit_urb error %x\n", ret);
1619 }
1620 return ret;
1621 }
1622
1623 MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
1624 "Jaya Kumar <jayakumar.lkml@gmail.com>, "
1625 "Bernie Thompson <bernie@plugable.com>");
1626 MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
1627 MODULE_LICENSE("GPL");
1628