1 /**************************************************************************
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_kms.h"
30 /* Might need a hrtimer here? */
31 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
34 void vmw_display_unit_cleanup(struct vmw_display_unit
*du
)
36 if (du
->cursor_surface
)
37 vmw_surface_unreference(&du
->cursor_surface
);
38 if (du
->cursor_dmabuf
)
39 vmw_dmabuf_unreference(&du
->cursor_dmabuf
);
40 drm_crtc_cleanup(&du
->crtc
);
41 drm_encoder_cleanup(&du
->encoder
);
42 drm_connector_cleanup(&du
->connector
);
/*
 * Display Unit Cursor functions
 */
49 int vmw_cursor_update_image(struct vmw_private
*dev_priv
,
50 u32
*image
, u32 width
, u32 height
,
51 u32 hotspotX
, u32 hotspotY
)
55 SVGAFifoCmdDefineAlphaCursor cursor
;
57 u32 image_size
= width
* height
* 4;
58 u32 cmd_size
= sizeof(*cmd
) + image_size
;
63 cmd
= vmw_fifo_reserve(dev_priv
, cmd_size
);
64 if (unlikely(cmd
== NULL
)) {
65 DRM_ERROR("Fifo reserve failed.\n");
69 memset(cmd
, 0, sizeof(*cmd
));
71 memcpy(&cmd
[1], image
, image_size
);
73 cmd
->cmd
= cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR
);
74 cmd
->cursor
.id
= cpu_to_le32(0);
75 cmd
->cursor
.width
= cpu_to_le32(width
);
76 cmd
->cursor
.height
= cpu_to_le32(height
);
77 cmd
->cursor
.hotspotX
= cpu_to_le32(hotspotX
);
78 cmd
->cursor
.hotspotY
= cpu_to_le32(hotspotY
);
80 vmw_fifo_commit(dev_priv
, cmd_size
);
85 void vmw_cursor_update_position(struct vmw_private
*dev_priv
,
86 bool show
, int x
, int y
)
88 __le32 __iomem
*fifo_mem
= dev_priv
->mmio_virt
;
91 iowrite32(show
? 1 : 0, fifo_mem
+ SVGA_FIFO_CURSOR_ON
);
92 iowrite32(x
, fifo_mem
+ SVGA_FIFO_CURSOR_X
);
93 iowrite32(y
, fifo_mem
+ SVGA_FIFO_CURSOR_Y
);
94 count
= ioread32(fifo_mem
+ SVGA_FIFO_CURSOR_COUNT
);
95 iowrite32(++count
, fifo_mem
+ SVGA_FIFO_CURSOR_COUNT
);
98 int vmw_du_crtc_cursor_set(struct drm_crtc
*crtc
, struct drm_file
*file_priv
,
99 uint32_t handle
, uint32_t width
, uint32_t height
)
101 struct vmw_private
*dev_priv
= vmw_priv(crtc
->dev
);
102 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
103 struct vmw_display_unit
*du
= vmw_crtc_to_du(crtc
);
104 struct vmw_surface
*surface
= NULL
;
105 struct vmw_dma_buffer
*dmabuf
= NULL
;
109 ret
= vmw_user_surface_lookup(dev_priv
, tfile
,
112 if (!surface
->snooper
.image
) {
113 DRM_ERROR("surface not suitable for cursor\n");
117 ret
= vmw_user_dmabuf_lookup(tfile
,
120 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret
);
126 /* takedown old cursor */
127 if (du
->cursor_surface
) {
128 du
->cursor_surface
->snooper
.crtc
= NULL
;
129 vmw_surface_unreference(&du
->cursor_surface
);
131 if (du
->cursor_dmabuf
)
132 vmw_dmabuf_unreference(&du
->cursor_dmabuf
);
134 /* setup new image */
136 /* vmw_user_surface_lookup takes one reference */
137 du
->cursor_surface
= surface
;
139 du
->cursor_surface
->snooper
.crtc
= crtc
;
140 du
->cursor_age
= du
->cursor_surface
->snooper
.age
;
141 vmw_cursor_update_image(dev_priv
, surface
->snooper
.image
,
142 64, 64, du
->hotspot_x
, du
->hotspot_y
);
144 struct ttm_bo_kmap_obj map
;
145 unsigned long kmap_offset
;
146 unsigned long kmap_num
;
150 /* vmw_user_surface_lookup takes one reference */
151 du
->cursor_dmabuf
= dmabuf
;
154 kmap_num
= (64*64*4) >> PAGE_SHIFT
;
156 ret
= ttm_bo_reserve(&dmabuf
->base
, true, false, false, 0);
157 if (unlikely(ret
!= 0)) {
158 DRM_ERROR("reserve failed\n");
162 ret
= ttm_bo_kmap(&dmabuf
->base
, kmap_offset
, kmap_num
, &map
);
163 if (unlikely(ret
!= 0))
166 virtual = ttm_kmap_obj_virtual(&map
, &dummy
);
167 vmw_cursor_update_image(dev_priv
, virtual, 64, 64,
168 du
->hotspot_x
, du
->hotspot_y
);
172 ttm_bo_unreserve(&dmabuf
->base
);
175 vmw_cursor_update_position(dev_priv
, false, 0, 0);
179 vmw_cursor_update_position(dev_priv
, true, du
->cursor_x
, du
->cursor_y
);
184 int vmw_du_crtc_cursor_move(struct drm_crtc
*crtc
, int x
, int y
)
186 struct vmw_private
*dev_priv
= vmw_priv(crtc
->dev
);
187 struct vmw_display_unit
*du
= vmw_crtc_to_du(crtc
);
188 bool shown
= du
->cursor_surface
|| du
->cursor_dmabuf
? true : false;
190 du
->cursor_x
= x
+ crtc
->x
;
191 du
->cursor_y
= y
+ crtc
->y
;
193 vmw_cursor_update_position(dev_priv
, shown
,
194 du
->cursor_x
, du
->cursor_y
);
199 void vmw_kms_cursor_snoop(struct vmw_surface
*srf
,
200 struct ttm_object_file
*tfile
,
201 struct ttm_buffer_object
*bo
,
202 SVGA3dCmdHeader
*header
)
204 struct ttm_bo_kmap_obj map
;
205 unsigned long kmap_offset
;
206 unsigned long kmap_num
;
212 SVGA3dCmdHeader header
;
213 SVGA3dCmdSurfaceDMA dma
;
217 cmd
= container_of(header
, struct vmw_dma_cmd
, header
);
219 /* No snooper installed */
220 if (!srf
->snooper
.image
)
223 if (cmd
->dma
.host
.face
!= 0 || cmd
->dma
.host
.mipmap
!= 0) {
224 DRM_ERROR("face and mipmap for cursors should never != 0\n");
228 if (cmd
->header
.size
< 64) {
229 DRM_ERROR("at least one full copy box must be given\n");
233 box
= (SVGA3dCopyBox
*)&cmd
[1];
234 box_count
= (cmd
->header
.size
- sizeof(SVGA3dCmdSurfaceDMA
)) /
235 sizeof(SVGA3dCopyBox
);
237 if (cmd
->dma
.guest
.pitch
!= (64 * 4) ||
238 cmd
->dma
.guest
.ptr
.offset
% PAGE_SIZE
||
239 box
->x
!= 0 || box
->y
!= 0 || box
->z
!= 0 ||
240 box
->srcx
!= 0 || box
->srcy
!= 0 || box
->srcz
!= 0 ||
241 box
->w
!= 64 || box
->h
!= 64 || box
->d
!= 1 ||
243 /* TODO handle none page aligned offsets */
244 /* TODO handle partial uploads and pitch != 256 */
245 /* TODO handle more then one copy (size != 64) */
246 DRM_ERROR("lazy programer, cant handle wierd stuff\n");
250 kmap_offset
= cmd
->dma
.guest
.ptr
.offset
>> PAGE_SHIFT
;
251 kmap_num
= (64*64*4) >> PAGE_SHIFT
;
253 ret
= ttm_bo_reserve(bo
, true, false, false, 0);
254 if (unlikely(ret
!= 0)) {
255 DRM_ERROR("reserve failed\n");
259 ret
= ttm_bo_kmap(bo
, kmap_offset
, kmap_num
, &map
);
260 if (unlikely(ret
!= 0))
263 virtual = ttm_kmap_obj_virtual(&map
, &dummy
);
265 memcpy(srf
->snooper
.image
, virtual, 64*64*4);
268 /* we can't call this function from this function since execbuf has
269 * reserved fifo space.
271 * if (srf->snooper.crtc)
272 * vmw_ldu_crtc_cursor_update_image(dev_priv,
273 * srf->snooper.image, 64, 64,
274 * du->hotspot_x, du->hotspot_y);
279 ttm_bo_unreserve(bo
);
282 void vmw_kms_cursor_post_execbuf(struct vmw_private
*dev_priv
)
284 struct drm_device
*dev
= dev_priv
->dev
;
285 struct vmw_display_unit
*du
;
286 struct drm_crtc
*crtc
;
288 mutex_lock(&dev
->mode_config
.mutex
);
290 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
291 du
= vmw_crtc_to_du(crtc
);
292 if (!du
->cursor_surface
||
293 du
->cursor_age
== du
->cursor_surface
->snooper
.age
)
296 du
->cursor_age
= du
->cursor_surface
->snooper
.age
;
297 vmw_cursor_update_image(dev_priv
,
298 du
->cursor_surface
->snooper
.image
,
299 64, 64, du
->hotspot_x
, du
->hotspot_y
);
302 mutex_unlock(&dev
->mode_config
.mutex
);
/*
 * Generic framebuffer code
 */
/*
 * DRM create_handle hook.  vmwgfx framebuffers are not backed by GEM
 * objects, so there is no handle to export; report 0.
 *
 * Fix: write through the output pointer instead of assigning the local
 * pointer variable (which had no effect for the caller).
 */
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}
/*
 * Surface framebuffer code
 */
323 #define vmw_framebuffer_to_vfbs(x) \
324 container_of(x, struct vmw_framebuffer_surface, base.base)
326 struct vmw_framebuffer_surface
{
327 struct vmw_framebuffer base
;
328 struct vmw_surface
*surface
;
329 struct delayed_work d_work
;
330 struct mutex work_lock
;
334 void vmw_framebuffer_surface_destroy(struct drm_framebuffer
*framebuffer
)
336 struct vmw_framebuffer_surface
*vfb
=
337 vmw_framebuffer_to_vfbs(framebuffer
);
339 cancel_delayed_work_sync(&vfb
->d_work
);
340 drm_framebuffer_cleanup(framebuffer
);
341 vmw_surface_unreference(&vfb
->surface
);
346 static void vmw_framebuffer_present_fs_callback(struct work_struct
*work
)
348 struct delayed_work
*d_work
=
349 container_of(work
, struct delayed_work
, work
);
350 struct vmw_framebuffer_surface
*vfbs
=
351 container_of(d_work
, struct vmw_framebuffer_surface
, d_work
);
352 struct vmw_surface
*surf
= vfbs
->surface
;
353 struct drm_framebuffer
*framebuffer
= &vfbs
->base
.base
;
354 struct vmw_private
*dev_priv
= vmw_priv(framebuffer
->dev
);
357 SVGA3dCmdHeader header
;
358 SVGA3dCmdPresent body
;
362 mutex_lock(&vfbs
->work_lock
);
363 if (!vfbs
->present_fs
)
366 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
));
367 if (unlikely(cmd
== NULL
))
370 cmd
->header
.id
= cpu_to_le32(SVGA_3D_CMD_PRESENT
);
371 cmd
->header
.size
= cpu_to_le32(sizeof(cmd
->body
) + sizeof(cmd
->cr
));
372 cmd
->body
.sid
= cpu_to_le32(surf
->res
.id
);
373 cmd
->cr
.x
= cpu_to_le32(0);
374 cmd
->cr
.y
= cpu_to_le32(0);
375 cmd
->cr
.srcx
= cmd
->cr
.x
;
376 cmd
->cr
.srcy
= cmd
->cr
.y
;
377 cmd
->cr
.w
= cpu_to_le32(framebuffer
->width
);
378 cmd
->cr
.h
= cpu_to_le32(framebuffer
->height
);
379 vfbs
->present_fs
= false;
380 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
383 * Will not re-add if already pending.
385 schedule_delayed_work(&vfbs
->d_work
, VMWGFX_PRESENT_RATE
);
387 mutex_unlock(&vfbs
->work_lock
);
391 int vmw_framebuffer_surface_dirty(struct drm_framebuffer
*framebuffer
,
392 unsigned flags
, unsigned color
,
393 struct drm_clip_rect
*clips
,
396 struct vmw_private
*dev_priv
= vmw_priv(framebuffer
->dev
);
397 struct vmw_framebuffer_surface
*vfbs
=
398 vmw_framebuffer_to_vfbs(framebuffer
);
399 struct vmw_surface
*surf
= vfbs
->surface
;
400 struct drm_clip_rect norect
;
405 SVGA3dCmdHeader header
;
406 SVGA3dCmdPresent body
;
411 !(dev_priv
->fifo
.capabilities
&
412 SVGA_FIFO_CAP_SCREEN_OBJECT
)) {
415 mutex_lock(&vfbs
->work_lock
);
416 vfbs
->present_fs
= true;
417 ret
= schedule_delayed_work(&vfbs
->d_work
, VMWGFX_PRESENT_RATE
);
418 mutex_unlock(&vfbs
->work_lock
);
421 * No work pending, Force immediate present.
423 vmw_framebuffer_present_fs_callback(&vfbs
->d_work
.work
);
431 norect
.x1
= norect
.y1
= 0;
432 norect
.x2
= framebuffer
->width
;
433 norect
.y2
= framebuffer
->height
;
434 } else if (flags
& DRM_MODE_FB_DIRTY_ANNOTATE_COPY
) {
436 inc
= 2; /* skip source rects */
439 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
) + (num_clips
- 1) * sizeof(cmd
->cr
));
440 if (unlikely(cmd
== NULL
)) {
441 DRM_ERROR("Fifo reserve failed.\n");
445 memset(cmd
, 0, sizeof(*cmd
));
447 cmd
->header
.id
= cpu_to_le32(SVGA_3D_CMD_PRESENT
);
448 cmd
->header
.size
= cpu_to_le32(sizeof(cmd
->body
) + num_clips
* sizeof(cmd
->cr
));
449 cmd
->body
.sid
= cpu_to_le32(surf
->res
.id
);
451 for (i
= 0, cr
= &cmd
->cr
; i
< num_clips
; i
++, cr
++, clips
+= inc
) {
452 cr
->x
= cpu_to_le16(clips
->x1
);
453 cr
->y
= cpu_to_le16(clips
->y1
);
456 cr
->w
= cpu_to_le16(clips
->x2
- clips
->x1
);
457 cr
->h
= cpu_to_le16(clips
->y2
- clips
->y1
);
460 vmw_fifo_commit(dev_priv
, sizeof(*cmd
) + (num_clips
- 1) * sizeof(cmd
->cr
));
465 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs
= {
466 .destroy
= vmw_framebuffer_surface_destroy
,
467 .dirty
= vmw_framebuffer_surface_dirty
,
468 .create_handle
= vmw_framebuffer_create_handle
,
471 int vmw_kms_new_framebuffer_surface(struct vmw_private
*dev_priv
,
472 struct vmw_surface
*surface
,
473 struct vmw_framebuffer
**out
,
474 unsigned width
, unsigned height
)
477 struct drm_device
*dev
= dev_priv
->dev
;
478 struct vmw_framebuffer_surface
*vfbs
;
481 vfbs
= kzalloc(sizeof(*vfbs
), GFP_KERNEL
);
487 ret
= drm_framebuffer_init(dev
, &vfbs
->base
.base
,
488 &vmw_framebuffer_surface_funcs
);
492 if (!vmw_surface_reference(surface
)) {
493 DRM_ERROR("failed to reference surface %p\n", surface
);
497 /* XXX get the first 3 from the surface info */
498 vfbs
->base
.base
.bits_per_pixel
= 32;
499 vfbs
->base
.base
.pitch
= width
* 32 / 4;
500 vfbs
->base
.base
.depth
= 24;
501 vfbs
->base
.base
.width
= width
;
502 vfbs
->base
.base
.height
= height
;
503 vfbs
->base
.pin
= NULL
;
504 vfbs
->base
.unpin
= NULL
;
505 vfbs
->surface
= surface
;
506 mutex_init(&vfbs
->work_lock
);
507 INIT_DELAYED_WORK(&vfbs
->d_work
, &vmw_framebuffer_present_fs_callback
);
513 drm_framebuffer_cleanup(&vfbs
->base
.base
);
/*
 * Dmabuf framebuffer code
 */
524 #define vmw_framebuffer_to_vfbd(x) \
525 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
527 struct vmw_framebuffer_dmabuf
{
528 struct vmw_framebuffer base
;
529 struct vmw_dma_buffer
*buffer
;
532 void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer
*framebuffer
)
534 struct vmw_framebuffer_dmabuf
*vfbd
=
535 vmw_framebuffer_to_vfbd(framebuffer
);
537 drm_framebuffer_cleanup(framebuffer
);
538 vmw_dmabuf_unreference(&vfbd
->buffer
);
543 int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer
*framebuffer
,
544 unsigned flags
, unsigned color
,
545 struct drm_clip_rect
*clips
,
548 struct vmw_private
*dev_priv
= vmw_priv(framebuffer
->dev
);
549 struct drm_clip_rect norect
;
552 SVGAFifoCmdUpdate body
;
554 int i
, increment
= 1;
557 !(dev_priv
->fifo
.capabilities
&
558 SVGA_FIFO_CAP_SCREEN_OBJECT
)) {
561 norect
.x1
= norect
.y1
= 0;
562 norect
.x2
= framebuffer
->width
;
563 norect
.y2
= framebuffer
->height
;
564 } else if (flags
& DRM_MODE_FB_DIRTY_ANNOTATE_COPY
) {
569 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
) * num_clips
);
570 if (unlikely(cmd
== NULL
)) {
571 DRM_ERROR("Fifo reserve failed.\n");
575 for (i
= 0; i
< num_clips
; i
++, clips
+= increment
) {
576 cmd
[i
].header
= cpu_to_le32(SVGA_CMD_UPDATE
);
577 cmd
[i
].body
.x
= cpu_to_le32(clips
[i
].x1
);
578 cmd
[i
].body
.y
= cpu_to_le32(clips
[i
].y1
);
579 cmd
[i
].body
.width
= cpu_to_le32(clips
[i
].x2
- clips
[i
].x1
);
580 cmd
[i
].body
.height
= cpu_to_le32(clips
[i
].y2
- clips
[i
].y1
);
583 vmw_fifo_commit(dev_priv
, sizeof(*cmd
) * num_clips
);
588 static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs
= {
589 .destroy
= vmw_framebuffer_dmabuf_destroy
,
590 .dirty
= vmw_framebuffer_dmabuf_dirty
,
591 .create_handle
= vmw_framebuffer_create_handle
,
594 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer
*vfb
)
596 struct vmw_private
*dev_priv
= vmw_priv(vfb
->base
.dev
);
597 struct vmw_framebuffer_dmabuf
*vfbd
=
598 vmw_framebuffer_to_vfbd(&vfb
->base
);
601 vmw_overlay_pause_all(dev_priv
);
603 ret
= vmw_dmabuf_to_start_of_vram(dev_priv
, vfbd
->buffer
);
605 if (dev_priv
->capabilities
& SVGA_CAP_MULTIMON
) {
606 vmw_write(dev_priv
, SVGA_REG_NUM_GUEST_DISPLAYS
, 1);
607 vmw_write(dev_priv
, SVGA_REG_DISPLAY_ID
, 0);
608 vmw_write(dev_priv
, SVGA_REG_DISPLAY_IS_PRIMARY
, true);
609 vmw_write(dev_priv
, SVGA_REG_DISPLAY_POSITION_X
, 0);
610 vmw_write(dev_priv
, SVGA_REG_DISPLAY_POSITION_Y
, 0);
611 vmw_write(dev_priv
, SVGA_REG_DISPLAY_WIDTH
, 0);
612 vmw_write(dev_priv
, SVGA_REG_DISPLAY_HEIGHT
, 0);
613 vmw_write(dev_priv
, SVGA_REG_DISPLAY_ID
, SVGA_ID_INVALID
);
615 vmw_write(dev_priv
, SVGA_REG_ENABLE
, 1);
616 vmw_write(dev_priv
, SVGA_REG_WIDTH
, vfb
->base
.width
);
617 vmw_write(dev_priv
, SVGA_REG_HEIGHT
, vfb
->base
.height
);
618 vmw_write(dev_priv
, SVGA_REG_BITS_PER_PIXEL
, vfb
->base
.bits_per_pixel
);
619 vmw_write(dev_priv
, SVGA_REG_DEPTH
, vfb
->base
.depth
);
620 vmw_write(dev_priv
, SVGA_REG_RED_MASK
, 0x00ff0000);
621 vmw_write(dev_priv
, SVGA_REG_GREEN_MASK
, 0x0000ff00);
622 vmw_write(dev_priv
, SVGA_REG_BLUE_MASK
, 0x000000ff);
626 vmw_overlay_resume_all(dev_priv
);
631 static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer
*vfb
)
633 struct vmw_private
*dev_priv
= vmw_priv(vfb
->base
.dev
);
634 struct vmw_framebuffer_dmabuf
*vfbd
=
635 vmw_framebuffer_to_vfbd(&vfb
->base
);
638 WARN_ON(!vfbd
->buffer
);
642 return vmw_dmabuf_from_vram(dev_priv
, vfbd
->buffer
);
645 int vmw_kms_new_framebuffer_dmabuf(struct vmw_private
*dev_priv
,
646 struct vmw_dma_buffer
*dmabuf
,
647 struct vmw_framebuffer
**out
,
648 unsigned width
, unsigned height
)
651 struct drm_device
*dev
= dev_priv
->dev
;
652 struct vmw_framebuffer_dmabuf
*vfbd
;
655 vfbd
= kzalloc(sizeof(*vfbd
), GFP_KERNEL
);
661 ret
= drm_framebuffer_init(dev
, &vfbd
->base
.base
,
662 &vmw_framebuffer_dmabuf_funcs
);
666 if (!vmw_dmabuf_reference(dmabuf
)) {
667 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf
);
671 /* XXX get the first 3 from the surface info */
672 vfbd
->base
.base
.bits_per_pixel
= 32;
673 vfbd
->base
.base
.pitch
= width
* 32 / 4;
674 vfbd
->base
.base
.depth
= 24;
675 vfbd
->base
.base
.width
= width
;
676 vfbd
->base
.base
.height
= height
;
677 vfbd
->base
.pin
= vmw_framebuffer_dmabuf_pin
;
678 vfbd
->base
.unpin
= vmw_framebuffer_dmabuf_unpin
;
679 vfbd
->buffer
= dmabuf
;
685 drm_framebuffer_cleanup(&vfbd
->base
.base
);
/*
 * Generic Kernel modesetting functions
 */
696 static struct drm_framebuffer
*vmw_kms_fb_create(struct drm_device
*dev
,
697 struct drm_file
*file_priv
,
698 struct drm_mode_fb_cmd
*mode_cmd
)
700 struct vmw_private
*dev_priv
= vmw_priv(dev
);
701 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
702 struct vmw_framebuffer
*vfb
= NULL
;
703 struct vmw_surface
*surface
= NULL
;
704 struct vmw_dma_buffer
*bo
= NULL
;
707 ret
= vmw_user_surface_lookup(dev_priv
, tfile
,
708 mode_cmd
->handle
, &surface
);
712 ret
= vmw_kms_new_framebuffer_surface(dev_priv
, surface
, &vfb
,
713 mode_cmd
->width
, mode_cmd
->height
);
715 /* vmw_user_surface_lookup takes one ref so does new_fb */
716 vmw_surface_unreference(&surface
);
719 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret
);
725 DRM_INFO("%s: trying buffer\n", __func__
);
727 ret
= vmw_user_dmabuf_lookup(tfile
, mode_cmd
->handle
, &bo
);
729 DRM_ERROR("failed to find buffer: %i\n", ret
);
733 ret
= vmw_kms_new_framebuffer_dmabuf(dev_priv
, bo
, &vfb
,
734 mode_cmd
->width
, mode_cmd
->height
);
736 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
737 vmw_dmabuf_unreference(&bo
);
740 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret
);
/* fb_changed hook: nothing to do for vmwgfx. */
static int vmw_kms_fb_changed(struct drm_device *dev)
{
	return 0;
}
752 static struct drm_mode_config_funcs vmw_kms_funcs
= {
753 .fb_create
= vmw_kms_fb_create
,
754 .fb_changed
= vmw_kms_fb_changed
,
757 int vmw_kms_init(struct vmw_private
*dev_priv
)
759 struct drm_device
*dev
= dev_priv
->dev
;
762 drm_mode_config_init(dev
);
763 dev
->mode_config
.funcs
= &vmw_kms_funcs
;
764 dev
->mode_config
.min_width
= 640;
765 dev
->mode_config
.min_height
= 480;
766 dev
->mode_config
.max_width
= 2048;
767 dev
->mode_config
.max_height
= 2048;
769 ret
= vmw_kms_init_legacy_display_system(dev_priv
);
774 int vmw_kms_close(struct vmw_private
*dev_priv
)
777 * Docs says we should take the lock before calling this function
778 * but since it destroys encoders and our destructor calls
779 * drm_encoder_cleanup which takes the lock we deadlock.
781 drm_mode_config_cleanup(dev_priv
->dev
);
782 vmw_kms_close_legacy_display_system(dev_priv
);
786 int vmw_kms_cursor_bypass_ioctl(struct drm_device
*dev
, void *data
,
787 struct drm_file
*file_priv
)
789 struct drm_vmw_cursor_bypass_arg
*arg
= data
;
790 struct vmw_display_unit
*du
;
791 struct drm_mode_object
*obj
;
792 struct drm_crtc
*crtc
;
796 mutex_lock(&dev
->mode_config
.mutex
);
797 if (arg
->flags
& DRM_VMW_CURSOR_BYPASS_ALL
) {
799 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
800 du
= vmw_crtc_to_du(crtc
);
801 du
->hotspot_x
= arg
->xhot
;
802 du
->hotspot_y
= arg
->yhot
;
805 mutex_unlock(&dev
->mode_config
.mutex
);
809 obj
= drm_mode_object_find(dev
, arg
->crtc_id
, DRM_MODE_OBJECT_CRTC
);
815 crtc
= obj_to_crtc(obj
);
816 du
= vmw_crtc_to_du(crtc
);
818 du
->hotspot_x
= arg
->xhot
;
819 du
->hotspot_y
= arg
->yhot
;
822 mutex_unlock(&dev
->mode_config
.mutex
);
827 int vmw_kms_save_vga(struct vmw_private
*vmw_priv
)
830 * setup a single multimon monitor with the size
831 * of 0x0, this stops the UI from resizing when we
832 * change the framebuffer size
834 if (vmw_priv
->capabilities
& SVGA_CAP_MULTIMON
) {
835 vmw_write(vmw_priv
, SVGA_REG_NUM_GUEST_DISPLAYS
, 1);
836 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_ID
, 0);
837 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_IS_PRIMARY
, true);
838 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_POSITION_X
, 0);
839 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_POSITION_Y
, 0);
840 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_WIDTH
, 0);
841 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_HEIGHT
, 0);
842 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_ID
, SVGA_ID_INVALID
);
845 vmw_priv
->vga_width
= vmw_read(vmw_priv
, SVGA_REG_WIDTH
);
846 vmw_priv
->vga_height
= vmw_read(vmw_priv
, SVGA_REG_HEIGHT
);
847 vmw_priv
->vga_bpp
= vmw_read(vmw_priv
, SVGA_REG_BITS_PER_PIXEL
);
848 vmw_priv
->vga_depth
= vmw_read(vmw_priv
, SVGA_REG_DEPTH
);
849 vmw_priv
->vga_pseudo
= vmw_read(vmw_priv
, SVGA_REG_PSEUDOCOLOR
);
850 vmw_priv
->vga_red_mask
= vmw_read(vmw_priv
, SVGA_REG_RED_MASK
);
851 vmw_priv
->vga_green_mask
= vmw_read(vmw_priv
, SVGA_REG_GREEN_MASK
);
852 vmw_priv
->vga_blue_mask
= vmw_read(vmw_priv
, SVGA_REG_BLUE_MASK
);
857 int vmw_kms_restore_vga(struct vmw_private
*vmw_priv
)
859 vmw_write(vmw_priv
, SVGA_REG_WIDTH
, vmw_priv
->vga_width
);
860 vmw_write(vmw_priv
, SVGA_REG_HEIGHT
, vmw_priv
->vga_height
);
861 vmw_write(vmw_priv
, SVGA_REG_BITS_PER_PIXEL
, vmw_priv
->vga_bpp
);
862 vmw_write(vmw_priv
, SVGA_REG_DEPTH
, vmw_priv
->vga_depth
);
863 vmw_write(vmw_priv
, SVGA_REG_PSEUDOCOLOR
, vmw_priv
->vga_pseudo
);
864 vmw_write(vmw_priv
, SVGA_REG_RED_MASK
, vmw_priv
->vga_red_mask
);
865 vmw_write(vmw_priv
, SVGA_REG_GREEN_MASK
, vmw_priv
->vga_green_mask
);
866 vmw_write(vmw_priv
, SVGA_REG_BLUE_MASK
, vmw_priv
->vga_blue_mask
);
868 /* TODO check for multimon */
869 vmw_write(vmw_priv
, SVGA_REG_NUM_GUEST_DISPLAYS
, 0);