/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
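
/*
 * Ioctl table. VMW_IOCTL_DEF() places each entry at index
 * (ioctl number - DRM_COMMAND_BASE), so the table below can be indexed
 * directly by the decoded ioctl number, while each entry's flags field
 * gates access (DRM_AUTH, DRM_MASTER, DRM_RENDER_ALLOW, ...).
 */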
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* these allow direct access to the framebuffers; mark as master only */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
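
/*
 * Usage example for the knobs above: coherent TTM pages can be requested
 * at load time with "modprobe vmwgfx force_coherent=1", or with
 * vmwgfx.force_coherent=1 on the kernel command line when the driver is
 * built in.
 */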

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called
 * when command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
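
/*
 * Summary of the selection above, in decreasing order of precedence:
 * force_coherent always wins with vmw_dma_alloc_coherent; an active Intel
 * IOMMU selects vmw_dma_map_populate (downgraded to vmw_dma_map_bind when
 * restrict_iommu is set); and vmw_dma_phys is used when neither
 * force_dma_api nor force_coherent was given.
 */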

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev: Pointer to struct drm-device
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}
	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}
	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}
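
/*
 * Return contract of vmw_master_check() above: NULL means the ioctl needs
 * no master lock, an ERR_PTR means the ioctl must be rejected, and a valid
 * pointer is a master whose lock has been read-locked and must be released
 * with ttm_read_unlock() once the ioctl has completed.
 */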

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}
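
/*
 * After vmw_master_drop() the built-in fbdev master owns the device again,
 * so the kernel framebuffer console can keep drawing while no userspace
 * master (an X server, for instance) holds the device.
 */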

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}
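
/*
 * Note that svga_lock is a spinlock and is therefore dropped above before
 * ttm_bo_evict_mm() is called: evicting VRAM buffers may sleep, which is
 * not allowed while holding a spinlock.
 */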

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
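
/*
 * drm_get_pci_dev() allocates the drm_device for the PCI device and ends
 * up invoking driver.load, i.e. vmw_driver_load() above, which performs
 * the actual device initialization.
 */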

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");