Commit | Line | Data |
---|---|---|
543831cf TH |
1 | /************************************************************************** |
2 | * | |
3 | * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA | |
4 | * All Rights Reserved. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
27 | ||
28 | #include "vmwgfx_drv.h" | |
29 | #include "vmwgfx_resource_priv.h" | |
30 | #include <ttm/ttm_placement.h> | |
7e8d9da3 | 31 | #include "svga3d_surfacedefs.h" |
543831cf TH |
32 | |
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base: The TTM base object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 * @backup_handle: Handle of the backup buffer object, if any, as handed
 * out to user-space.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};
46 | ||
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
60 | ||
/* Forward declarations of the legacy-surface resource callbacks. */
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);

/* Converter from user-visible TTM base objects to surface resources. */
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


/* Lazily computed TTM accounting size of a user surface (set on first
 * define ioctl). */
static uint64_t vmw_user_surface_size;

/* Resource callbacks for legacy (non guest-backed) surfaces. */
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};
95 | ||
543831cf TH |
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * Header, body, copy box and suffix laid out contiguously as they are
 * written to the device FIFO.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * Followed in the FIFO by one SVGA3dSize per mip level.
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
121 | ||
122 | ||
123 | /** | |
124 | * vmw_surface_dma_size - Compute fifo size for a dma command. | |
125 | * | |
126 | * @srf: Pointer to a struct vmw_surface | |
127 | * | |
128 | * Computes the required size for a surface dma command for backup or | |
129 | * restoration of the surface represented by @srf. | |
130 | */ | |
131 | static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) | |
132 | { | |
133 | return srf->num_sizes * sizeof(struct vmw_surface_dma); | |
134 | } | |
135 | ||
136 | ||
137 | /** | |
138 | * vmw_surface_define_size - Compute fifo size for a surface define command. | |
139 | * | |
140 | * @srf: Pointer to a struct vmw_surface | |
141 | * | |
142 | * Computes the required size for a surface define command for the definition | |
143 | * of the surface represented by @srf. | |
144 | */ | |
145 | static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) | |
146 | { | |
147 | return sizeof(struct vmw_surface_define) + srf->num_sizes * | |
148 | sizeof(SVGA3dSize); | |
149 | } | |
150 | ||
151 | ||
152 | /** | |
153 | * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. | |
154 | * | |
155 | * Computes the required size for a surface destroy command for the destruction | |
156 | * of a hw surface. | |
157 | */ | |
158 | static inline uint32_t vmw_surface_destroy_size(void) | |
159 | { | |
160 | return sizeof(struct vmw_surface_destroy); | |
161 | } | |
162 | ||
163 | /** | |
164 | * vmw_surface_destroy_encode - Encode a surface_destroy command. | |
165 | * | |
166 | * @id: The surface id | |
167 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | |
168 | */ | |
169 | static void vmw_surface_destroy_encode(uint32_t id, | |
170 | void *cmd_space) | |
171 | { | |
172 | struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) | |
173 | cmd_space; | |
174 | ||
175 | cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; | |
176 | cmd->header.size = sizeof(cmd->body); | |
177 | cmd->body.sid = id; | |
178 | } | |
179 | ||
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 *
 * Writes a variable-length define command: the fixed body followed
 * immediately by one SVGA3dSize per mip level across all faces.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	/* Header size excludes the header itself. */
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	/* NOTE(review): only the format field is passed through
	 * cpu_to_le32() while the surrounding fields are written raw —
	 * confirm whether this lone conversion is intentional. */
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	/* The SVGA3dSize array starts right after the define body. */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
216 | ||
217 | /** | |
218 | * vmw_surface_dma_encode - Encode a surface_dma command. | |
219 | * | |
220 | * @srf: Pointer to a struct vmw_surface object. | |
221 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | |
222 | * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents | |
223 | * should be placed or read from. | |
224 | * @to_surface: Boolean whether to DMA to the surface or from the surface. | |
225 | */ | |
226 | static void vmw_surface_dma_encode(struct vmw_surface *srf, | |
227 | void *cmd_space, | |
228 | const SVGAGuestPtr *ptr, | |
229 | bool to_surface) | |
230 | { | |
231 | uint32_t i; | |
543831cf | 232 | struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; |
7e8d9da3 TH |
233 | const struct svga3d_surface_desc *desc = |
234 | svga3dsurface_get_desc(srf->format); | |
543831cf TH |
235 | |
236 | for (i = 0; i < srf->num_sizes; ++i) { | |
237 | SVGA3dCmdHeader *header = &cmd->header; | |
238 | SVGA3dCmdSurfaceDMA *body = &cmd->body; | |
239 | SVGA3dCopyBox *cb = &cmd->cb; | |
240 | SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; | |
241 | const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; | |
242 | const struct drm_vmw_size *cur_size = &srf->sizes[i]; | |
243 | ||
244 | header->id = SVGA_3D_CMD_SURFACE_DMA; | |
245 | header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); | |
246 | ||
247 | body->guest.ptr = *ptr; | |
248 | body->guest.ptr.offset += cur_offset->bo_offset; | |
7e8d9da3 TH |
249 | body->guest.pitch = svga3dsurface_calculate_pitch(desc, |
250 | cur_size); | |
543831cf TH |
251 | body->host.sid = srf->res.id; |
252 | body->host.face = cur_offset->face; | |
253 | body->host.mipmap = cur_offset->mip; | |
254 | body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : | |
255 | SVGA3D_READ_HOST_VRAM); | |
256 | cb->x = 0; | |
257 | cb->y = 0; | |
258 | cb->z = 0; | |
259 | cb->srcx = 0; | |
260 | cb->srcy = 0; | |
261 | cb->srcz = 0; | |
262 | cb->w = cur_size->width; | |
263 | cb->h = cur_size->height; | |
264 | cb->d = cur_size->depth; | |
265 | ||
266 | suffix->suffixSize = sizeof(*suffix); | |
7e8d9da3 TH |
267 | suffix->maximumOffset = |
268 | svga3dsurface_get_image_buffer_size(desc, cur_size, | |
269 | body->guest.pitch); | |
543831cf TH |
270 | suffix->flags.discard = 0; |
271 | suffix->flags.unsynchronized = 0; | |
272 | suffix->flags.reserved = 0; | |
273 | ++cmd; | |
274 | } | |
275 | }; | |
276 | ||
277 | ||
278 | /** | |
279 | * vmw_hw_surface_destroy - destroy a Device surface | |
280 | * | |
281 | * @res: Pointer to a struct vmw_resource embedded in a struct | |
282 | * vmw_surface. | |
283 | * | |
284 | * Destroys a the device surface associated with a struct vmw_surface if | |
285 | * any, and adjusts accounting and resource count accordingly. | |
286 | */ | |
287 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | |
288 | { | |
289 | ||
290 | struct vmw_private *dev_priv = res->dev_priv; | |
291 | struct vmw_surface *srf; | |
292 | void *cmd; | |
293 | ||
294 | if (res->id != -1) { | |
295 | ||
296 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | |
297 | if (unlikely(cmd == NULL)) { | |
298 | DRM_ERROR("Failed reserving FIFO space for surface " | |
299 | "destruction.\n"); | |
300 | return; | |
301 | } | |
302 | ||
303 | vmw_surface_destroy_encode(res->id, cmd); | |
304 | vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); | |
305 | ||
306 | /* | |
307 | * used_memory_size_atomic, or separate lock | |
308 | * to avoid taking dev_priv::cmdbuf_mutex in | |
309 | * the destroy path. | |
310 | */ | |
311 | ||
312 | mutex_lock(&dev_priv->cmdbuf_mutex); | |
313 | srf = vmw_res_to_srf(res); | |
314 | dev_priv->used_memory_size -= res->backup_size; | |
315 | mutex_unlock(&dev_priv->cmdbuf_mutex); | |
316 | } | |
317 | vmw_3d_resource_dec(dev_priv, false); | |
318 | } | |
319 | ||
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and emit a surface
 * define command to the device.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	/* Already has a hw surface; nothing to do. */
	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	/* Respect the device memory limit; caller may evict and retry. */
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	/* Legacy surfaces only support a bounded id range. */
	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	/* Return the id so the resource stays in the "no hw id" state. */
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
391 | ||
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	/* Best effort: a NULL fence is tolerated below. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
447 | ||
448 | /** | |
449 | * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the | |
450 | * surface validation process. | |
451 | * | |
452 | * @res: Pointer to a struct vmw_res embedded in a struct | |
453 | * vmw_surface. | |
454 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | |
455 | * information about the backup buffer. | |
456 | * | |
457 | * This function will copy backup data to the surface if the | |
458 | * backup buffer is dirty. | |
459 | */ | |
460 | static int vmw_legacy_srf_bind(struct vmw_resource *res, | |
461 | struct ttm_validate_buffer *val_buf) | |
462 | { | |
463 | if (!res->backup_dirty) | |
464 | return 0; | |
465 | ||
466 | return vmw_legacy_srf_dma(res, val_buf, true); | |
467 | } | |
468 | ||
469 | ||
470 | /** | |
471 | * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the | |
472 | * surface eviction process. | |
473 | * | |
474 | * @res: Pointer to a struct vmw_res embedded in a struct | |
475 | * vmw_surface. | |
476 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | |
477 | * information about the backup buffer. | |
478 | * | |
479 | * This function will copy backup data from the surface. | |
480 | */ | |
481 | static int vmw_legacy_srf_unbind(struct vmw_resource *res, | |
482 | bool readback, | |
483 | struct ttm_validate_buffer *val_buf) | |
484 | { | |
485 | if (unlikely(readback)) | |
486 | return vmw_legacy_srf_dma(res, val_buf, false); | |
487 | return 0; | |
488 | } | |
489 | ||
490 | /** | |
491 | * vmw_legacy_srf_destroy - Destroy a device surface as part of a | |
492 | * resource eviction process. | |
493 | * | |
494 | * @res: Pointer to a struct vmw_res embedded in a struct | |
495 | * vmw_surface. | |
496 | */ | |
497 | static int vmw_legacy_srf_destroy(struct vmw_resource *res) | |
498 | { | |
499 | struct vmw_private *dev_priv = res->dev_priv; | |
500 | uint32_t submit_size; | |
501 | uint8_t *cmd; | |
502 | ||
503 | BUG_ON(res->id == -1); | |
504 | ||
505 | /* | |
506 | * Encode the dma- and surface destroy commands. | |
507 | */ | |
508 | ||
509 | submit_size = vmw_surface_destroy_size(); | |
510 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | |
511 | if (unlikely(cmd == NULL)) { | |
512 | DRM_ERROR("Failed reserving FIFO space for surface " | |
513 | "eviction.\n"); | |
514 | return -ENOMEM; | |
515 | } | |
516 | ||
517 | vmw_surface_destroy_encode(res->id, cmd); | |
518 | vmw_fifo_commit(dev_priv, submit_size); | |
519 | ||
520 | /* | |
521 | * Surface memory usage accounting. | |
522 | */ | |
523 | ||
524 | dev_priv->used_memory_size -= res->backup_size; | |
525 | ||
526 | /* | |
527 | * Release the surface ID. | |
528 | */ | |
529 | ||
530 | vmw_resource_release_id(res); | |
531 | ||
532 | return 0; | |
533 | } | |
534 | ||
535 | ||
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 *
 * Returns 0 on success, or the error from vmw_resource_init. On
 * failure, @res_free is called on the embedded resource, so the caller
 * must not free @srf again.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	/* Hold a 3d-resource reference for the lifetime of the surface. */
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
570 | ||
571 | /** | |
572 | * vmw_user_surface_base_to_res - TTM base object to resource converter for | |
573 | * user visible surfaces | |
574 | * | |
575 | * @base: Pointer to a TTM base object | |
576 | * | |
577 | * Returns the struct vmw_resource embedded in a struct vmw_surface | |
578 | * for the user-visible object identified by the TTM base object @base. | |
579 | */ | |
580 | static struct vmw_resource * | |
581 | vmw_user_surface_base_to_res(struct ttm_base_object *base) | |
582 | { | |
583 | return &(container_of(base, struct vmw_user_surface, base)->srf.res); | |
584 | } | |
585 | ||
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 *
 * Frees the surface's size/offset tables, the optional cursor snooper
 * image, and the containing user-surface object, then returns the TTM
 * accounting memory.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	/* Snapshot the accounting size before the object is freed. */
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
605 | ||
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
625 | ||
626 | /** | |
627 | * vmw_user_surface_destroy_ioctl - Ioctl function implementing | |
628 | * the user surface destroy functionality. | |
629 | * | |
630 | * @dev: Pointer to a struct drm_device. | |
631 | * @data: Pointer to data copied from / to user-space. | |
632 | * @file_priv: Pointer to a drm file private structure. | |
633 | */ | |
634 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | |
635 | struct drm_file *file_priv) | |
636 | { | |
637 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; | |
638 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | |
639 | ||
640 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); | |
641 | } | |
642 | ||
643 | /** | |
644 | * vmw_user_surface_define_ioctl - Ioctl function implementing | |
645 | * the user surface define functionality. | |
646 | * | |
647 | * @dev: Pointer to a struct drm_device. | |
648 | * @data: Pointer to data copied from / to user-space. | |
649 | * @file_priv: Pointer to a drm file private structure. | |
650 | */ | |
651 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |
652 | struct drm_file *file_priv) | |
653 | { | |
654 | struct vmw_private *dev_priv = vmw_priv(dev); | |
655 | struct vmw_user_surface *user_srf; | |
656 | struct vmw_surface *srf; | |
657 | struct vmw_resource *res; | |
658 | struct vmw_resource *tmp; | |
659 | union drm_vmw_surface_create_arg *arg = | |
660 | (union drm_vmw_surface_create_arg *)data; | |
661 | struct drm_vmw_surface_create_req *req = &arg->req; | |
662 | struct drm_vmw_surface_arg *rep = &arg->rep; | |
663 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | |
664 | struct drm_vmw_size __user *user_sizes; | |
665 | int ret; | |
666 | int i, j; | |
667 | uint32_t cur_bo_offset; | |
668 | struct drm_vmw_size *cur_size; | |
669 | struct vmw_surface_offset *cur_offset; | |
543831cf TH |
670 | uint32_t num_sizes; |
671 | uint32_t size; | |
672 | struct vmw_master *vmaster = vmw_master(file_priv->master); | |
7e8d9da3 | 673 | const struct svga3d_surface_desc *desc; |
543831cf TH |
674 | |
675 | if (unlikely(vmw_user_surface_size == 0)) | |
676 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | |
677 | 128; | |
678 | ||
679 | num_sizes = 0; | |
404f763e LQ |
680 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { |
681 | if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) | |
682 | return -EINVAL; | |
543831cf | 683 | num_sizes += req->mip_levels[i]; |
404f763e | 684 | } |
543831cf | 685 | |
30cd45a6 MM |
686 | if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || |
687 | num_sizes == 0) | |
543831cf TH |
688 | return -EINVAL; |
689 | ||
690 | size = vmw_user_surface_size + 128 + | |
691 | ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + | |
692 | ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); | |
693 | ||
694 | ||
7e8d9da3 TH |
695 | desc = svga3dsurface_get_desc(req->format); |
696 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | |
697 | DRM_ERROR("Invalid surface format for surface creation.\n"); | |
698 | return -EINVAL; | |
699 | } | |
700 | ||
543831cf TH |
701 | ret = ttm_read_lock(&vmaster->lock, true); |
702 | if (unlikely(ret != 0)) | |
703 | return ret; | |
704 | ||
705 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | |
706 | size, false, true); | |
707 | if (unlikely(ret != 0)) { | |
708 | if (ret != -ERESTARTSYS) | |
709 | DRM_ERROR("Out of graphics memory for surface" | |
710 | " creation.\n"); | |
711 | goto out_unlock; | |
712 | } | |
713 | ||
714 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | |
715 | if (unlikely(user_srf == NULL)) { | |
716 | ret = -ENOMEM; | |
717 | goto out_no_user_srf; | |
718 | } | |
719 | ||
720 | srf = &user_srf->srf; | |
721 | res = &srf->res; | |
722 | ||
723 | srf->flags = req->flags; | |
724 | srf->format = req->format; | |
725 | srf->scanout = req->scanout; | |
726 | ||
727 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | |
728 | srf->num_sizes = num_sizes; | |
729 | user_srf->size = size; | |
730 | ||
731 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | |
732 | if (unlikely(srf->sizes == NULL)) { | |
733 | ret = -ENOMEM; | |
734 | goto out_no_sizes; | |
735 | } | |
736 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | |
737 | GFP_KERNEL); | |
738 | if (unlikely(srf->sizes == NULL)) { | |
739 | ret = -ENOMEM; | |
740 | goto out_no_offsets; | |
741 | } | |
742 | ||
743 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | |
744 | req->size_addr; | |
745 | ||
746 | ret = copy_from_user(srf->sizes, user_sizes, | |
747 | srf->num_sizes * sizeof(*srf->sizes)); | |
748 | if (unlikely(ret != 0)) { | |
749 | ret = -EFAULT; | |
750 | goto out_no_copy; | |
751 | } | |
752 | ||
753 | srf->base_size = *srf->sizes; | |
754 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | |
755 | srf->multisample_count = 1; | |
756 | ||
757 | cur_bo_offset = 0; | |
758 | cur_offset = srf->offsets; | |
759 | cur_size = srf->sizes; | |
760 | ||
543831cf TH |
761 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { |
762 | for (j = 0; j < srf->mip_levels[i]; ++j) { | |
7e8d9da3 TH |
763 | uint32_t stride = svga3dsurface_calculate_pitch |
764 | (desc, cur_size); | |
543831cf TH |
765 | |
766 | cur_offset->face = i; | |
767 | cur_offset->mip = j; | |
768 | cur_offset->bo_offset = cur_bo_offset; | |
7e8d9da3 TH |
769 | cur_bo_offset += svga3dsurface_get_image_buffer_size |
770 | (desc, cur_size, stride); | |
543831cf TH |
771 | ++cur_offset; |
772 | ++cur_size; | |
773 | } | |
774 | } | |
775 | res->backup_size = cur_bo_offset; | |
543831cf TH |
776 | if (srf->scanout && |
777 | srf->num_sizes == 1 && | |
778 | srf->sizes[0].width == 64 && | |
779 | srf->sizes[0].height == 64 && | |
780 | srf->format == SVGA3D_A8R8G8B8) { | |
781 | ||
782 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | |
783 | /* clear the image */ | |
784 | if (srf->snooper.image) { | |
785 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | |
786 | } else { | |
787 | DRM_ERROR("Failed to allocate cursor_image\n"); | |
788 | ret = -ENOMEM; | |
789 | goto out_no_copy; | |
790 | } | |
791 | } else { | |
792 | srf->snooper.image = NULL; | |
793 | } | |
794 | srf->snooper.crtc = NULL; | |
795 | ||
796 | user_srf->base.shareable = false; | |
797 | user_srf->base.tfile = NULL; | |
798 | ||
799 | /** | |
800 | * From this point, the generic resource management functions | |
801 | * destroy the object on failure. | |
802 | */ | |
803 | ||
804 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | |
805 | if (unlikely(ret != 0)) | |
806 | goto out_unlock; | |
807 | ||
808 | tmp = vmw_resource_reference(&srf->res); | |
809 | ret = ttm_base_object_init(tfile, &user_srf->base, | |
810 | req->shareable, VMW_RES_SURFACE, | |
811 | &vmw_user_surface_base_release, NULL); | |
812 | ||
813 | if (unlikely(ret != 0)) { | |
814 | vmw_resource_unreference(&tmp); | |
815 | vmw_resource_unreference(&res); | |
816 | goto out_unlock; | |
817 | } | |
818 | ||
819 | rep->sid = user_srf->base.hash.key; | |
820 | vmw_resource_unreference(&res); | |
821 | ||
822 | ttm_read_unlock(&vmaster->lock); | |
823 | return 0; | |
824 | out_no_copy: | |
825 | kfree(srf->offsets); | |
826 | out_no_offsets: | |
827 | kfree(srf->sizes); | |
828 | out_no_sizes: | |
829 | ttm_base_object_kfree(user_srf, base); | |
830 | out_no_user_srf: | |
831 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | |
832 | out_unlock: | |
833 | ttm_read_unlock(&vmaster->lock); | |
834 | return ret; | |
835 | } | |
836 | ||
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Adds a usage reference to the surface identified by req->sid and
 * copies its metadata (flags, format, mip levels, sizes) back to
 * user-space.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/* Sizes are copied out only if the caller supplied a buffer. */
	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}