/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
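
/*
 * Illustrative sketch, not driver code: given the DMA API constraint
 * described above, CPU access to a CACHED buffer has to be bracketed
 * by explicit ownership transfers, which is what etnaviv_gem_cpu_prep()
 * and etnaviv_gem_cpu_fini() below implement:
 *
 *        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, dir);
 *        ...CPU reads/writes the pages...
 *        dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, dir);
 *
 * Touching the buffer from the CPU while the device owns it risks
 * exactly the corruption the warning above describes.
 */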

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(etnaviv_obj->base.filp);
                vma->vm_pgoff = 0;
                vma->vm_file = etnaviv_obj->base.filp;

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}

int etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int ret;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vm_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
        ret = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        ret = vm_insert_page(vma, vmf->address, page);

out:
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}
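
/*
 * Userspace sketch (assumed flow, not part of this file): the fake
 * offset returned here is what userspace passes to mmap() on the DRM
 * fd, typically after querying it with the GEM_INFO ioctl:
 *
 *        struct drm_etnaviv_gem_info info = { .handle = handle };
 *
 *        ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info);
 *        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   fd, info.offset);
 */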

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu *mmu)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->mmu == mmu)
                        return mapping;
        }

        return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        drm_gem_object_get(&etnaviv_obj->base);

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use += 1;
        mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&gpu->mmu->lock);
                        if (mapping->mmu == gpu->mmu)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&gpu->mmu->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        mapping->mmu = gpu->mmu;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
                                    mapping);
        if (ret < 0)
                kfree(mapping);
        else
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}
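
/*
 * Lock ordering note for etnaviv_gem_mapping_get() above: the object
 * lock is always taken first and gpu->mmu->lock is nested inside it.
 * That is what makes the zero-use-count path safe: the re-check of
 * mapping->mmu happens under the MMU lock while the object lock is
 * still held, so the MMU reaper cannot retarget the mapping between
 * the check and the use count increment.
 */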

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
                                                          write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
                                                          write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
                                    etnaviv_obj->sgt->nents,
                                    etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
                        etnaviv_obj->sgt->nents,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}
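
/*
 * Userspace sketch (assumed flow, not part of this file): for a CACHED
 * bo, CPU access is bracketed by the CPU_PREP/CPU_FINI ioctls, which
 * land in the two functions above:
 *
 *        struct drm_etnaviv_gem_cpu_prep prep = {
 *                .handle = handle,
 *                .op = ETNA_PREP_READ,
 *        };
 *        struct drm_etnaviv_gem_cpu_fini fini = { .handle = handle };
 *
 *        ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
 *        ...read the buffer contents...
 *        ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
 */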

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %u\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct reservation_object *robj = etnaviv_obj->resv;
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                   obj->name, kref_read(&obj->refcount),
                   off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu *mmu = mapping->mmu;

                WARN_ON(mapping->use);

                if (mmu)
                        etnaviv_iommu_unmap_gem(mmu, mapping);

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        if (etnaviv_obj->resv == &etnaviv_obj->_resv)
                reservation_object_fini(&etnaviv_obj->_resv);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);

        return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;
        if (robj) {
                etnaviv_obj->resv = robj;
        } else {
                etnaviv_obj->resv = &etnaviv_obj->_resv;
                reservation_object_init(&etnaviv_obj->_resv);
        }

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;

        return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
                u32 size, u32 flags)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret == 0) {
                struct address_space *mapping;

                /*
                 * Our buffers are kept pinned, so allocating them
                 * from the MOVABLE zone is a really bad idea, and
                 * conflicts with CMA.  See comments above new_inode()
                 * why this is required _and_ expected if you're
                 * going to pin these pages.
                 */
                mapping = obj->filp->f_mapping;
                mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
                                     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        }

        if (ret)
                goto fail;

        return obj;

fail:
        drm_gem_object_put_unlocked(obj);
        return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                u32 size, u32 flags, u32 *handle)
{
        struct drm_gem_object *obj;
        int ret;

        obj = __etnaviv_gem_new(dev, size, flags);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = etnaviv_gem_obj_add(dev, obj);
        if (ret < 0) {
                drm_gem_object_put_unlocked(obj);
                return ret;
        }

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(obj);

        return ret;
}
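
/*
 * Userspace sketch (assumed flow, not part of this file): the function
 * above is the backend of the GEM_NEW ioctl, e.g.:
 *
 *        struct drm_etnaviv_gem_new req = {
 *                .size = 4096,
 *                .flags = ETNA_BO_WC,
 *        };
 *
 *        ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req);
 *
 * On success req.handle names the buffer, and that handle holds the
 * only remaining reference once the allocation reference is dropped.
 */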

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
                u32 size, u32 flags)
{
        struct drm_gem_object *obj;
        int ret;

        obj = __etnaviv_gem_new(dev, size, flags);
        if (IS_ERR(obj))
                return obj;

        ret = etnaviv_gem_obj_add(dev, obj);
        if (ret < 0) {
                drm_gem_object_put_unlocked(obj);
                return ERR_PTR(ret);
        }

        return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

struct get_pages_work {
        struct work_struct work;
        struct mm_struct *mm;
        struct task_struct *task;
        struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
        struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm,
        struct task_struct *task)
{
        int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        uintptr_t ptr;
        unsigned int flags = 0;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return ERR_PTR(-ENOMEM);

        if (!etnaviv_obj->userptr.ro)
                flags |= FOLL_WRITE;

        pinned = 0;
        ptr = etnaviv_obj->userptr.ptr;

        down_read(&mm->mmap_sem);
        while (pinned < npages) {
                ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
                                            flags, pvec + pinned, NULL, NULL);
                if (ret < 0)
                        break;

                ptr += ret * PAGE_SIZE;
                pinned += ret;
        }
        up_read(&mm->mmap_sem);

        if (ret < 0) {
                release_pages(pvec, pinned, 0);
                kvfree(pvec);
                return ERR_PTR(ret);
        }

        return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
        struct page **pvec;

        pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm,
                                                work->task);

        mutex_lock(&etnaviv_obj->lock);
        if (IS_ERR(pvec)) {
                etnaviv_obj->userptr.work = ERR_CAST(pvec);
        } else {
                etnaviv_obj->userptr.work = NULL;
                etnaviv_obj->pages = pvec;
        }

        mutex_unlock(&etnaviv_obj->lock);
        drm_gem_object_put_unlocked(&etnaviv_obj->base);

        mmput(work->mm);
        put_task_struct(work->task);
        kfree(work);
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct get_pages_work *work;
        struct mm_struct *mm;
        int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        if (etnaviv_obj->userptr.work) {
                if (IS_ERR(etnaviv_obj->userptr.work)) {
                        ret = PTR_ERR(etnaviv_obj->userptr.work);
                        etnaviv_obj->userptr.work = NULL;
                } else {
                        ret = -EAGAIN;
                }
                return ret;
        }

        mm = get_task_mm(etnaviv_obj->userptr.task);
        pinned = 0;
        if (mm == current->mm) {
                pvec = kvmalloc_array(npages, sizeof(struct page *),
                                      GFP_KERNEL);
                if (!pvec) {
                        mmput(mm);
                        return -ENOMEM;
                }

                pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr,
                                               npages,
                                               !etnaviv_obj->userptr.ro, pvec);
                if (pinned < 0) {
                        kvfree(pvec);
                        mmput(mm);
                        return pinned;
                }

                if (pinned == npages) {
                        etnaviv_obj->pages = pvec;
                        mmput(mm);
                        return 0;
                }
        }

        release_pages(pvec, pinned, 0);
        kvfree(pvec);

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (!work) {
                mmput(mm);
                return -ENOMEM;
        }

        get_task_struct(current);
        drm_gem_object_get(&etnaviv_obj->base);

        work->mm = mm;
        work->task = current;
        work->etnaviv_obj = etnaviv_obj;

        etnaviv_obj->userptr.work = &work->work;
        INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

        etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

        return -EAGAIN;
}
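
/*
 * Note on the -EAGAIN above: when pinning is punted to the worker, the
 * pages are not ready yet and the caller is expected to retry.  On a
 * later attempt, either etnaviv_obj->pages has been populated by the
 * worker, or the error it stored in userptr.work is returned by the
 * check at the top of this function.
 */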

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                release_pages(etnaviv_obj->pages, npages, 0);
                kvfree(etnaviv_obj->pages);
        }
        put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.task = current;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
        get_task_struct(current);

        ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
        if (ret)
                goto unreference;

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(&etnaviv_obj->base);
        return ret;
}