drivers/gpu/drm/drm_bufs.c (android_kernel_alcatel_ttab.git)
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
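
/*
 * Illustrative sketch (not part of the original file): how the shift/add
 * parameters above preserve the SHMLBA-relevant bits of a SHM map's kernel
 * address.  The concrete SHMLBA and PAGE_SIZE values in the comments
 * (16 KiB and 4 KiB) are assumptions chosen only to make the arithmetic
 * visible; the symbols themselves are the ones used by drm_map_handle().
 */
#if 0
static unsigned long example_shm_hash_add(unsigned long user_token)
{
	/* with SHMLBA == 16384 and PAGE_SIZE == 4096: bits = ilog2(4) + 1 = 3 */
	int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
	unsigned long add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;

	/* keep the low `bits` page-offset bits of the original kernel address */
	add |= (user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL);

	return add;	/* drm_ht_just_insert_please() hashes around this value */
}
#endif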
9a186645 122
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
41c2e75e 131static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
c60ce623 132 unsigned int size, enum drm_map_type type,
55910517
DA
133 enum drm_map_flags flags,
134 struct drm_map_list ** maplist)
1da177e4 135{
f77d390c 136 struct drm_local_map *map;
55910517 137 struct drm_map_list *list;
9c8da5eb 138 drm_dma_handle_t *dmah;
8d153f71
TH
139 unsigned long user_token;
140 int ret;
1da177e4 141
9a298b2a 142 map = kmalloc(sizeof(*map), GFP_KERNEL);
b5e89ed5 143 if (!map)
1da177e4
LT
144 return -ENOMEM;
145
7ab98401
DA
146 map->offset = offset;
147 map->size = size;
148 map->flags = flags;
149 map->type = type;
1da177e4
LT
150
	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
b5e89ed5 155 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
9a298b2a 156 kfree(map);
1da177e4
LT
157 return -EINVAL;
158 }
41c2e75e
BH
159 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
160 (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
166 if (map->type == _DRM_SHM)
167 map->size = PAGE_ALIGN(map->size);
168
41c2e75e 169 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
9a298b2a 170 kfree(map);
1da177e4
LT
171 return -EINVAL;
172 }
b5e89ed5 173 map->mtrr = -1;
1da177e4
LT
174 map->handle = NULL;
175
b5e89ed5 176 switch (map->type) {
1da177e4
LT
177 case _DRM_REGISTERS:
178 case _DRM_FRAME_BUFFER:
4b7fb9b5 179#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
8d2ea625 180 if (map->offset + (map->size-1) < map->offset ||
b5e89ed5 181 map->offset < virt_to_phys(high_memory)) {
9a298b2a 182 kfree(map);
1da177e4
LT
183 return -EINVAL;
184 }
1da177e4 185#endif
836cf046
DA
186 /* Some drivers preinitialize some maps, without the X Server
187 * needing to be aware of it. Therefore, we just return success
188 * when the server tries to create a duplicate map.
189 */
89625eb1
DA
190 list = drm_find_matching_map(dev, map);
191 if (list != NULL) {
192 if (list->map->size != map->size) {
836cf046 193 DRM_DEBUG("Matching maps of type %d with "
b5e89ed5
DA
194 "mismatched sizes, (%ld vs %ld)\n",
195 map->type, map->size,
196 list->map->size);
89625eb1 197 list->map->size = map->size;
836cf046
DA
198 }
199
9a298b2a 200 kfree(map);
89625eb1 201 *maplist = list;
836cf046
DA
202 return 0;
203 }
204
1da177e4 205 if (drm_core_has_MTRR(dev)) {
b5e89ed5
DA
206 if (map->type == _DRM_FRAME_BUFFER ||
207 (map->flags & _DRM_WRITE_COMBINING)) {
208 map->mtrr = mtrr_add(map->offset, map->size,
209 MTRR_TYPE_WRCOMB, 1);
1da177e4
LT
210 }
211 }
0769d39c 212 if (map->type == _DRM_REGISTERS) {
004a7727 213 map->handle = ioremap(map->offset, map->size);
0769d39c 214 if (!map->handle) {
9a298b2a 215 kfree(map);
0769d39c
ST
216 return -ENOMEM;
217 }
218 }
bc5f4523 219
1da177e4 220 break;
1da177e4 221 case _DRM_SHM:
54ba2f76
DA
222 list = drm_find_matching_map(dev, map);
223 if (list != NULL) {
224 if(list->map->size != map->size) {
225 DRM_DEBUG("Matching maps of type %d with "
226 "mismatched sizes, (%ld vs %ld)\n",
227 map->type, map->size, list->map->size);
228 list->map->size = map->size;
229 }
230
9a298b2a 231 kfree(map);
54ba2f76
DA
232 *maplist = list;
233 return 0;
234 }
f239b7b0 235 map->handle = vmalloc_user(map->size);
b5e89ed5
DA
236 DRM_DEBUG("%lu %d %p\n",
237 map->size, drm_order(map->size), map->handle);
238 if (!map->handle) {
9a298b2a 239 kfree(map);
1da177e4
LT
240 return -ENOMEM;
241 }
242 map->offset = (unsigned long)map->handle;
b5e89ed5 243 if (map->flags & _DRM_CONTAINS_LOCK) {
1da177e4 244 /* Prevent a 2nd X Server from creating a 2nd lock */
7c1c2871 245 if (dev->primary->master->lock.hw_lock != NULL) {
b5e89ed5 246 vfree(map->handle);
9a298b2a 247 kfree(map);
1da177e4
LT
248 return -EBUSY;
249 }
7c1c2871 250 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
1da177e4
LT
251 }
252 break;
54ba2f76 253 case _DRM_AGP: {
55910517 254 struct drm_agp_mem *entry;
54ba2f76
DA
255 int valid = 0;
256
257 if (!drm_core_has_AGP(dev)) {
9a298b2a 258 kfree(map);
54ba2f76
DA
259 return -EINVAL;
260 }
1da177e4 261#ifdef __alpha__
54ba2f76 262 map->offset += dev->hose->mem_space->start;
1da177e4 263#endif
47a184a8
EA
264 /* In some cases (i810 driver), user space may have already
265 * added the AGP base itself, because dev->agp->base previously
266 * only got set during AGP enable. So, only add the base
267 * address if the map's offset isn't already within the
268 * aperture.
54ba2f76 269 */
47a184a8
EA
270 if (map->offset < dev->agp->base ||
271 map->offset > dev->agp->base +
272 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
273 map->offset += dev->agp->base;
274 }
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
bd1b331f 283 list_for_each_entry(entry, &dev->agp->memory, head) {
54ba2f76
DA
284 if ((map->offset >= entry->bound) &&
285 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
286 valid = 1;
287 break;
288 }
1da177e4 289 }
bd1b331f 290 if (!list_empty(&dev->agp->memory) && !valid) {
9a298b2a 291 kfree(map);
54ba2f76
DA
292 return -EPERM;
293 }
41c2e75e
BH
294 DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
295 (unsigned long long)map->offset, map->size);
54ba2f76 296
a2c0a97b 297 break;
812c369d 298 }
a2c0a97b 299 case _DRM_GEM:
812c369d 300 DRM_ERROR("tried to addmap GEM object\n");
1da177e4
LT
301 break;
302 case _DRM_SCATTER_GATHER:
303 if (!dev->sg) {
9a298b2a 304 kfree(map);
1da177e4
LT
305 return -EINVAL;
306 }
d1f2b55a 307 map->offset += (unsigned long)dev->sg->virtual;
1da177e4 308 break;
b5e89ed5 309 case _DRM_CONSISTENT:
2d0f9eaf 310 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
9c8da5eb 311 * As we're limiting the address to 2^32-1 (or less),
2d0f9eaf
DA
312 * casting it down to 32 bits is no problem, but we
313 * need to point to a 64bit variable first. */
e6be8d9d 314 dmah = drm_pci_alloc(dev, map->size, map->size);
9c8da5eb 315 if (!dmah) {
9a298b2a 316 kfree(map);
2d0f9eaf
DA
317 return -ENOMEM;
318 }
9c8da5eb
DA
319 map->handle = dmah->vaddr;
320 map->offset = (unsigned long)dmah->busaddr;
321 kfree(dmah);
2d0f9eaf 322 break;
1da177e4 323 default:
9a298b2a 324 kfree(map);
1da177e4
LT
325 return -EINVAL;
326 }
327
94e3370e 328 list = kzalloc(sizeof(*list), GFP_KERNEL);
b5e89ed5 329 if (!list) {
85abb3f9 330 if (map->type == _DRM_REGISTERS)
004a7727 331 iounmap(map->handle);
9a298b2a 332 kfree(map);
1da177e4
LT
333 return -EINVAL;
334 }
1da177e4
LT
335 list->map = map;
336
30e2fb18 337 mutex_lock(&dev->struct_mutex);
bd1b331f 338 list_add(&list->head, &dev->maplist);
8d153f71 339
d1f2b55a 340 /* Assign a 32-bit handle */
30e2fb18 341 /* We do it here so that dev->struct_mutex protects the increment */
8d153f71
TH
342 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
343 map->offset;
f1a2a9b6
DM
344 ret = drm_map_handle(dev, &list->hash, user_token, 0,
345 (map->type == _DRM_SHM));
8d153f71 346 if (ret) {
85abb3f9 347 if (map->type == _DRM_REGISTERS)
004a7727 348 iounmap(map->handle);
9a298b2a
EA
349 kfree(map);
350 kfree(list);
8d153f71
TH
351 mutex_unlock(&dev->struct_mutex);
352 return ret;
353 }
354
1545085a 355 list->user_token = list->hash.key << PAGE_SHIFT;
30e2fb18 356 mutex_unlock(&dev->struct_mutex);
1da177e4 357
2ff2e8a3
BS
358 if (!(map->flags & _DRM_DRIVER))
359 list->master = dev->primary->master;
89625eb1 360 *maplist = list;
7ab98401 361 return 0;
54ba2f76 362 }
89625eb1 363
41c2e75e 364int drm_addmap(struct drm_device * dev, resource_size_t offset,
c60ce623 365 unsigned int size, enum drm_map_type type,
f77d390c 366 enum drm_map_flags flags, struct drm_local_map ** map_ptr)
89625eb1 367{
55910517 368 struct drm_map_list *list;
89625eb1
DA
369 int rc;
370
371 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
372 if (!rc)
373 *map_ptr = list->map;
374 return rc;
375}
b5e89ed5 376
7ab98401
DA
377EXPORT_SYMBOL(drm_addmap);
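
/*
 * Illustrative sketch (an assumption, not taken from this file): how a driver
 * might use drm_addmap() to expose its register BAR.  The function name,
 * base, length and flags below are hypothetical; only the drm_addmap()
 * signature and the _DRM_REGISTERS/_DRM_READ_ONLY values come from the DRM
 * headers used above.
 */
#if 0
static int example_map_registers(struct drm_device *dev,
				 resource_size_t mmio_base,
				 unsigned int mmio_len)
{
	struct drm_local_map *map;

	/* _DRM_REGISTERS maps are ioremap()ed by drm_addmap_core() */
	return drm_addmap(dev, mmio_base, mmio_len, _DRM_REGISTERS,
			  _DRM_READ_ONLY, &map);
}
#endif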
378
f77d390c
BH
379/**
380 * Ioctl to specify a range of memory that is available for mapping by a
381 * non-root process.
382 *
383 * \param inode device inode.
384 * \param file_priv DRM file private.
385 * \param cmd command.
386 * \param arg pointer to a drm_map structure.
387 * \return zero on success or a negative value on error.
388 *
389 */
c153f45f
EA
390int drm_addmap_ioctl(struct drm_device *dev, void *data,
391 struct drm_file *file_priv)
7ab98401 392{
c153f45f 393 struct drm_map *map = data;
55910517 394 struct drm_map_list *maplist;
7ab98401
DA
395 int err;
396
7c1c2871 397 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
d985c108
DA
398 return -EPERM;
399
c153f45f
EA
400 err = drm_addmap_core(dev, map->offset, map->size, map->type,
401 map->flags, &maplist);
7ab98401 402
b5e89ed5 403 if (err)
7ab98401 404 return err;
d1f2b55a 405
67e1a014 406 /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
c153f45f 407 map->handle = (void *)(unsigned long)maplist->user_token;
1da177e4 408 return 0;
88f399cd 409}
1da177e4 410
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
f77d390c 421int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
1da177e4 422{
55910517 423 struct drm_map_list *r_list = NULL, *list_t;
836cf046 424 drm_dma_handle_t dmah;
bd1b331f 425 int found = 0;
7c1c2871 426 struct drm_master *master;
1da177e4 427
836cf046 428 /* Find the list entry for the map and remove it */
bd1b331f 429 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
836cf046 430 if (r_list->map == map) {
7c1c2871 431 master = r_list->master;
bd1b331f 432 list_del(&r_list->head);
1545085a
TH
433 drm_ht_remove_key(&dev->map_hash,
434 r_list->user_token >> PAGE_SHIFT);
9a298b2a 435 kfree(r_list);
bd1b331f 436 found = 1;
836cf046
DA
437 break;
438 }
1da177e4
LT
439 }
440
bd1b331f 441 if (!found)
1da177e4 442 return -EINVAL;
1da177e4 443
836cf046
DA
444 switch (map->type) {
445 case _DRM_REGISTERS:
004a7727 446 iounmap(map->handle);
836cf046
DA
447 /* FALLTHROUGH */
448 case _DRM_FRAME_BUFFER:
449 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
450 int retcode;
b5e89ed5
DA
451 retcode = mtrr_del(map->mtrr, map->offset, map->size);
452 DRM_DEBUG("mtrr_del=%d\n", retcode);
1da177e4 453 }
836cf046
DA
454 break;
455 case _DRM_SHM:
456 vfree(map->handle);
7c1c2871
DA
457 if (master) {
458 if (dev->sigdata.lock == master->lock.hw_lock)
459 dev->sigdata.lock = NULL;
460 master->lock.hw_lock = NULL; /* SHM removed */
461 master->lock.file_priv = NULL;
171901d1 462 wake_up_interruptible_all(&master->lock.lock_queue);
7c1c2871 463 }
836cf046
DA
464 break;
465 case _DRM_AGP:
466 case _DRM_SCATTER_GATHER:
467 break;
468 case _DRM_CONSISTENT:
469 dmah.vaddr = map->handle;
470 dmah.busaddr = map->offset;
471 dmah.size = map->size;
472 __drm_pci_free(dev, &dmah);
473 break;
a2c0a97b
JB
474 case _DRM_GEM:
475 DRM_ERROR("tried to rmmap GEM object\n");
476 break;
1da177e4 477 }
9a298b2a 478 kfree(map);
836cf046 479
1da177e4
LT
480 return 0;
481}
4e74f36d 482EXPORT_SYMBOL(drm_rmmap_locked);
836cf046 483
f77d390c 484int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
836cf046
DA
485{
486 int ret;
487
30e2fb18 488 mutex_lock(&dev->struct_mutex);
836cf046 489 ret = drm_rmmap_locked(dev, map);
30e2fb18 490 mutex_unlock(&dev->struct_mutex);
836cf046
DA
491
492 return ret;
493}
ba8bbcf6 494EXPORT_SYMBOL(drm_rmmap);
7ab98401 495
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
c153f45f
EA
511int drm_rmmap_ioctl(struct drm_device *dev, void *data,
512 struct drm_file *file_priv)
7ab98401 513{
c153f45f 514 struct drm_map *request = data;
f77d390c 515 struct drm_local_map *map = NULL;
55910517 516 struct drm_map_list *r_list;
836cf046 517 int ret;
7ab98401 518
30e2fb18 519 mutex_lock(&dev->struct_mutex);
bd1b331f 520 list_for_each_entry(r_list, &dev->maplist, head) {
836cf046 521 if (r_list->map &&
c153f45f 522 r_list->user_token == (unsigned long)request->handle &&
836cf046
DA
523 r_list->map->flags & _DRM_REMOVABLE) {
524 map = r_list->map;
525 break;
526 }
527 }
528
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
bd1b331f 532 if (list_empty(&dev->maplist) || !map) {
30e2fb18 533 mutex_unlock(&dev->struct_mutex);
836cf046
DA
534 return -EINVAL;
535 }
536
836cf046
DA
537 /* Register and framebuffer maps are permanent */
538 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
30e2fb18 539 mutex_unlock(&dev->struct_mutex);
836cf046
DA
540 return 0;
541 }
542
543 ret = drm_rmmap_locked(dev, map);
544
30e2fb18 545 mutex_unlock(&dev->struct_mutex);
836cf046
DA
546
547 return ret;
7ab98401 548}
1da177e4
LT
549
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
cdd55a29
DA
558static void drm_cleanup_buf_error(struct drm_device * dev,
559 struct drm_buf_entry * entry)
1da177e4
LT
560{
561 int i;
562
563 if (entry->seg_count) {
564 for (i = 0; i < entry->seg_count; i++) {
565 if (entry->seglist[i]) {
ddf19b97 566 drm_pci_free(dev, entry->seglist[i]);
1da177e4
LT
567 }
568 }
9a298b2a 569 kfree(entry->seglist);
1da177e4
LT
570
571 entry->seg_count = 0;
572 }
573
b5e89ed5
DA
574 if (entry->buf_count) {
575 for (i = 0; i < entry->buf_count; i++) {
9a298b2a 576 kfree(entry->buflist[i].dev_private);
1da177e4 577 }
9a298b2a 578 kfree(entry->buflist);
1da177e4
LT
579
580 entry->buf_count = 0;
581 }
582}
583
584#if __OS_HAS_AGP
585/**
d59431bf 586 * Add AGP buffers for DMA transfers.
1da177e4 587 *
84b1fd10 588 * \param dev struct drm_device to which the buffers are to be added.
c60ce623 589 * \param request pointer to a struct drm_buf_desc describing the request.
1da177e4 590 * \return zero on success or a negative number on failure.
b5e89ed5 591 *
1da177e4
LT
592 * After some sanity checks creates a drm_buf structure for each buffer and
593 * reallocates the buffer list of the same size order to accommodate the new
594 * buffers.
595 */
84b1fd10 596int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
1da177e4 597{
cdd55a29
DA
598 struct drm_device_dma *dma = dev->dma;
599 struct drm_buf_entry *entry;
55910517 600 struct drm_agp_mem *agp_entry;
056219e2 601 struct drm_buf *buf;
1da177e4
LT
602 unsigned long offset;
603 unsigned long agp_offset;
604 int count;
605 int order;
606 int size;
607 int alignment;
608 int page_order;
609 int total;
610 int byte_count;
54ba2f76 611 int i, valid;
056219e2 612 struct drm_buf **temp_buflist;
1da177e4 613
b5e89ed5
DA
614 if (!dma)
615 return -EINVAL;
1da177e4 616
d59431bf
DA
617 count = request->count;
618 order = drm_order(request->size);
1da177e4
LT
619 size = 1 << order;
620
b5e89ed5
DA
621 alignment = (request->flags & _DRM_PAGE_ALIGN)
622 ? PAGE_ALIGN(size) : size;
1da177e4
LT
623 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
624 total = PAGE_SIZE << page_order;
625
626 byte_count = 0;
d59431bf 627 agp_offset = dev->agp->base + request->agp_start;
1da177e4 628
b5e89ed5
DA
629 DRM_DEBUG("count: %d\n", count);
630 DRM_DEBUG("order: %d\n", order);
631 DRM_DEBUG("size: %d\n", size);
d985c108 632 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
b5e89ed5
DA
633 DRM_DEBUG("alignment: %d\n", alignment);
634 DRM_DEBUG("page_order: %d\n", page_order);
635 DRM_DEBUG("total: %d\n", total);
1da177e4 636
b5e89ed5
DA
637 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
638 return -EINVAL;
639 if (dev->queue_count)
640 return -EBUSY; /* Not while in use */
1da177e4 641
54ba2f76
DA
642 /* Make sure buffers are located in AGP memory that we own */
643 valid = 0;
bd1b331f 644 list_for_each_entry(agp_entry, &dev->agp->memory, head) {
54ba2f76
DA
645 if ((agp_offset >= agp_entry->bound) &&
646 (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
647 valid = 1;
648 break;
649 }
650 }
bd1b331f 651 if (!list_empty(&dev->agp->memory) && !valid) {
54ba2f76
DA
652 DRM_DEBUG("zone invalid\n");
653 return -EINVAL;
654 }
b5e89ed5
DA
655 spin_lock(&dev->count_lock);
656 if (dev->buf_use) {
657 spin_unlock(&dev->count_lock);
1da177e4
LT
658 return -EBUSY;
659 }
b5e89ed5
DA
660 atomic_inc(&dev->buf_alloc);
661 spin_unlock(&dev->count_lock);
1da177e4 662
30e2fb18 663 mutex_lock(&dev->struct_mutex);
1da177e4 664 entry = &dma->bufs[order];
b5e89ed5 665 if (entry->buf_count) {
30e2fb18 666 mutex_unlock(&dev->struct_mutex);
b5e89ed5
DA
667 atomic_dec(&dev->buf_alloc);
668 return -ENOMEM; /* May only call once for each order */
1da177e4
LT
669 }
670
671 if (count < 0 || count > 4096) {
30e2fb18 672 mutex_unlock(&dev->struct_mutex);
b5e89ed5 673 atomic_dec(&dev->buf_alloc);
1da177e4
LT
674 return -EINVAL;
675 }
676
94e3370e 677 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
b5e89ed5 678 if (!entry->buflist) {
30e2fb18 679 mutex_unlock(&dev->struct_mutex);
b5e89ed5 680 atomic_dec(&dev->buf_alloc);
1da177e4
LT
681 return -ENOMEM;
682 }
1da177e4
LT
683
684 entry->buf_size = size;
685 entry->page_order = page_order;
686
687 offset = 0;
688
b5e89ed5
DA
689 while (entry->buf_count < count) {
690 buf = &entry->buflist[entry->buf_count];
691 buf->idx = dma->buf_count + entry->buf_count;
692 buf->total = alignment;
693 buf->order = order;
694 buf->used = 0;
1da177e4 695
b5e89ed5 696 buf->offset = (dma->byte_count + offset);
1da177e4
LT
697 buf->bus_address = agp_offset + offset;
698 buf->address = (void *)(agp_offset + offset);
b5e89ed5 699 buf->next = NULL;
1da177e4
LT
700 buf->waiting = 0;
701 buf->pending = 0;
b5e89ed5 702 init_waitqueue_head(&buf->dma_wait);
6c340eac 703 buf->file_priv = NULL;
1da177e4
LT
704
705 buf->dev_priv_size = dev->driver->dev_priv_size;
94e3370e 706 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
b5e89ed5 707 if (!buf->dev_private) {
1da177e4
LT
708 /* Set count correctly so we free the proper amount. */
709 entry->buf_count = count;
b5e89ed5 710 drm_cleanup_buf_error(dev, entry);
30e2fb18 711 mutex_unlock(&dev->struct_mutex);
b5e89ed5 712 atomic_dec(&dev->buf_alloc);
1da177e4
LT
713 return -ENOMEM;
714 }
1da177e4 715
b5e89ed5 716 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1da177e4
LT
717
718 offset += alignment;
719 entry->buf_count++;
720 byte_count += PAGE_SIZE << page_order;
721 }
722
b5e89ed5 723 DRM_DEBUG("byte_count: %d\n", byte_count);
1da177e4 724
9a298b2a
EA
725 temp_buflist = krealloc(dma->buflist,
726 (dma->buf_count + entry->buf_count) *
727 sizeof(*dma->buflist), GFP_KERNEL);
b5e89ed5 728 if (!temp_buflist) {
1da177e4 729 /* Free the entry because it isn't valid */
b5e89ed5 730 drm_cleanup_buf_error(dev, entry);
30e2fb18 731 mutex_unlock(&dev->struct_mutex);
b5e89ed5 732 atomic_dec(&dev->buf_alloc);
1da177e4
LT
733 return -ENOMEM;
734 }
735 dma->buflist = temp_buflist;
736
b5e89ed5 737 for (i = 0; i < entry->buf_count; i++) {
1da177e4
LT
738 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
739 }
740
741 dma->buf_count += entry->buf_count;
d985c108
DA
742 dma->seg_count += entry->seg_count;
743 dma->page_count += byte_count >> PAGE_SHIFT;
1da177e4
LT
744 dma->byte_count += byte_count;
745
b5e89ed5
DA
746 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
747 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1da177e4 748
30e2fb18 749 mutex_unlock(&dev->struct_mutex);
1da177e4 750
d59431bf
DA
751 request->count = entry->buf_count;
752 request->size = size;
1da177e4
LT
753
754 dma->flags = _DRM_DMA_USE_AGP;
755
b5e89ed5 756 atomic_dec(&dev->buf_alloc);
1da177e4
LT
757 return 0;
758}
d84f76d3 759EXPORT_SYMBOL(drm_addbufs_agp);
b5e89ed5 760#endif /* __OS_HAS_AGP */
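
/*
 * Worked example (an addition for clarity, assuming 4 KiB pages so that
 * PAGE_SHIFT == 12) of the sizing math shared by the addbufs variants in
 * this file.  The request size of 20000 bytes is made up.
 */
#if 0
static void example_addbufs_sizing(void)
{
	int order = drm_order(20000);		/* 15, since 2^15 = 32768 >= 20000 */
	int size = 1 << order;			/* 32768 */
	int alignment = PAGE_ALIGN(size);	/* 32768, already page aligned */
	int page_order = order - PAGE_SHIFT;	/* 3 */
	int total = PAGE_SIZE << page_order;	/* 32768 bytes per DMA chunk */

	(void)size; (void)alignment; (void)total;
}
#endif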
1da177e4 761
84b1fd10 762int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
1da177e4 763{
cdd55a29 764 struct drm_device_dma *dma = dev->dma;
1da177e4
LT
765 int count;
766 int order;
767 int size;
768 int total;
769 int page_order;
cdd55a29 770 struct drm_buf_entry *entry;
ddf19b97 771 drm_dma_handle_t *dmah;
056219e2 772 struct drm_buf *buf;
1da177e4
LT
773 int alignment;
774 unsigned long offset;
775 int i;
776 int byte_count;
777 int page_count;
778 unsigned long *temp_pagelist;
056219e2 779 struct drm_buf **temp_buflist;
1da177e4 780
b5e89ed5
DA
781 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
782 return -EINVAL;
d985c108 783
b5e89ed5
DA
784 if (!dma)
785 return -EINVAL;
1da177e4 786
d985c108
DA
787 if (!capable(CAP_SYS_ADMIN))
788 return -EPERM;
789
d59431bf
DA
790 count = request->count;
791 order = drm_order(request->size);
1da177e4
LT
792 size = 1 << order;
793
b5e89ed5
DA
794 DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
795 request->count, request->size, size, order, dev->queue_count);
1da177e4 796
b5e89ed5
DA
797 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
798 return -EINVAL;
799 if (dev->queue_count)
800 return -EBUSY; /* Not while in use */
1da177e4 801
d59431bf 802 alignment = (request->flags & _DRM_PAGE_ALIGN)
b5e89ed5 803 ? PAGE_ALIGN(size) : size;
1da177e4
LT
804 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
805 total = PAGE_SIZE << page_order;
806
b5e89ed5
DA
807 spin_lock(&dev->count_lock);
808 if (dev->buf_use) {
809 spin_unlock(&dev->count_lock);
1da177e4
LT
810 return -EBUSY;
811 }
b5e89ed5
DA
812 atomic_inc(&dev->buf_alloc);
813 spin_unlock(&dev->count_lock);
1da177e4 814
30e2fb18 815 mutex_lock(&dev->struct_mutex);
1da177e4 816 entry = &dma->bufs[order];
b5e89ed5 817 if (entry->buf_count) {
30e2fb18 818 mutex_unlock(&dev->struct_mutex);
b5e89ed5 819 atomic_dec(&dev->buf_alloc);
1da177e4
LT
820 return -ENOMEM; /* May only call once for each order */
821 }
822
823 if (count < 0 || count > 4096) {
30e2fb18 824 mutex_unlock(&dev->struct_mutex);
b5e89ed5 825 atomic_dec(&dev->buf_alloc);
1da177e4
LT
826 return -EINVAL;
827 }
828
94e3370e 829 entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
b5e89ed5 830 if (!entry->buflist) {
30e2fb18 831 mutex_unlock(&dev->struct_mutex);
b5e89ed5 832 atomic_dec(&dev->buf_alloc);
1da177e4
LT
833 return -ENOMEM;
834 }
b5e89ed5 835
94e3370e 836 entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
b5e89ed5 837 if (!entry->seglist) {
9a298b2a 838 kfree(entry->buflist);
30e2fb18 839 mutex_unlock(&dev->struct_mutex);
b5e89ed5 840 atomic_dec(&dev->buf_alloc);
1da177e4
LT
841 return -ENOMEM;
842 }
1da177e4
LT
843
844 /* Keep the original pagelist until we know all the allocations
845 * have succeeded
846 */
9a298b2a
EA
847 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
848 sizeof(*dma->pagelist), GFP_KERNEL);
1da177e4 849 if (!temp_pagelist) {
9a298b2a
EA
850 kfree(entry->buflist);
851 kfree(entry->seglist);
30e2fb18 852 mutex_unlock(&dev->struct_mutex);
b5e89ed5 853 atomic_dec(&dev->buf_alloc);
1da177e4
LT
854 return -ENOMEM;
855 }
856 memcpy(temp_pagelist,
b5e89ed5
DA
857 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
858 DRM_DEBUG("pagelist: %d entries\n",
859 dma->page_count + (count << page_order));
1da177e4 860
b5e89ed5 861 entry->buf_size = size;
1da177e4
LT
862 entry->page_order = page_order;
863 byte_count = 0;
864 page_count = 0;
865
b5e89ed5 866 while (entry->buf_count < count) {
bc5f4523 867
e6be8d9d 868 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
bc5f4523 869
ddf19b97 870 if (!dmah) {
1da177e4
LT
871 /* Set count correctly so we free the proper amount. */
872 entry->buf_count = count;
873 entry->seg_count = count;
874 drm_cleanup_buf_error(dev, entry);
9a298b2a 875 kfree(temp_pagelist);
30e2fb18 876 mutex_unlock(&dev->struct_mutex);
b5e89ed5 877 atomic_dec(&dev->buf_alloc);
1da177e4
LT
878 return -ENOMEM;
879 }
ddf19b97 880 entry->seglist[entry->seg_count++] = dmah;
b5e89ed5
DA
881 for (i = 0; i < (1 << page_order); i++) {
882 DRM_DEBUG("page %d @ 0x%08lx\n",
883 dma->page_count + page_count,
ddf19b97 884 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
1da177e4 885 temp_pagelist[dma->page_count + page_count++]
ddf19b97 886 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
1da177e4 887 }
b5e89ed5
DA
888 for (offset = 0;
889 offset + size <= total && entry->buf_count < count;
890 offset += alignment, ++entry->buf_count) {
891 buf = &entry->buflist[entry->buf_count];
892 buf->idx = dma->buf_count + entry->buf_count;
893 buf->total = alignment;
894 buf->order = order;
895 buf->used = 0;
896 buf->offset = (dma->byte_count + byte_count + offset);
ddf19b97
DA
897 buf->address = (void *)(dmah->vaddr + offset);
898 buf->bus_address = dmah->busaddr + offset;
b5e89ed5 899 buf->next = NULL;
1da177e4
LT
900 buf->waiting = 0;
901 buf->pending = 0;
b5e89ed5 902 init_waitqueue_head(&buf->dma_wait);
6c340eac 903 buf->file_priv = NULL;
1da177e4
LT
904
905 buf->dev_priv_size = dev->driver->dev_priv_size;
94e3370e
DB
906 buf->dev_private = kzalloc(buf->dev_priv_size,
907 GFP_KERNEL);
b5e89ed5 908 if (!buf->dev_private) {
1da177e4
LT
909 /* Set count correctly so we free the proper amount. */
910 entry->buf_count = count;
911 entry->seg_count = count;
b5e89ed5 912 drm_cleanup_buf_error(dev, entry);
9a298b2a 913 kfree(temp_pagelist);
30e2fb18 914 mutex_unlock(&dev->struct_mutex);
b5e89ed5 915 atomic_dec(&dev->buf_alloc);
1da177e4
LT
916 return -ENOMEM;
917 }
1da177e4 918
b5e89ed5
DA
919 DRM_DEBUG("buffer %d @ %p\n",
920 entry->buf_count, buf->address);
1da177e4
LT
921 }
922 byte_count += PAGE_SIZE << page_order;
923 }
924
9a298b2a
EA
925 temp_buflist = krealloc(dma->buflist,
926 (dma->buf_count + entry->buf_count) *
927 sizeof(*dma->buflist), GFP_KERNEL);
1da177e4
LT
928 if (!temp_buflist) {
929 /* Free the entry because it isn't valid */
b5e89ed5 930 drm_cleanup_buf_error(dev, entry);
9a298b2a 931 kfree(temp_pagelist);
30e2fb18 932 mutex_unlock(&dev->struct_mutex);
b5e89ed5 933 atomic_dec(&dev->buf_alloc);
1da177e4
LT
934 return -ENOMEM;
935 }
936 dma->buflist = temp_buflist;
937
b5e89ed5 938 for (i = 0; i < entry->buf_count; i++) {
1da177e4
LT
939 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
940 }
941
88393161 942 /* No allocations failed, so now we can replace the original pagelist
1da177e4
LT
943 * with the new one.
944 */
945 if (dma->page_count) {
9a298b2a 946 kfree(dma->pagelist);
1da177e4
LT
947 }
948 dma->pagelist = temp_pagelist;
949
950 dma->buf_count += entry->buf_count;
951 dma->seg_count += entry->seg_count;
952 dma->page_count += entry->seg_count << page_order;
953 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
954
30e2fb18 955 mutex_unlock(&dev->struct_mutex);
1da177e4 956
d59431bf
DA
957 request->count = entry->buf_count;
958 request->size = size;
1da177e4 959
3417f33e
GS
960 if (request->flags & _DRM_PCI_BUFFER_RO)
961 dma->flags = _DRM_DMA_USE_PCI_RO;
962
b5e89ed5 963 atomic_dec(&dev->buf_alloc);
1da177e4
LT
964 return 0;
965
966}
d84f76d3 967EXPORT_SYMBOL(drm_addbufs_pci);
1da177e4 968
84b1fd10 969static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
1da177e4 970{
cdd55a29
DA
971 struct drm_device_dma *dma = dev->dma;
972 struct drm_buf_entry *entry;
056219e2 973 struct drm_buf *buf;
1da177e4
LT
974 unsigned long offset;
975 unsigned long agp_offset;
976 int count;
977 int order;
978 int size;
979 int alignment;
980 int page_order;
981 int total;
982 int byte_count;
983 int i;
056219e2 984 struct drm_buf **temp_buflist;
1da177e4 985
b5e89ed5
DA
986 if (!drm_core_check_feature(dev, DRIVER_SG))
987 return -EINVAL;
988
989 if (!dma)
990 return -EINVAL;
1da177e4 991
d985c108
DA
992 if (!capable(CAP_SYS_ADMIN))
993 return -EPERM;
994
d59431bf
DA
995 count = request->count;
996 order = drm_order(request->size);
1da177e4
LT
997 size = 1 << order;
998
b5e89ed5
DA
999 alignment = (request->flags & _DRM_PAGE_ALIGN)
1000 ? PAGE_ALIGN(size) : size;
1da177e4
LT
1001 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1002 total = PAGE_SIZE << page_order;
1003
1004 byte_count = 0;
d59431bf 1005 agp_offset = request->agp_start;
1da177e4 1006
b5e89ed5
DA
1007 DRM_DEBUG("count: %d\n", count);
1008 DRM_DEBUG("order: %d\n", order);
1009 DRM_DEBUG("size: %d\n", size);
1010 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1011 DRM_DEBUG("alignment: %d\n", alignment);
1012 DRM_DEBUG("page_order: %d\n", page_order);
1013 DRM_DEBUG("total: %d\n", total);
1da177e4 1014
b5e89ed5
DA
1015 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1016 return -EINVAL;
1017 if (dev->queue_count)
1018 return -EBUSY; /* Not while in use */
1da177e4 1019
b5e89ed5
DA
1020 spin_lock(&dev->count_lock);
1021 if (dev->buf_use) {
1022 spin_unlock(&dev->count_lock);
1da177e4
LT
1023 return -EBUSY;
1024 }
b5e89ed5
DA
1025 atomic_inc(&dev->buf_alloc);
1026 spin_unlock(&dev->count_lock);
1da177e4 1027
30e2fb18 1028 mutex_lock(&dev->struct_mutex);
1da177e4 1029 entry = &dma->bufs[order];
b5e89ed5 1030 if (entry->buf_count) {
30e2fb18 1031 mutex_unlock(&dev->struct_mutex);
b5e89ed5
DA
1032 atomic_dec(&dev->buf_alloc);
1033 return -ENOMEM; /* May only call once for each order */
1da177e4
LT
1034 }
1035
1036 if (count < 0 || count > 4096) {
30e2fb18 1037 mutex_unlock(&dev->struct_mutex);
b5e89ed5 1038 atomic_dec(&dev->buf_alloc);
1da177e4
LT
1039 return -EINVAL;
1040 }
1041
94e3370e 1042 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
9a298b2a 1043 GFP_KERNEL);
b5e89ed5 1044 if (!entry->buflist) {
30e2fb18 1045 mutex_unlock(&dev->struct_mutex);
b5e89ed5 1046 atomic_dec(&dev->buf_alloc);
1da177e4
LT
1047 return -ENOMEM;
1048 }
1da177e4
LT
1049
1050 entry->buf_size = size;
1051 entry->page_order = page_order;
1052
1053 offset = 0;
1054
b5e89ed5
DA
1055 while (entry->buf_count < count) {
1056 buf = &entry->buflist[entry->buf_count];
1057 buf->idx = dma->buf_count + entry->buf_count;
1058 buf->total = alignment;
1059 buf->order = order;
1060 buf->used = 0;
1da177e4 1061
b5e89ed5 1062 buf->offset = (dma->byte_count + offset);
1da177e4 1063 buf->bus_address = agp_offset + offset;
b5e89ed5 1064 buf->address = (void *)(agp_offset + offset
d1f2b55a 1065 + (unsigned long)dev->sg->virtual);
b5e89ed5 1066 buf->next = NULL;
1da177e4
LT
1067 buf->waiting = 0;
1068 buf->pending = 0;
b5e89ed5 1069 init_waitqueue_head(&buf->dma_wait);
6c340eac 1070 buf->file_priv = NULL;
1da177e4
LT
1071
1072 buf->dev_priv_size = dev->driver->dev_priv_size;
94e3370e 1073 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
b5e89ed5 1074 if (!buf->dev_private) {
1da177e4
LT
1075 /* Set count correctly so we free the proper amount. */
1076 entry->buf_count = count;
b5e89ed5 1077 drm_cleanup_buf_error(dev, entry);
30e2fb18 1078 mutex_unlock(&dev->struct_mutex);
b5e89ed5 1079 atomic_dec(&dev->buf_alloc);
1da177e4
LT
1080 return -ENOMEM;
1081 }
1082
b5e89ed5 1083 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1da177e4
LT
1084
1085 offset += alignment;
1086 entry->buf_count++;
1087 byte_count += PAGE_SIZE << page_order;
1088 }
1089
b5e89ed5 1090 DRM_DEBUG("byte_count: %d\n", byte_count);
1da177e4 1091
9a298b2a
EA
1092 temp_buflist = krealloc(dma->buflist,
1093 (dma->buf_count + entry->buf_count) *
1094 sizeof(*dma->buflist), GFP_KERNEL);
b5e89ed5 1095 if (!temp_buflist) {
1da177e4 1096 /* Free the entry because it isn't valid */
b5e89ed5 1097 drm_cleanup_buf_error(dev, entry);
30e2fb18 1098 mutex_unlock(&dev->struct_mutex);
b5e89ed5 1099 atomic_dec(&dev->buf_alloc);
1da177e4
LT
1100 return -ENOMEM;
1101 }
1102 dma->buflist = temp_buflist;
1103
b5e89ed5 1104 for (i = 0; i < entry->buf_count; i++) {
1da177e4
LT
1105 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1106 }
1107
1108 dma->buf_count += entry->buf_count;
d985c108
DA
1109 dma->seg_count += entry->seg_count;
1110 dma->page_count += byte_count >> PAGE_SHIFT;
1da177e4
LT
1111 dma->byte_count += byte_count;
1112
b5e89ed5
DA
1113 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1114 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1da177e4 1115
30e2fb18 1116 mutex_unlock(&dev->struct_mutex);
1da177e4 1117
d59431bf
DA
1118 request->count = entry->buf_count;
1119 request->size = size;
1da177e4
LT
1120
1121 dma->flags = _DRM_DMA_USE_SG;
1122
b5e89ed5 1123 atomic_dec(&dev->buf_alloc);
1da177e4
LT
1124 return 0;
1125}
1126
84b1fd10 1127static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
b84397d6 1128{
cdd55a29
DA
1129 struct drm_device_dma *dma = dev->dma;
1130 struct drm_buf_entry *entry;
056219e2 1131 struct drm_buf *buf;
b84397d6
DA
1132 unsigned long offset;
1133 unsigned long agp_offset;
1134 int count;
1135 int order;
1136 int size;
1137 int alignment;
1138 int page_order;
1139 int total;
1140 int byte_count;
1141 int i;
056219e2 1142 struct drm_buf **temp_buflist;
b84397d6
DA
1143
1144 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1145 return -EINVAL;
b5e89ed5 1146
b84397d6
DA
1147 if (!dma)
1148 return -EINVAL;
1149
d985c108
DA
1150 if (!capable(CAP_SYS_ADMIN))
1151 return -EPERM;
1152
d59431bf
DA
1153 count = request->count;
1154 order = drm_order(request->size);
b84397d6
DA
1155 size = 1 << order;
1156
d59431bf 1157 alignment = (request->flags & _DRM_PAGE_ALIGN)
b84397d6
DA
1158 ? PAGE_ALIGN(size) : size;
1159 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1160 total = PAGE_SIZE << page_order;
1161
1162 byte_count = 0;
d59431bf 1163 agp_offset = request->agp_start;
b84397d6
DA
1164
1165 DRM_DEBUG("count: %d\n", count);
1166 DRM_DEBUG("order: %d\n", order);
1167 DRM_DEBUG("size: %d\n", size);
1168 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1169 DRM_DEBUG("alignment: %d\n", alignment);
1170 DRM_DEBUG("page_order: %d\n", page_order);
1171 DRM_DEBUG("total: %d\n", total);
1172
1173 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1174 return -EINVAL;
1175 if (dev->queue_count)
1176 return -EBUSY; /* Not while in use */
1177
1178 spin_lock(&dev->count_lock);
1179 if (dev->buf_use) {
1180 spin_unlock(&dev->count_lock);
1181 return -EBUSY;
1182 }
1183 atomic_inc(&dev->buf_alloc);
1184 spin_unlock(&dev->count_lock);
1185
30e2fb18 1186 mutex_lock(&dev->struct_mutex);
b84397d6
DA
1187 entry = &dma->bufs[order];
1188 if (entry->buf_count) {
30e2fb18 1189 mutex_unlock(&dev->struct_mutex);
b84397d6
DA
1190 atomic_dec(&dev->buf_alloc);
1191 return -ENOMEM; /* May only call once for each order */
1192 }
1193
1194 if (count < 0 || count > 4096) {
30e2fb18 1195 mutex_unlock(&dev->struct_mutex);
b84397d6
DA
1196 atomic_dec(&dev->buf_alloc);
1197 return -EINVAL;
1198 }
1199
94e3370e 1200 entry->buflist = kzalloc(count * sizeof(*entry->buflist),
9a298b2a 1201 GFP_KERNEL);
b84397d6 1202 if (!entry->buflist) {
30e2fb18 1203 mutex_unlock(&dev->struct_mutex);
b84397d6
DA
1204 atomic_dec(&dev->buf_alloc);
1205 return -ENOMEM;
1206 }
b84397d6
DA
1207
1208 entry->buf_size = size;
1209 entry->page_order = page_order;
1210
1211 offset = 0;
1212
1213 while (entry->buf_count < count) {
1214 buf = &entry->buflist[entry->buf_count];
1215 buf->idx = dma->buf_count + entry->buf_count;
1216 buf->total = alignment;
1217 buf->order = order;
1218 buf->used = 0;
1219
1220 buf->offset = (dma->byte_count + offset);
1221 buf->bus_address = agp_offset + offset;
1222 buf->address = (void *)(agp_offset + offset);
1223 buf->next = NULL;
1224 buf->waiting = 0;
1225 buf->pending = 0;
1226 init_waitqueue_head(&buf->dma_wait);
6c340eac 1227 buf->file_priv = NULL;
b84397d6
DA
1228
1229 buf->dev_priv_size = dev->driver->dev_priv_size;
94e3370e 1230 buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
b84397d6
DA
1231 if (!buf->dev_private) {
1232 /* Set count correctly so we free the proper amount. */
1233 entry->buf_count = count;
1234 drm_cleanup_buf_error(dev, entry);
30e2fb18 1235 mutex_unlock(&dev->struct_mutex);
b84397d6
DA
1236 atomic_dec(&dev->buf_alloc);
1237 return -ENOMEM;
1238 }
b84397d6
DA
1239
1240 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1241
1242 offset += alignment;
1243 entry->buf_count++;
1244 byte_count += PAGE_SIZE << page_order;
1245 }
1246
1247 DRM_DEBUG("byte_count: %d\n", byte_count);
1248
9a298b2a
EA
1249 temp_buflist = krealloc(dma->buflist,
1250 (dma->buf_count + entry->buf_count) *
1251 sizeof(*dma->buflist), GFP_KERNEL);
b84397d6
DA
1252 if (!temp_buflist) {
1253 /* Free the entry because it isn't valid */
1254 drm_cleanup_buf_error(dev, entry);
30e2fb18 1255 mutex_unlock(&dev->struct_mutex);
b84397d6
DA
1256 atomic_dec(&dev->buf_alloc);
1257 return -ENOMEM;
1258 }
1259 dma->buflist = temp_buflist;
1260
1261 for (i = 0; i < entry->buf_count; i++) {
1262 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1263 }
1264
1265 dma->buf_count += entry->buf_count;
d985c108
DA
1266 dma->seg_count += entry->seg_count;
1267 dma->page_count += byte_count >> PAGE_SHIFT;
b84397d6
DA
1268 dma->byte_count += byte_count;
1269
1270 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1271 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1272
30e2fb18 1273 mutex_unlock(&dev->struct_mutex);
b84397d6 1274
d59431bf
DA
1275 request->count = entry->buf_count;
1276 request->size = size;
b84397d6
DA
1277
1278 dma->flags = _DRM_DMA_USE_FB;
1279
1280 atomic_dec(&dev->buf_alloc);
1281 return 0;
1282}
d985c108 1283
b84397d6 1284
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
c153f45f
EA
1299int drm_addbufs(struct drm_device *dev, void *data,
1300 struct drm_file *file_priv)
1da177e4 1301{
c153f45f 1302 struct drm_buf_desc *request = data;
d59431bf 1303 int ret;
b5e89ed5 1304
1da177e4
LT
1305 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1306 return -EINVAL;
1307
1da177e4 1308#if __OS_HAS_AGP
c153f45f
EA
1309 if (request->flags & _DRM_AGP_BUFFER)
1310 ret = drm_addbufs_agp(dev, request);
1da177e4
LT
1311 else
1312#endif
c153f45f
EA
1313 if (request->flags & _DRM_SG_BUFFER)
1314 ret = drm_addbufs_sg(dev, request);
1315 else if (request->flags & _DRM_FB_BUFFER)
1316 ret = drm_addbufs_fb(dev, request);
1da177e4 1317 else
c153f45f 1318 ret = drm_addbufs_pci(dev, request);
d59431bf 1319
d59431bf 1320 return ret;
1da177e4
LT
1321}
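
/*
 * Illustrative sketch (an assumption, not part of this file): filling a
 * struct drm_buf_desc so that drm_addbufs() above dispatches to the intended
 * backend.  The helper name and the count/size values are hypothetical; the
 * flag and feature-check symbols are the ones drm_addbufs() itself tests.
 */
#if 0
static void example_fill_buf_desc(struct drm_device *dev,
				  struct drm_buf_desc *req)
{
	req->count = 32;			/* hypothetical buffer count */
	req->size = 65536;			/* hypothetical buffer size */
	req->flags = _DRM_PAGE_ALIGN;

	if (drm_core_has_AGP(dev) && dev->agp) {
		req->flags |= _DRM_AGP_BUFFER;	/* routed to drm_addbufs_agp() */
		req->agp_start = 0;		/* offset into the AGP aperture */
	} else if (drm_core_check_feature(dev, DRIVER_SG)) {
		req->flags |= _DRM_SG_BUFFER;	/* routed to drm_addbufs_sg() */
	}
	/* otherwise plain PCI buffers: routed to drm_addbufs_pci() */
}
#endif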
1322
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
c153f45f
EA
1340int drm_infobufs(struct drm_device *dev, void *data,
1341 struct drm_file *file_priv)
1da177e4 1342{
cdd55a29 1343 struct drm_device_dma *dma = dev->dma;
c153f45f 1344 struct drm_buf_info *request = data;
1da177e4
LT
1345 int i;
1346 int count;
1347
1348 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1349 return -EINVAL;
1350
b5e89ed5
DA
1351 if (!dma)
1352 return -EINVAL;
1da177e4 1353
b5e89ed5
DA
1354 spin_lock(&dev->count_lock);
1355 if (atomic_read(&dev->buf_alloc)) {
1356 spin_unlock(&dev->count_lock);
1da177e4
LT
1357 return -EBUSY;
1358 }
1359 ++dev->buf_use; /* Can't allocate more after this call */
b5e89ed5 1360 spin_unlock(&dev->count_lock);
1da177e4 1361
b5e89ed5
DA
1362 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1363 if (dma->bufs[i].buf_count)
1364 ++count;
1da177e4
LT
1365 }
1366
b5e89ed5 1367 DRM_DEBUG("count = %d\n", count);
1da177e4 1368
c153f45f 1369 if (request->count >= count) {
b5e89ed5
DA
1370 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1371 if (dma->bufs[i].buf_count) {
c60ce623 1372 struct drm_buf_desc __user *to =
c153f45f 1373 &request->list[count];
cdd55a29
DA
1374 struct drm_buf_entry *from = &dma->bufs[i];
1375 struct drm_freelist *list = &dma->bufs[i].freelist;
b5e89ed5
DA
1376 if (copy_to_user(&to->count,
1377 &from->buf_count,
1378 sizeof(from->buf_count)) ||
1379 copy_to_user(&to->size,
1380 &from->buf_size,
1381 sizeof(from->buf_size)) ||
1382 copy_to_user(&to->low_mark,
1383 &list->low_mark,
1384 sizeof(list->low_mark)) ||
1385 copy_to_user(&to->high_mark,
1386 &list->high_mark,
1387 sizeof(list->high_mark)))
1da177e4
LT
1388 return -EFAULT;
1389
b5e89ed5
DA
1390 DRM_DEBUG("%d %d %d %d %d\n",
1391 i,
1392 dma->bufs[i].buf_count,
1393 dma->bufs[i].buf_size,
1394 dma->bufs[i].freelist.low_mark,
1395 dma->bufs[i].freelist.high_mark);
1da177e4
LT
1396 ++count;
1397 }
1398 }
1399 }
c153f45f 1400 request->count = count;
1da177e4
LT
1401
1402 return 0;
1403}
1404
1405/**
1406 * Specifies a low and high water mark for buffer allocation
1407 *
1408 * \param inode device inode.
6c340eac 1409 * \param file_priv DRM file private.
1da177e4
LT
1410 * \param cmd command.
1411 * \param arg a pointer to a drm_buf_desc structure.
1412 * \return zero on success or a negative number on failure.
1413 *
1414 * Verifies that the size order is bounded between the admissible orders and
1415 * updates the respective drm_device_dma::bufs entry low and high water mark.
1416 *
1417 * \note This ioctl is deprecated and mostly never used.
1418 */
c153f45f
EA
1419int drm_markbufs(struct drm_device *dev, void *data,
1420 struct drm_file *file_priv)
1da177e4 1421{
cdd55a29 1422 struct drm_device_dma *dma = dev->dma;
c153f45f 1423 struct drm_buf_desc *request = data;
1da177e4 1424 int order;
cdd55a29 1425 struct drm_buf_entry *entry;
1da177e4
LT
1426
1427 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1428 return -EINVAL;
1429
b5e89ed5
DA
1430 if (!dma)
1431 return -EINVAL;
1da177e4 1432
b5e89ed5 1433 DRM_DEBUG("%d, %d, %d\n",
c153f45f
EA
1434 request->size, request->low_mark, request->high_mark);
1435 order = drm_order(request->size);
b5e89ed5
DA
1436 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1437 return -EINVAL;
1da177e4
LT
1438 entry = &dma->bufs[order];
1439
c153f45f 1440 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1da177e4 1441 return -EINVAL;
c153f45f 1442 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1da177e4
LT
1443 return -EINVAL;
1444
c153f45f
EA
1445 entry->freelist.low_mark = request->low_mark;
1446 entry->freelist.high_mark = request->high_mark;
1da177e4
LT
1447
1448 return 0;
1449}
1450
1451/**
b5e89ed5 1452 * Unreserve the buffers in list, previously reserved using drmDMA.
1da177e4
LT
1453 *
1454 * \param inode device inode.
6c340eac 1455 * \param file_priv DRM file private.
1da177e4
LT
1456 * \param cmd command.
1457 * \param arg pointer to a drm_buf_free structure.
1458 * \return zero on success or a negative number on failure.
b5e89ed5 1459 *
1da177e4
LT
1460 * Calls free_buffer() for each used buffer.
1461 * This function is primarily used for debugging.
1462 */
c153f45f
EA
1463int drm_freebufs(struct drm_device *dev, void *data,
1464 struct drm_file *file_priv)
1da177e4 1465{
cdd55a29 1466 struct drm_device_dma *dma = dev->dma;
c153f45f 1467 struct drm_buf_free *request = data;
1da177e4
LT
1468 int i;
1469 int idx;
056219e2 1470 struct drm_buf *buf;
1da177e4
LT
1471
1472 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1473 return -EINVAL;
1474
b5e89ed5
DA
1475 if (!dma)
1476 return -EINVAL;
1da177e4 1477
c153f45f
EA
1478 DRM_DEBUG("%d\n", request->count);
1479 for (i = 0; i < request->count; i++) {
1480 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1da177e4 1481 return -EFAULT;
b5e89ed5
DA
1482 if (idx < 0 || idx >= dma->buf_count) {
1483 DRM_ERROR("Index %d (of %d max)\n",
1484 idx, dma->buf_count - 1);
1da177e4
LT
1485 return -EINVAL;
1486 }
1487 buf = dma->buflist[idx];
6c340eac 1488 if (buf->file_priv != file_priv) {
b5e89ed5 1489 DRM_ERROR("Process %d freeing buffer not owned\n",
ba25f9dc 1490 task_pid_nr(current));
1da177e4
LT
1491 return -EINVAL;
1492 }
b5e89ed5 1493 drm_free_buffer(dev, buf);
1da177e4
LT
1494 }
1495
1496 return 0;
1497}
1498
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
c153f45f
EA
1513int drm_mapbufs(struct drm_device *dev, void *data,
1514 struct drm_file *file_priv)
1da177e4 1515{
cdd55a29 1516 struct drm_device_dma *dma = dev->dma;
1da177e4
LT
1517 int retcode = 0;
1518 const int zero = 0;
1519 unsigned long virtual;
1520 unsigned long address;
c153f45f 1521 struct drm_buf_map *request = data;
1da177e4
LT
1522 int i;
1523
1524 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1525 return -EINVAL;
1526
b5e89ed5
DA
1527 if (!dma)
1528 return -EINVAL;
1da177e4 1529
b5e89ed5
DA
1530 spin_lock(&dev->count_lock);
1531 if (atomic_read(&dev->buf_alloc)) {
1532 spin_unlock(&dev->count_lock);
1da177e4
LT
1533 return -EBUSY;
1534 }
1535 dev->buf_use++; /* Can't allocate more after this call */
b5e89ed5 1536 spin_unlock(&dev->count_lock);
1da177e4 1537
c153f45f 1538 if (request->count >= dma->buf_count) {
b84397d6 1539 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
b5e89ed5 1540 || (drm_core_check_feature(dev, DRIVER_SG)
b84397d6
DA
1541 && (dma->flags & _DRM_DMA_USE_SG))
1542 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1543 && (dma->flags & _DRM_DMA_USE_FB))) {
f77d390c 1544 struct drm_local_map *map = dev->agp_buffer_map;
d1f2b55a 1545 unsigned long token = dev->agp_buffer_token;
1da177e4 1546
b5e89ed5 1547 if (!map) {
1da177e4
LT
1548 retcode = -EINVAL;
1549 goto done;
1550 }
b5e89ed5 1551 down_write(&current->mm->mmap_sem);
6c340eac 1552 virtual = do_mmap(file_priv->filp, 0, map->size,
b5e89ed5 1553 PROT_READ | PROT_WRITE,
c153f45f
EA
1554 MAP_SHARED,
1555 token);
b5e89ed5 1556 up_write(&current->mm->mmap_sem);
1da177e4 1557 } else {
b5e89ed5 1558 down_write(&current->mm->mmap_sem);
6c340eac 1559 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
b5e89ed5
DA
1560 PROT_READ | PROT_WRITE,
1561 MAP_SHARED, 0);
1562 up_write(&current->mm->mmap_sem);
1da177e4 1563 }
b5e89ed5 1564 if (virtual > -1024UL) {
1da177e4
LT
1565 /* Real error */
1566 retcode = (signed long)virtual;
1567 goto done;
1568 }
c153f45f 1569 request->virtual = (void __user *)virtual;
1da177e4 1570
b5e89ed5 1571 for (i = 0; i < dma->buf_count; i++) {
c153f45f 1572 if (copy_to_user(&request->list[i].idx,
b5e89ed5 1573 &dma->buflist[i]->idx,
c153f45f 1574 sizeof(request->list[0].idx))) {
1da177e4
LT
1575 retcode = -EFAULT;
1576 goto done;
1577 }
c153f45f 1578 if (copy_to_user(&request->list[i].total,
b5e89ed5 1579 &dma->buflist[i]->total,
c153f45f 1580 sizeof(request->list[0].total))) {
1da177e4
LT
1581 retcode = -EFAULT;
1582 goto done;
1583 }
c153f45f 1584 if (copy_to_user(&request->list[i].used,
b5e89ed5 1585 &zero, sizeof(zero))) {
1da177e4
LT
1586 retcode = -EFAULT;
1587 goto done;
1588 }
b5e89ed5 1589 address = virtual + dma->buflist[i]->offset; /* *** */
c153f45f 1590 if (copy_to_user(&request->list[i].address,
b5e89ed5 1591 &address, sizeof(address))) {
1da177e4
LT
1592 retcode = -EFAULT;
1593 goto done;
1594 }
1595 }
1596 }
b5e89ed5 1597 done:
c153f45f
EA
1598 request->count = dma->buf_count;
1599 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1da177e4
LT
1600
1601 return retcode;
1602}
1603
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
b5e89ed5 1613int drm_order(unsigned long size)
836cf046
DA
1614{
1615 int order;
1616 unsigned long tmp;
1617
b5e89ed5 1618 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
836cf046
DA
1619
1620 if (size & (size - 1))
1621 ++order;
1622
1623 return order;
1624}
1625EXPORT_SYMBOL(drm_order);
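
/*
 * Illustrative expectations for drm_order() (an addition for clarity, not
 * part of the original file): it returns the exponent of the smallest power
 * of two that is greater than or equal to the given size.
 */
#if 0
static void example_drm_order(void)
{
	BUG_ON(drm_order(1) != 0);	/* 2^0 == 1 */
	BUG_ON(drm_order(4096) != 12);	/* exact power of two */
	BUG_ON(drm_order(4097) != 13);	/* rounds up to 8192 */
}
#endif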