{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
+ int *reloc_offset;
int i, total, ret;
/* We may process another execbuffer during the unlock... */
total = 0;
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
+ reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
reloc = drm_malloc_ab(total, sizeof(*reloc));
- if (reloc == NULL) {
+ if (reloc == NULL || reloc_offset == NULL) {
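+ /* drm_free_large(NULL) is a no-op, so both frees are safe here */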
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
}
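+ /* remember where exec[i]'s relocations begin in the combined array */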
+ reloc_offset[i] = total;
total += exec[i].relocation_count;
}
if (ret)
goto err;
- total = 0;
list_for_each_entry(obj, objects, exec_list) {
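+ /* recover this object's index in exec[]; the list order need not match the array order */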
+ int offset = obj->exec_entry - exec;
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + total);
+ reloc + reloc_offset[offset]);
if (ret)
goto err;
-
- total += exec->relocation_count;
- exec++;
}
/* Leave the user relocations as are, this is the painfully slow path,
* and we want to avoid the complication of dropping the lock whilst
* having buffers reserved in the aperture and so causing spurious
* ENOSPC for random operations.
*/
err:
drm_free_large(reloc);
+ drm_free_large(reloc_offset);
return ret;
}
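For clarity, the indexing scheme the patch introduces can be shown in isolation. The sketch below is illustrative userspace C, not kernel code; the struct names, the visit order, and main() are invented for the example. It demonstrates the two pieces the patch adds: a prefix-sum table (reloc_offset[]) built while walking exec[] in order, and pointer arithmetic (obj->exec_entry - exec) that recovers an object's index so the right slice of the flattened relocation array is used no matter what order the objects are visited in. The removed code instead accumulated `total` and stepped `exec++` in list-walk order, which is only correct when the object list happens to match the exec[] array order.

	/* Illustrative sketch only: mirrors the patch's indexing, not the kernel API. */
	#include <stdio.h>
	#include <stdlib.h>

	struct exec_entry { int relocation_count; };
	struct object { struct exec_entry *exec_entry; };

	int main(void)
	{
		struct exec_entry exec[3] = { {2}, {5}, {1} };
		struct object objs[3] = { {&exec[0]}, {&exec[1]}, {&exec[2]} };
		int visit[3] = { 2, 0, 1 };	/* deliberately out of order */
		int count = 3, total = 0, i;
		int *reloc_offset = malloc(count * sizeof(*reloc_offset));

		if (reloc_offset == NULL)
			return 1;

		/* Pass 1: record where each buffer's relocations start. */
		for (i = 0; i < count; i++) {
			reloc_offset[i] = total;
			total += exec[i].relocation_count;
		}

		/* Pass 2: visit objects out of order; the index comes from
		 * the exec_entry pointer, not from the visit order.
		 */
		for (i = 0; i < count; i++) {
			struct object *obj = &objs[visit[i]];
			int offset = (int)(obj->exec_entry - exec);
			printf("object %d: relocs [%d..%d)\n", offset,
			       reloc_offset[offset],
			       reloc_offset[offset] + exec[offset].relocation_count);
		}

		free(reloc_offset);
		return 0;
	}

Running this prints object 2's slice [7..8), then object 0's [0..2), then object 1's [2..7): each object finds its own relocations even though the walk order differs from the exec[] order, which is exactly the case the old `reloc + total` accumulation got wrong.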