req = obj->last_write_req;
if (req == NULL)
return 0;
- requests[n++] = i915_gem_request_reference(req);
+ requests[n++] = i915_gem_request_get(req);
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req == NULL)
continue;
- requests[n++] = i915_gem_request_reference(req);
+ requests[n++] = i915_gem_request_get(req);
}
}
...
for (i = 0; i < n; i++) {
if (ret == 0)
i915_gem_object_retire_request(obj, requests[i]);
- i915_gem_request_unreference(requests[i]);
+ i915_gem_request_put(requests[i]);
}
return ret;
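
The two hunks above come from the nonblocking wait-rendering path and show the shape of most call sites this rename touches: pin each live request with a reference while struct_mutex is held, drop the lock to wait, then balance every get with a put. A condensed sketch of that pattern; wait_on_request() is a hypothetical stand-in for the full __i915_wait_request() call:

    struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
    int i, n = 0, ret = 0;

    /* under struct_mutex: snapshot the live requests */
    for (i = 0; i < I915_NUM_ENGINES; i++) {
        if (obj->last_read_req[i])
            requests[n++] = i915_gem_request_get(obj->last_read_req[i]);
    }
    mutex_unlock(&dev->struct_mutex);

    /* unlocked: the references keep each request alive while we wait */
    for (i = 0; i < n; i++) {
        if (ret == 0)
            ret = wait_on_request(requests[i]); /* hypothetical */
        i915_gem_request_put(requests[i]);      /* balance the get */
    }
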
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
continue;
- req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+ req[n++] = i915_gem_request_get(obj->last_read_req[i]);
}
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
if (ret == 0)
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
- i915_gem_request_unreference(req[i]);
+ i915_gem_request_put(req[i]);
}
return ret;
...
target = request;
}
if (target)
- i915_gem_request_reference(target);
+ i915_gem_request_get(target);
spin_unlock(&file_priv->mm.lock);
if (target == NULL)
return 0;
ret = __i915_wait_request(target, true, NULL, NULL);
- i915_gem_request_unreference(target);
+ i915_gem_request_put(target);
return ret;
}
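
One detail in the throttle hunk above: the reference is taken under file_priv->mm.lock, a spinlock, rather than struct_mutex. That works because get/put only adjust the atomic kref embedded in the request's fence. Condensed from the lines above:

    spin_lock(&file_priv->mm.lock);
    /* scan the per-file request list and pick target (elided above) */
    if (target)
        i915_gem_request_get(target);   /* atomic; safe under a spinlock */
    spin_unlock(&file_priv->mm.lock);

    if (target) {
        ret = __i915_wait_request(target, true, NULL, NULL);
        i915_gem_request_put(target);
    }
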
}
i915_gem_context_unreference(request->ctx);
- i915_gem_request_unreference(request);
+ i915_gem_request_put(request);
}
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
...
}
static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
+i915_gem_request_get(struct drm_i915_gem_request *req)
{
return to_request(fence_get(&req->fence));
}
static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
+i915_gem_request_put(struct drm_i915_gem_request *req)
{
fence_put(&req->fence);
}
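
As the header hunk shows, the renamed helpers are thin wrappers around the dma-fence refcount, and i915_gem_request_get() returns its argument, so taking a reference can be folded into an assignment. A usage sketch; lookup_request() and do_work() are hypothetical stand-ins:

    struct drm_i915_gem_request *req;

    req = i915_gem_request_get(lookup_request()); /* +1 on req->fence */
    do_work(req);
    i915_gem_request_put(req);                    /* -1, may free req */
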
static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
if (src)
- i915_gem_request_reference(src);
+ i915_gem_request_get(src);
if (*pdst)
- i915_gem_request_unreference(*pdst);
+ i915_gem_request_put(*pdst);
*pdst = src;
}
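
i915_gem_request_assign() is untouched apart from its callees: because the new reference is taken before the old one is dropped, it stays correct even when src == *pdst, and passing NULL simply releases whatever the slot held. For example, with tracked as a hypothetical slot and req any live request:

    struct drm_i915_gem_request *tracked = NULL;

    i915_gem_request_assign(&tracked, req);  /* +1 on req, slot set */
    i915_gem_request_assign(&tracked, req);  /* self-assignment: count unchanged */
    i915_gem_request_assign(&tracked, NULL); /* drops the reference, clears slot */
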
req = obj->last_read_req[i];
if (req == NULL)
continue;
- requests[n++] = i915_gem_request_reference(req);
+ requests[n++] = i915_gem_request_get(req);
}
mutex_unlock(&dev->struct_mutex);
...
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++)
- i915_gem_request_unreference(requests[i]);
+ i915_gem_request_put(requests[i]);
}
static void cancel_userptr(struct work_struct *work)
rb_erase(&request->signaling.node, &b->signals);
spin_unlock(&b->lock);
- i915_gem_request_unreference(request);
+ i915_gem_request_put(request);
} else {
if (kthread_should_stop())
break;
request->signaling.wait.tsk = b->signaler;
request->signaling.wait.seqno = request->fence.seqno;
- i915_gem_request_reference(request);
+ i915_gem_request_get(request);
/* First add ourselves into the list of waiters, but register our
* bottom-half as the signaller thread. As per usual, only the oldest
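
In the breadcrumbs signaler above, the reference lifetime tracks membership in the signal tree: the get happens when the request is registered for signaling, the put when its node is erased after completion. Schematically, condensed from the hunks above:

    /* registration: the signal tree owns one reference */
    i915_gem_request_get(request);
    /* rb-tree insertion of request->signaling.node elided */

    /* completion: unlink under b->lock, then drop the reference unlocked */
    rb_erase(&request->signaling.node, &b->signals);
    spin_unlock(&b->lock);
    i915_gem_request_put(request);
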
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
drm_gem_object_unreference(&work->pending_flip_obj->base);
-
- if (work->flip_queued_req)
- i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
+ i915_gem_request_put(work->flip_queued_req);
+
intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
intel_fbc_post_update(crtc);
drm_framebuffer_unreference(work->old_fb);
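
Worth flagging: this page-flip hunk is more than a rename. The old code released flip_queued_req via i915_gem_request_assign(..., NULL) while still holding struct_mutex; dropping a fence reference needs no lock, so the put moves after mutex_unlock() and the critical section gets a little shorter. The resulting code reads:

    mutex_lock(&dev->struct_mutex);
    intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
    drm_gem_object_unreference(&work->pending_flip_obj->base);
    mutex_unlock(&dev->struct_mutex);

    /* unlocked: releasing the request only drops a fence refcount */
    i915_gem_request_put(work->flip_queued_req);
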
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
- i915_gem_request_unreference(req0);
+ i915_gem_request_put(req0);
req0 = cursor;
} else {
...
if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
list_del(&head_req->execlist_link);
- i915_gem_request_unreference(head_req);
+ i915_gem_request_put(head_req);
return 1;
}
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
- i915_gem_request_unreference(tail_req);
+ i915_gem_request_put(tail_req);
}
}
- i915_gem_request_reference(request);
+ i915_gem_request_get(request);
list_add_tail(&request->execlist_link, &engine->execlist_queue);
request->ctx_hw_id = request->ctx->hw_id;
if (num_elements == 0)
list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
list_del(&req->execlist_link);
- i915_gem_request_unreference(req);
+ i915_gem_request_put(req);
}
}
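
The execlists hunks all enforce one ownership rule: a request sitting on engine->execlist_queue holds exactly one reference, taken when it is queued for submission and dropped on whichever path unlinks it (retirement of a merged or completed request above, or cancellation here). Schematically:

    /* queueing for submission: the list takes one reference */
    i915_gem_request_get(request);
    list_add_tail(&request->execlist_link, &engine->execlist_queue);

    /* every unlink path must drop that reference */
    list_del(&request->execlist_link);
    i915_gem_request_put(request);
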
if (!i915_gem_request_completed(req))
gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
- i915_gem_request_unreference(req);
+ i915_gem_request_put(req);
kfree(boost);
}
if (boost == NULL)
return;
- i915_gem_request_reference(req);
- boost->req = req;
+ boost->req = i915_gem_request_get(req);
INIT_WORK(&boost->work, __intel_rps_boost_work);
queue_work(req->i915->wq, &boost->work);
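
The rps-boost hunks show the cross-thread form of the same pairing, and the one place where get() returning its argument visibly tidies the code: the submitting side takes the reference inside the store to boost->req, and the worker releases it once the boost has been handled. Condensed:

    /* submit side */
    boost->req = i915_gem_request_get(req); /* +1, folded into the store */
    queue_work(req->i915->wq, &boost->work);

    /* worker side, in __intel_rps_boost_work() */
    i915_gem_request_put(boost->req);       /* -1 once the boost is done */
    kfree(boost);
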