 	if (node->color != color)
 		*start += 4096;

-	if (!list_empty(&node->node_list)) {
-		node = list_entry(node->node_list.next,
-				  struct drm_mm_node,
-				  node_list);
-		if (node->allocated && node->color != color)
-			*end -= 4096;
-	}
+	node = list_first_entry_or_null(&node->node_list,
+					struct drm_mm_node,
+					node_list);
+	if (node && node->allocated && node->color != color)
+		*end -= 4096;
 }
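
The conversion above is mechanical: list_first_entry_or_null() folds the
list_empty() test and the list_entry() lookup into one call that returns
NULL for an empty list. As a minimal sketch of the idiom outside the
kernel (the list helpers below are simplified re-implementations of the
<linux/list.h> macros, not the real ones, and struct item is invented
purely for illustration):

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define list_first_entry(head, type, member) \
		container_of((head)->next, type, member)

	/* The helper the patch switches to: NULL instead of a bogus
	 * pointer when the list is empty. */
	#define list_first_entry_or_null(head, type, member) \
		(!list_empty(head) ? list_first_entry(head, type, member) : NULL)

	struct item { int value; struct list_head link; };

	int main(void)
	{
		struct list_head queue = LIST_HEAD_INIT(queue);
		struct item a = { .value = 42 };
		struct item *it;

		/* Empty list: the _or_null form simply yields NULL. */
		it = list_first_entry_or_null(&queue, struct item, link);
		printf("empty: %p\n", (void *)it);

		list_add_tail(&a.link, &queue);

		/* Old two-step form being removed by the patch. */
		if (!list_empty(&queue)) {
			it = list_first_entry(&queue, struct item, link);
			printf("old: %d\n", it->value);
		}

		/* New one-step form, as in the hunks here. */
		it = list_first_entry_or_null(&queue, struct item, link);
		if (it)
			printf("new: %d\n", it->value);
		return 0;
	}

Collapsing the check and the lookup removes a level of nesting and leaves
NULL as the single "no entry" signal, which is what the req && ... and
obj && ... guards in the following hunks key off.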
@@ ... @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 		return ret;

 	/* Move the oldest request to the slab-cache (if not in use!) */
-	if (!list_empty(&engine->request_list)) {
-		req = list_first_entry(&engine->request_list,
-				       typeof(*req), list);
-		if (i915_gem_request_completed(req))
-			i915_gem_request_retire(req);
-	}
+	req = list_first_entry_or_null(&engine->request_list,
+				       typeof(*req), list);
+	if (req && i915_gem_request_completed(req))
+		i915_gem_request_retire(req);

 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (!req)
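
For the request hunk, the guard ordering is what makes the rewrite safe:
&& short-circuits, so i915_gem_request_completed() is never called with a
NULL req. A sketch of the same peek-and-retire shape, reusing the list
helpers from the sketch above (struct request, completed and
retire_oldest are illustrative names, not the i915 API):

	struct request {
		int completed;	/* stands in for i915_gem_request_completed() */
		struct list_head link;
	};

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	/* Retire the oldest request iff the list is non-empty and it is done. */
	static void retire_oldest(struct list_head *queue)
	{
		struct request *req;

		req = list_first_entry_or_null(queue, struct request, link);
		if (req && req->completed)	/* NULL short-circuits first */
			list_del(&req->link);	/* real code also recycles it */
	}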
 	 */
 	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
+		struct drm_i915_gem_object *obj;

 		if ((flags & phase->bit) == 0)
 			continue;

 		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(phase->list)) {
-			struct drm_i915_gem_object *obj;
+		while (count < target &&
+		       (obj = list_first_entry_or_null(phase->list,
+						       typeof(*obj),
+						       global_list))) {
 			struct i915_vma *vma, *v;

-			obj = list_first_entry(phase->list,
-					       typeof(*obj), global_list);
 			list_move_tail(&obj->global_list, &still_in_list);

 			if (flags & I915_SHRINK_PURGEABLE &&
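
The shrinker hunk goes one step further and moves the lookup into the
loop condition, so the assignment doubles as the termination test and the
per-iteration list_empty() check disappears. A sketch of that
drain-and-restore shape under the same assumptions (shrink, pool and
target are invented names; the list helpers come from the first sketch):

	static void list_move_tail(struct list_head *entry, struct list_head *head)
	{
		list_del(entry);		/* from the previous sketch */
		list_add_tail(entry, head);	/* from the first sketch */
	}

	/* Scan up to target items; everything scanned goes back afterwards. */
	static int shrink(struct list_head *pool, int target)
	{
		struct list_head still_in_list = LIST_HEAD_INIT(still_in_list);
		struct item *obj;
		int count = 0;

		while (count < target &&
		       (obj = list_first_entry_or_null(pool, struct item, link))) {
			/* Move first: even if reclaim fails, this pass
			 * never rescans obj. */
			list_move_tail(&obj->link, &still_in_list);
			count++;	/* stand-in for the real reclaim work */
		}

		/* Splice the scanned entries back for the next pass. */
		while ((obj = list_first_entry_or_null(&still_in_list,
						       struct item, link)))
			list_move_tail(&obj->link, pool);

		return count;
	}

Moving each object onto the private still_in_list before touching it is
what guarantees forward progress: the real loop can keep re-reading the
head of phase->list because every iteration removes that head, whether or
not the object is ultimately reclaimed.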