Merge tag 'v4.8-rc8' into drm-next
author Dave Airlie <airlied@redhat.com>
Wed, 28 Sep 2016 02:08:49 +0000 (12:08 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 28 Sep 2016 02:08:49 +0000 (12:08 +1000)
Linux 4.8-rc8

There was a lot of fallout in the imx, amdgpu, and i915 drivers, so backmerge
it now to avoid trouble.

* tag 'v4.8-rc8': (1442 commits)
  Linux 4.8-rc8
  fault_in_multipages_readable() throws set-but-unused error
  mm: check VMA flags to avoid invalid PROT_NONE NUMA balancing
  radix tree: fix sibling entry handling in radix_tree_descend()
  radix tree test suite: Test radix_tree_replace_slot() for multiorder entries
  fix memory leaks in tracing_buffers_splice_read()
  tracing: Move mutex to protect against resetting of seq data
  MIPS: Fix delay slot emulation count in debugfs
  MIPS: SMP: Fix possibility of deadlock when bringing CPUs online
  mm: delete unnecessary and unsafe init_tlb_ubc()
  huge tmpfs: fix Committed_AS leak
  shmem: fix tmpfs to handle the huge= option properly
  blk-mq: skip unmapped queues in blk_mq_alloc_request_hctx
  MIPS: Fix pre-r6 emulation FPU initialisation
  arm64: kgdb: handle read-only text / modules
  arm64: Call numa_store_cpu_info() earlier.
  locking/hung_task: Fix typo in CONFIG_DETECT_HUNG_TASK help text
  nvme-rdma: only clear queue flags after successful connect
  i2c: qup: skip qup_i2c_suspend if the device is already runtime suspended
  perf/core: Limit matching exclusive events to one PMU
  ...

25 files changed:
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_gem.c

diff --cc MAINTAINERS
Simple merge
index 9d79e4ba0213be8c85a60d4fb6ebb58a60a055b1,700c56baf2de7110fdab3dacf1bee91d1a82dbb7..72c68dbb982136b73f84ad881ca30a90ca2f8866
@@@ -405,9 -396,48 +405,8 @@@ int amdgpu_fence_wait_empty(struct amdg
  unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
  
  /*
 - * TTM.
 + * BO.
   */
 -
 -#define AMDGPU_TTM_LRU_SIZE   20
 -
 -struct amdgpu_mman_lru {
 -      struct list_head                *lru[TTM_NUM_MEM_TYPES];
 -      struct list_head                *swap_lru;
 -};
 -
 -struct amdgpu_mman {
 -      struct ttm_bo_global_ref        bo_global_ref;
 -      struct drm_global_reference     mem_global_ref;
 -      struct ttm_bo_device            bdev;
 -      bool                            mem_global_referenced;
 -      bool                            initialized;
 -
 -#if defined(CONFIG_DEBUG_FS)
 -      struct dentry                   *vram;
 -      struct dentry                   *gtt;
 -#endif
 -
 -      /* buffer handling */
 -      const struct amdgpu_buffer_funcs        *buffer_funcs;
 -      struct amdgpu_ring                      *buffer_funcs_ring;
 -      /* Scheduler entity for buffer moves */
 -      struct amd_sched_entity                 entity;
 -
 -      /* custom LRU management */
 -      struct amdgpu_mman_lru                  log2_size[AMDGPU_TTM_LRU_SIZE];
 -      /* guard for log2_size array, don't add anything in between */
 -      struct amdgpu_mman_lru                  guard;
 -};
 -
 -int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 -                     uint64_t src_offset,
 -                     uint64_t dst_offset,
 -                     uint32_t byte_count,
 -                     struct reservation_object *resv,
 -                     struct fence **fence);
 -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
--
  struct amdgpu_bo_list_entry {
        struct amdgpu_bo                *robj;
        struct ttm_validate_buffer      tv;
@@@ -620,12 -648,11 +619,12 @@@ int amdgpu_gart_table_vram_pin(struct a
  void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
  int amdgpu_gart_init(struct amdgpu_device *adev);
  void amdgpu_gart_fini(struct amdgpu_device *adev);
- void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages);
- int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint32_t flags);
 +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
  
  /*
   * GPU MC structures, functions & helpers
index b63969d7887caf4d575a35a94b1c9f05ff3c126e,716f2afeb6a9a4a403b56a5c28ddf9de489f8746..160a094e1a934e9269a32ccdadf71fdcae796d32
@@@ -273,17 -251,15 +273,17 @@@ static int amdgpu_move_blit(struct ttm_
  
        adev = amdgpu_get_adev(bo->bdev);
        ring = adev->mman.buffer_funcs_ring;
-       old_start = old_mem->start << PAGE_SHIFT;
-       new_start = new_mem->start << PAGE_SHIFT;
+       old_start = (u64)old_mem->start << PAGE_SHIFT;
+       new_start = (u64)new_mem->start << PAGE_SHIFT;
  
        switch (old_mem->mem_type) {
 -      case TTM_PL_VRAM:
 -              old_start += adev->mc.vram_start;
 -              break;
        case TTM_PL_TT:
 -              old_start += adev->mc.gtt_start;
 +              r = amdgpu_ttm_bind(bo->ttm, old_mem);
 +              if (r)
 +                      return r;
 +
 +      case TTM_PL_VRAM:
 +              old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
Simple merge
Simple merge
Simple merge
Simple merge
index c8bd02277b7d4ef66a51dafee058dad742a93c49,a77ce9983f69c9965725f806a008fc06a129935f..2c81067589225a89762a4129fac7eeee0abebed9
@@@ -2591,78 -3109,145 +2591,81 @@@ static void i915_gem_reset_engine(struc
                return;
  
        ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 -
        i915_set_reset_status(request->ctx, ring_hung);
 -      list_for_each_entry_continue(request, &engine->request_list, list)
 -              i915_set_reset_status(request->ctx, false);
 -}
 -
 -static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 -{
 -      struct intel_ringbuffer *buffer;
 -
 -      while (!list_empty(&engine->active_list)) {
 -              struct drm_i915_gem_object *obj;
 -
 -              obj = list_first_entry(&engine->active_list,
 -                                     struct drm_i915_gem_object,
 -                                     engine_list[engine->id]);
 -
 -              i915_gem_object_retire__read(obj, engine->id);
 -      }
 -
 -      /*
 -       * Clear the execlists queue up before freeing the requests, as those
 -       * are the ones that keep the context and ringbuffer backing objects
 -       * pinned in place.
 -       */
 -
 -      if (i915.enable_execlists) {
 -              /* Ensure irq handler finishes or is cancelled. */
 -              tasklet_kill(&engine->irq_tasklet);
 -
 -              intel_execlists_cancel_requests(engine);
 -      }
 -
 -      /*
 -       * We must free the requests after all the corresponding objects have
 -       * been moved off active lists. Which is the same order as the normal
 -       * retire_requests function does. This is important if object hold
 -       * implicit references on things like e.g. ppgtt address spaces through
 -       * the request.
 -       */
 -      while (!list_empty(&engine->request_list)) {
 -              struct drm_i915_gem_request *request;
 +      if (!ring_hung)
 +              return;
  
 -              request = list_first_entry(&engine->request_list,
 -                                         struct drm_i915_gem_request,
 -                                         list);
 +      DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
 +                       engine->name, request->fence.seqno);
  
 -              i915_gem_request_retire(request);
 -      }
 +      /* Setup the CS to resume from the breadcrumb of the hung request */
 +      engine->reset_hw(engine, request);
  
 -      /* Having flushed all requests from all queues, we know that all
 -       * ringbuffers must now be empty. However, since we do not reclaim
 -       * all space when retiring the request (to prevent HEADs colliding
 -       * with rapid ringbuffer wraparound) the amount of available space
 -       * upon reset is less than when we start. Do one more pass over
 -       * all the ringbuffers to reset last_retired_head.
 +      /* Users of the default context do not rely on logical state
 +       * preserved between batches. They have to emit full state on
 +       * every batch and so it is safe to execute queued requests following
 +       * the hang.
 +       *
 +       * Other contexts preserve state, now corrupt. We want to skip all
 +       * queued requests that reference the corrupt context.
         */
 -      list_for_each_entry(buffer, &engine->buffers, link) {
 -              buffer->last_retired_head = buffer->tail;
 -              intel_ring_update_space(buffer);
 -      }
 +      incomplete_ctx = request->ctx;
 +      if (i915_gem_context_is_default(incomplete_ctx))
 +              return;
  
 -      intel_ring_init_seqno(engine, engine->last_submitted_seqno);
 +      list_for_each_entry_continue(request, &engine->request_list, link)
 +              if (request->ctx == incomplete_ctx)
 +                      reset_request(request);
+       engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
  }
  
 -void i915_gem_reset(struct drm_device *dev)
 +void i915_gem_reset(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
  
 -      /*
 -       * Before we free the objects from the requests, we need to inspect
 -       * them for finding the guilty party. As the requests only borrow
 -       * their reference to the objects, the inspection must be done first.
 -       */
 -      for_each_engine(engine, dev_priv)
 -              i915_gem_reset_engine_status(engine);
 +      i915_gem_retire_requests(dev_priv);
  
        for_each_engine(engine, dev_priv)
 -              i915_gem_reset_engine_cleanup(engine);
 +              i915_gem_reset_engine(engine);
+       mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
  
 -      i915_gem_context_reset(dev);
 -
 -      i915_gem_restore_fences(dev);
 +      i915_gem_restore_fences(&dev_priv->drm);
 +}
  
 -      WARN_ON(i915_verify_lists(dev));
 +static void nop_submit_request(struct drm_i915_gem_request *request)
 +{
  }
  
 -/**
 - * This function clears the request list as sequence numbers are passed.
 - * @engine: engine to retire requests on
 - */
 -void
 -i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 +static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
  {
 -      WARN_ON(i915_verify_lists(engine->dev));
 +      engine->submit_request = nop_submit_request;
  
 -      /* Retire requests first as we use it above for the early return.
 -       * If we retire requests last, we may use a later seqno and so clear
 -       * the requests lists without clearing the active list, leading to
 -       * confusion.
 +      /* Mark all pending requests as complete so that any concurrent
 +       * (lockless) lookup doesn't try and wait upon the request as we
 +       * reset it.
         */
 -      while (!list_empty(&engine->request_list)) {
 -              struct drm_i915_gem_request *request;
 -
 -              request = list_first_entry(&engine->request_list,
 -                                         struct drm_i915_gem_request,
 -                                         list);
 -
 -              if (!i915_gem_request_completed(request))
 -                      break;
 -
 -              i915_gem_request_retire(request);
 -      }
 +      intel_engine_init_seqno(engine, engine->last_submitted_seqno);
  
 -      /* Move any buffers on the active list that are no longer referenced
 -       * by the ringbuffer to the flushing/inactive lists as appropriate,
 -       * before we free the context associated with the requests.
 +      /*
 +       * Clear the execlists queue up before freeing the requests, as those
 +       * are the ones that keep the context and ringbuffer backing objects
 +       * pinned in place.
         */
 -      while (!list_empty(&engine->active_list)) {
 -              struct drm_i915_gem_object *obj;
 -
 -              obj = list_first_entry(&engine->active_list,
 -                                     struct drm_i915_gem_object,
 -                                     engine_list[engine->id]);
 -
 -              if (!list_empty(&obj->last_read_req[engine->id]->list))
 -                      break;
  
 -              i915_gem_object_retire__read(obj, engine->id);
 +      if (i915.enable_execlists) {
 +              spin_lock(&engine->execlist_lock);
 +              INIT_LIST_HEAD(&engine->execlist_queue);
 +              i915_gem_request_put(engine->execlist_port[0].request);
 +              i915_gem_request_put(engine->execlist_port[1].request);
 +              memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
 +              spin_unlock(&engine->execlist_lock);
        }
  
 -      WARN_ON(i915_verify_lists(engine->dev));
 +      engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
  }
  
 -void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 +void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
  {
        struct intel_engine_cs *engine;
  
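
The i915_gem.c hunk above points engine->submit_request at nop_submit_request()
once the GPU is declared wedged, so later submissions are still accepted but
never touch the dead hardware. Below is a distilled, userspace-only sketch of
that function-pointer swap; the struct layout and names are illustrative, not
the i915 API:

    #include <stdio.h>

    struct request { int seqno; };

    struct engine {
        const char *name;
        void (*submit_request)(struct engine *e, struct request *rq);
    };

    /* Normal path: hand the request to the hardware. */
    static void hw_submit_request(struct engine *e, struct request *rq)
    {
        printf("%s: submitting request %d\n", e->name, rq->seqno);
    }

    /* Wedged path: accept the request but do nothing, so callers never
     * wait on hardware that will not answer. */
    static void nop_submit_request(struct engine *e, struct request *rq)
    {
        (void)e;
        (void)rq;
    }

    static void set_wedged(struct engine *e)
    {
        e->submit_request = nop_submit_request;
    }

    int main(void)
    {
        struct request rq = { .seqno = 1 };
        struct engine rcs = { .name = "rcs0",
                              .submit_request = hw_submit_request };

        rcs.submit_request(&rcs, &rq);   /* reaches the "hardware" */
        set_wedged(&rcs);
        rcs.submit_request(&rcs, &rq);   /* silently swallowed instead */
        return 0;
    }

Swapping the callback keeps every submission path uniform instead of
scattering wedged-GPU checks through the callers, which is the design the
hunk relies on.
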
index 497d99b8846883692ea0be1bb818b29319a5c15e,175595fc3e45d239ce528662696167bac4c73327..8d4c35d55b1bbd4e2848b5bf4d69a0abbdd05cbc
@@@ -3599,26 -3204,24 +3599,28 @@@ void intel_finish_reset(struct drm_i915
         */
        intel_complete_page_flips(dev_priv);
  
 -      /* no reset support for gen2 */
 -      if (IS_GEN2(dev_priv))
 -              return;
 +      dev_priv->modeset_restore_state = NULL;
  
+       dev_priv->modeset_restore_state = NULL;
        /* reset doesn't touch the display */
 -      if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 -              /*
 -               * Flips in the rings have been nuked by the reset,
 -               * so update the base address of all primary
 -               * planes to the the last fb to make sure we're
 -               * showing the correct fb after a reset.
 -               *
 -               * FIXME: Atomic will make this obsolete since we won't schedule
 -               * CS-based flips (which might get lost in gpu resets) any more.
 -               */
 -              intel_update_primary_planes(dev);
 +      if (!gpu_reset_clobbers_display(dev_priv)) {
 +              if (!state) {
 +                      /*
 +                       * Flips in the rings have been nuked by the reset,
 +                       * so update the base address of all primary
 +                       * planes to the the last fb to make sure we're
 +                       * showing the correct fb after a reset.
 +                       *
 +                       * FIXME: Atomic will make this obsolete since we won't schedule
 +                       * CS-based flips (which might get lost in gpu resets) any more.
 +                       */
 +                      intel_update_primary_planes(dev);
 +              } else {
 +                      ret = __intel_display_resume(dev, state);
 +                      if (ret)
 +                              DRM_ERROR("Restoring old state failed with %i\n", ret);
 +              }
        } else {
                /*
                 * The display has been reset as well,
index 9df29f1cb16af029a7bcbdd93a56562be464a2ef,462056e4b9e48b80cd5bf30ed618b12c27ae6c97..4e1ae3fc462dc65591d2fa5b3f6dffe3ee8a4ad4
@@@ -77,8 -77,7 +77,10 @@@ static void ipu_crtc_atomic_disable(str
        }
        spin_unlock_irq(&crtc->dev->event_lock);
  
 +      /* always disable planes on the CRTC */
 +      drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
++
+       drm_crtc_vblank_off(crtc);
  }
  
  static void imx_drm_crtc_reset(struct drm_crtc *crtc)
Simple merge
index 3ac14cd1e5b9a023666a3b576a8ed73de312dc0d,880d6a9af7c8d28dae1beb87492e849702709fb0..b6a0f37a65f30cad9f85d5773fca802e9ea3bb6b
@@@ -385,13 -394,7 +402,14 @@@ int msm_ioctl_gem_submit(struct drm_dev
        if (ret)
                return ret;
  
 +      if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
 +              out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
 +              if (out_fence_fd < 0) {
 +                      ret = out_fence_fd;
 +                      goto out_unlock;
 +              }
 +      }
+       priv->struct_mutex_task = current;
  
        submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
        if (!submit) {
@@@ -529,8 -487,7 +547,9 @@@ out
        if (ret)
                msm_gem_submit_free(submit);
  out_unlock:
 +      if (ret && (out_fence_fd >= 0))
 +              put_unused_fd(out_fence_fd);
+       priv->struct_mutex_task = NULL;
        mutex_unlock(&dev->struct_mutex);
        return ret;
  }
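
In the msm_ioctl_gem_submit() hunk above, the out-fence file descriptor is
reserved up front with get_unused_fd_flags(O_CLOEXEC) and released with
put_unused_fd() if the submission fails, so the error path never leaks the
reserved fd number (on success the driver goes on to bind a fence file to it).
The sketch below mirrors that reserve-early/release-on-error shape in plain
userspace C; do_submit() is a made-up name, and open()/close() on /dev/null
merely stand in for the kernel fd helpers:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int do_submit(int want_out_fd, int *out_fd)
    {
        int fd = -1;
        int ret;

        /* Reserve the "out" fd first, like get_unused_fd_flags(). */
        if (want_out_fd) {
            fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
            if (fd < 0)
                return -errno;
        }

        /* ... build and queue the submission; pretend it failed ... */
        ret = -EINVAL;
        if (ret)
            goto out;

        *out_fd = fd;
        return 0;

    out:
        if (want_out_fd && fd >= 0)
            close(fd);   /* analogue of put_unused_fd() */
        return ret;
    }

    int main(void)
    {
        int fd;

        printf("do_submit: %d\n", do_submit(1, &fd));
        return 0;
    }
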
Simple merge
Simple merge
Simple merge