enum cxlflash_state {
STATE_NORMAL, /* Normal running state, everything good */
- STATE_LIMBO, /* Limbo running state, trying to reset/recover */
+ STATE_RESET, /* Reset state, trying to reset/recover */
STATE_FAILTERM /* Failed/terminating state, error out users/threads */
};
wait_queue_head_t tmf_waitq;
bool tmf_active;
- wait_queue_head_t limbo_waitq;
+ wait_queue_head_t reset_waitq;
enum cxlflash_state state;
};
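/*
 * Sketch (not part of the patch): the wait/wake pattern the rename keeps
 * intact. Waiters sleep on reset_waitq until the device leaves STATE_RESET,
 * then re-check whether recovery settled in STATE_NORMAL or STATE_FAILTERM;
 * the recovery path sets cfg->state and wakes everyone. The two function
 * names below are illustrative only, not taken from the driver.
 */
static int wait_for_reset_sketch(struct cxlflash_cfg *cfg)
{
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
	return (cfg->state == STATE_NORMAL) ? SUCCESS : FAILED;
}

static void finish_reset_sketch(struct cxlflash_cfg *cfg, int rcr)
{
	cfg->state = rcr ? STATE_FAILTERM : STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}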
spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
switch (cfg->state) {
- case STATE_LIMBO:
- dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
+ case STATE_RESET:
+ dev_dbg_ratelimited(&cfg->dev->dev, "%s: device is in reset!\n",
__func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
if (unlikely(rcr))
rc = FAILED;
break;
- case STATE_LIMBO:
- wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
+ case STATE_RESET:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
break;
/* fall through */
switch (cfg->state) {
case STATE_NORMAL:
- cfg->state = STATE_LIMBO;
+ cfg->state = STATE_RESET;
scsi_block_requests(cfg->host);
cxlflash_mark_contexts_error(cfg);
rcr = cxlflash_afu_reset(cfg);
cfg->state = STATE_FAILTERM;
} else
cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
scsi_unblock_requests(cfg->host);
break;
- case STATE_LIMBO:
- wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
+ case STATE_RESET:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
break;
/* fall through */
struct pci_dev *pdev = cfg->dev;
if (pci_channel_offline(pdev))
- wait_event_timeout(cfg->limbo_waitq,
+ wait_event_timeout(cfg->reset_waitq,
!pci_channel_offline(pdev),
CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
cfg->mcctx = NULL;
init_waitqueue_head(&cfg->tmf_waitq);
- init_waitqueue_head(&cfg->limbo_waitq);
+ init_waitqueue_head(&cfg->reset_waitq);
INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
cfg->lr_state = LINK_RESET_INVALID;
switch (state) {
case pci_channel_io_frozen:
- cfg->state = STATE_LIMBO;
+ cfg->state = STATE_RESET;
scsi_block_requests(cfg->host);
drain_ioctls(cfg);
rc = cxlflash_mark_contexts_error(cfg);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
cfg->state = STATE_FAILTERM;
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
scsi_unblock_requests(cfg->host);
return PCI_ERS_RESULT_DISCONNECT;
default:
dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
scsi_unblock_requests(cfg->host);
}
dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
__func__);
- wake_up_all(&cfg->limbo_waitq);
+ wake_up_all(&cfg->reset_waitq);
ssleep(1);
}
}
retry:
switch (cfg->state) {
- case STATE_LIMBO:
- dev_dbg(dev, "%s: Limbo state, going to wait...\n", __func__);
+ case STATE_RESET:
+ dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
up_read(&cfg->ioctl_rwsem);
- rc = wait_event_interruptible(cfg->limbo_waitq,
- cfg->state != STATE_LIMBO);
+ rc = wait_event_interruptible(cfg->reset_waitq,
+ cfg->state != STATE_RESET);
down_read(&cfg->ioctl_rwsem);
if (unlikely(rc))
break;
* quite possible for this routine to act as the kernel's EEH detection
* source (MMIO read of mbox_r). Because of this, there is a window of
* time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter limbo state). To avoid
+ * (callback invoked, causing the device to enter reset state). To avoid
* looping in this routine during that window, a 1 second sleep is in place
* between the time the MMIO failure is detected and the time a wait on the
- * limbo wait queue is attempted via check_state().
+ * reset wait queue is attempted via check_state().
*
* Return: 0 on success, -errno on failure
*/
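/*
 * Sketch (not part of the patch) of the window described above, under the
 * new naming. afu_mmio_read_failed() is a hypothetical stand-in for the
 * mbox_r MMIO read; check_state() and ssleep() are the calls named in the
 * comment. Retry limits and the sync operation itself are elided.
 */
static int afu_sync_window_sketch(struct cxlflash_cfg *cfg, struct afu *afu)
{
	int rc = 0;

	if (afu_mmio_read_failed(afu)) {
		/* EEH may have fired but not yet moved the device to STATE_RESET */
		ssleep(1);
		/* now sleeps on cfg->reset_waitq if the reset callback has run */
		rc = check_state(cfg);
	}

	return rc;
}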