dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
interrupt_status);
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_FAILED);
+ sci_change_state(&scic->sm, SCIC_FAILED);
return;
}
{
struct isci_host *ihost = scic_to_ihost(scic);
- if (scic->state_machine.current_state_id ==
- SCI_BASE_CONTROLLER_STATE_STARTING) {
+ if (scic->sm.current_state_id == SCIC_STARTING) {
/*
* We move into the ready state, because some of the phys/ports
* may be up and operational.
*/
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_READY);
+ sci_change_state(&scic->sm, SCIC_READY);
isci_host_start_complete(ihost, status);
}
{
enum scic_sds_phy_states state;
- state = sci_phy->state_machine.current_state_id;
+ state = sci_phy->sm.current_state_id;
switch (state) {
- case SCI_BASE_PHY_STATE_STARTING:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL:
+ case SCI_PHY_STARTING:
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
return true;
default:
return false;
for (index = 0; index < SCI_MAX_PHYS; index++) {
sci_phy = &ihost->phys[index].sci;
- state = sci_phy->state_machine.current_state_id;
+ state = sci_phy->sm.current_state_id;
if (!phy_get_non_dummy_port(sci_phy))
continue;
* - have an indication of a connected device and it has
* finished the link training process.
*/
- if ((sci_phy->is_in_link_training == false &&
- state == SCI_BASE_PHY_STATE_INITIAL) ||
- (sci_phy->is_in_link_training == false &&
- state == SCI_BASE_PHY_STATE_STOPPED) ||
- (sci_phy->is_in_link_training == true &&
- is_phy_starting(sci_phy))) {
+ if ((sci_phy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (sci_phy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (sci_phy->is_in_link_training == true && is_phy_starting(sci_phy))) {
is_controller_start_complete = false;
break;
}
enum sci_status result;
u16 index;
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
+ if (scic->sm.current_state_id != SCIC_INITIALIZED) {
dev_warn(scic_to_dev(scic),
"SCIC Controller start operation requested in "
"invalid state\n");
sci_mod_timer(&scic->timer, timeout);
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_STARTING);
+ sci_change_state(&scic->sm, SCIC_STARTING);
return SCI_SUCCESS;
}
static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
u32 timeout)
{
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_READY) {
+ if (scic->sm.current_state_id != SCIC_READY) {
dev_warn(scic_to_dev(scic),
"SCIC Controller stop operation requested in "
"invalid state\n");
}
sci_mod_timer(&scic->timer, timeout);
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_STOPPING);
+ sci_change_state(&scic->sm, SCIC_STOPPING);
return SCI_SUCCESS;
}
*/
static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
{
- switch (scic->state_machine.current_state_id) {
- case SCI_BASE_CONTROLLER_STATE_RESET:
- case SCI_BASE_CONTROLLER_STATE_READY:
- case SCI_BASE_CONTROLLER_STATE_STOPPED:
- case SCI_BASE_CONTROLLER_STATE_FAILED:
+ switch (scic->sm.current_state_id) {
+ case SCIC_RESET:
+ case SCIC_READY:
+ case SCIC_STOPPED:
+ case SCIC_FAILED:
/*
* The reset operation is not a graceful cleanup, just
* perform the state transition.
*/
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_RESETTING);
+ sci_change_state(&scic->sm, SCIC_RESETTING);
return SCI_SUCCESS;
default:
dev_warn(scic_to_dev(scic),
static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_RESET);
+ sci_change_state(&scic->sm, SCIC_RESET);
}
static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
sci_del_timer(&scic->timer);
}
static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
/* set the default interrupt coalescence number and timeout value. */
scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
/* disable interrupt coalescence. */
scic_controller_set_interrupt_coalescence(scic, 0, 0);
static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
/* Stop all of the components for this controller */
scic_sds_controller_stop_phys(scic);
static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
sci_del_timer(&scic->timer);
}
static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);
+ struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
scic_sds_controller_reset_hardware(scic);
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_RESET);
+ sci_change_state(&scic->sm, SCIC_RESET);
}
static const struct sci_base_state scic_sds_controller_state_table[] = {
- [SCI_BASE_CONTROLLER_STATE_INITIAL] = {
+ [SCIC_INITIAL] = {
.enter_state = scic_sds_controller_initial_state_enter,
},
- [SCI_BASE_CONTROLLER_STATE_RESET] = {},
- [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
- [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
- [SCI_BASE_CONTROLLER_STATE_STARTING] = {
+ [SCIC_RESET] = {},
+ [SCIC_INITIALIZING] = {},
+ [SCIC_INITIALIZED] = {},
+ [SCIC_STARTING] = {
.exit_state = scic_sds_controller_starting_state_exit,
},
- [SCI_BASE_CONTROLLER_STATE_READY] = {
+ [SCIC_READY] = {
.enter_state = scic_sds_controller_ready_state_enter,
.exit_state = scic_sds_controller_ready_state_exit,
},
- [SCI_BASE_CONTROLLER_STATE_RESETTING] = {
+ [SCIC_RESETTING] = {
.enter_state = scic_sds_controller_resetting_state_enter,
},
- [SCI_BASE_CONTROLLER_STATE_STOPPING] = {
+ [SCIC_STOPPING] = {
.enter_state = scic_sds_controller_stopping_state_enter,
.exit_state = scic_sds_controller_stopping_state_exit,
},
- [SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
- [SCI_BASE_CONTROLLER_STATE_FAILED] = {}
+ [SCIC_STOPPED] = {},
+ [SCIC_FAILED] = {}
};
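/*
 * In the table above, each SCIC_* state maps to optional .enter_state and
 * .exit_state callbacks; entries with empty braces simply have no entry or
 * exit actions. Presumably sci_change_state() (the renamed
 * sci_base_state_machine_change_state()) runs the exit handler of the old
 * state followed by the enter handler of the new one, which is why, for
 * example, the start/stop timers are deleted in the corresponding exit
 * handlers.
 */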
static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
struct sci_timer *tmr = (struct sci_timer *)data;
struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer);
struct isci_host *ihost = scic_to_ihost(scic);
- struct sci_base_state_machine *sm = &scic->state_machine;
+ struct sci_base_state_machine *sm = &scic->sm;
unsigned long flags;
spin_lock_irqsave(&ihost->scic_lock, flags);
if (tmr->cancel)
goto done;
- if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
+ if (sm->current_state_id == SCIC_STARTING)
scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
- else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
- sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
+ else if (sm->current_state_id == SCIC_STOPPING) {
+ sci_change_state(sm, SCIC_FAILED);
isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
} else /* @todo Now what do we want to do in this case? */
dev_err(scic_to_dev(scic),
struct isci_host *ihost = scic_to_ihost(scic);
u8 i;
- sci_base_state_machine_construct(&scic->state_machine,
+ sci_base_state_machine_construct(&scic->sm,
scic_sds_controller_state_table,
- SCI_BASE_CONTROLLER_STATE_INITIAL);
+ SCIC_INITIAL);
- sci_base_state_machine_start(&scic->state_machine);
+ sci_base_state_machine_start(&scic->sm);
scic->scu_registers = scu_base;
scic->smu_registers = smu_base;
static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
union scic_oem_parameters *scic_parms)
{
- u32 state = scic->state_machine.current_state_id;
+ u32 state = scic->sm.current_state_id;
- if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
- state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
- state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
if (scic_oem_parameters_validate(&scic_parms->sds1))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
{
enum sci_status status = SCI_SUCCESS;
- if ((scic->state_machine.current_state_id ==
- SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
- (scic->state_machine.current_state_id ==
- SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
+ if ((scic->sm.current_state_id == SCIC_INITIALIZING) ||
+ (scic->sm.current_state_id == SCIC_INITIALIZED)) {
switch (operating_mode) {
case SCI_MODE_SPEED:
scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
- struct sci_base_state_machine *sm = &scic->state_machine;
+ struct sci_base_state_machine *sm = &scic->sm;
enum sci_status result = SCI_SUCCESS;
struct isci_host *ihost = scic_to_ihost(scic);
u32 index, state;
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_RESET) {
+ if (scic->sm.current_state_id != SCIC_RESET) {
dev_warn(scic_to_dev(scic),
"SCIC Controller initialize operation requested "
"in invalid state\n");
return SCI_FAILURE_INVALID_STATE;
}
- sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);
+ sci_change_state(sm, SCIC_INITIALIZING);
sci_init_timer(&scic->phy_timer, phy_startup_timeout);
/* Advance the controller state machine */
if (result == SCI_SUCCESS)
- state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
+ state = SCIC_INITIALIZED;
else
- state = SCI_BASE_CONTROLLER_STATE_FAILED;
- sci_base_state_machine_change_state(sm, state);
+ state = SCIC_FAILED;
+ sci_change_state(sm, state);
return result;
}
struct scic_sds_controller *scic,
union scic_user_parameters *scic_parms)
{
- u32 state = scic->state_machine.current_state_id;
+ u32 state = scic->sm.current_state_id;
- if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
- state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
- state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
u16 index;
/*
void scic_sds_controller_link_up(struct scic_sds_controller *scic,
struct scic_sds_port *port, struct scic_sds_phy *phy)
{
- switch (scic->state_machine.current_state_id) {
- case SCI_BASE_CONTROLLER_STATE_STARTING:
+ switch (scic->sm.current_state_id) {
+ case SCIC_STARTING:
sci_del_timer(&scic->phy_timer);
scic->phy_startup_timer_pending = false;
scic->port_agent.link_up_handler(scic, &scic->port_agent,
port, phy);
scic_sds_controller_start_next_phy(scic);
break;
- case SCI_BASE_CONTROLLER_STATE_READY:
+ case SCIC_READY:
scic->port_agent.link_up_handler(scic, &scic->port_agent,
port, phy);
break;
dev_dbg(scic_to_dev(scic),
"%s: SCIC Controller linkup event from phy %d in "
"unexpected state %d\n", __func__, phy->phy_index,
- scic->state_machine.current_state_id);
+ scic->sm.current_state_id);
}
}
void scic_sds_controller_link_down(struct scic_sds_controller *scic,
struct scic_sds_port *port, struct scic_sds_phy *phy)
{
- switch (scic->state_machine.current_state_id) {
- case SCI_BASE_CONTROLLER_STATE_STARTING:
- case SCI_BASE_CONTROLLER_STATE_READY:
+ switch (scic->sm.current_state_id) {
+ case SCIC_STARTING:
+ case SCIC_READY:
scic->port_agent.link_down_handler(scic, &scic->port_agent,
port, phy);
break;
"unexpected state %d\n",
__func__,
phy->phy_index,
- scic->state_machine.current_state_id);
+ scic->sm.current_state_id);
}
}
for (index = 0; index < controller->remote_node_entries; index++) {
if ((controller->device_table[index] != NULL) &&
- (controller->device_table[index]->state_machine.current_state_id
- == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
+ (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
return true;
}
void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev)
{
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_STOPPING) {
+ if (scic->sm.current_state_id != SCIC_STOPPING) {
dev_dbg(scic_to_dev(scic),
"SCIC Controller 0x%p remote device stopped event "
"from device 0x%p in unexpected state %d\n",
scic, sci_dev,
- scic->state_machine.current_state_id);
+ scic->sm.current_state_id);
return;
}
if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
- sci_base_state_machine_change_state(&scic->state_machine,
- SCI_BASE_CONTROLLER_STATE_STOPPED);
+ sci_change_state(&scic->sm, SCIC_STOPPED);
}
}
{
enum sci_status status;
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_READY) {
+ if (scic->sm.current_state_id != SCIC_READY) {
dev_warn(scic_to_dev(scic), "invalid state to start I/O");
return SCI_FAILURE_INVALID_STATE;
}
{
enum sci_status status;
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_READY) {
+ if (scic->sm.current_state_id != SCIC_READY) {
dev_warn(scic_to_dev(scic),
"invalid state to terminate request\n");
return SCI_FAILURE_INVALID_STATE;
enum sci_status status;
u16 index;
- switch (scic->state_machine.current_state_id) {
- case SCI_BASE_CONTROLLER_STATE_STOPPING:
+ switch (scic->sm.current_state_id) {
+ case SCIC_STOPPING:
/* XXX: Implement this function */
return SCI_FAILURE;
- case SCI_BASE_CONTROLLER_STATE_READY:
+ case SCIC_READY:
status = scic_sds_remote_device_complete_io(scic, rdev, request);
if (status != SCI_SUCCESS)
return status;
{
struct scic_sds_controller *scic = sci_req->owning_controller;
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_READY) {
+ if (scic->sm.current_state_id != SCIC_READY) {
dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
return SCI_FAILURE_INVALID_STATE;
}
{
enum sci_status status;
- if (scic->state_machine.current_state_id !=
- SCI_BASE_CONTROLLER_STATE_READY) {
+ if (scic->sm.current_state_id != SCIC_READY) {
dev_warn(scic_to_dev(scic),
"%s: SCIC Controller starting task from invalid "
"state\n",
* This field contains the information for the base controller state
* machine.
*/
- struct sci_base_state_machine state_machine;
+ struct sci_base_state_machine sm;
/**
* Timer for controller start/stop operations.
/**
* Simply the initial state for the base controller state machine.
*/
- SCI_BASE_CONTROLLER_STATE_INITIAL = 0,
+ SCIC_INITIAL = 0,
/**
* This state indicates that the controller is reset. The memory for
* This state is entered from the INITIAL state.
* This state is entered from the RESETTING state.
*/
- SCI_BASE_CONTROLLER_STATE_RESET,
+ SCIC_RESET,
/**
* This state is typically an action state that indicates the controller
* are permitted.
* This state is entered from the RESET state.
*/
- SCI_BASE_CONTROLLER_STATE_INITIALIZING,
+ SCIC_INITIALIZING,
/**
* This state indicates that the controller has been successfully
* initialized. In this state no new IO operations are permitted.
* This state is entered from the INITIALIZING state.
*/
- SCI_BASE_CONTROLLER_STATE_INITIALIZED,
+ SCIC_INITIALIZED,
/**
* This state indicates that the controller is in the process of becoming
* ready (i.e. starting). In this state no new IO operations are permitted.
* This state is entered from the INITIALIZED state.
*/
- SCI_BASE_CONTROLLER_STATE_STARTING,
+ SCIC_STARTING,
/**
* This state indicates the controller is now ready. Thus, the user
* is able to perform IO operations on the controller.
* This state is entered from the STARTING state.
*/
- SCI_BASE_CONTROLLER_STATE_READY,
+ SCIC_READY,
/**
* This state is typically an action state that indicates the controller
* This state is entered from the FAILED state.
* This state is entered from the STOPPED state.
*/
- SCI_BASE_CONTROLLER_STATE_RESETTING,
+ SCIC_RESETTING,
/**
* This state indicates that the controller is in the process of stopping.
* operations are allowed to complete.
* This state is entered from the READY state.
*/
- SCI_BASE_CONTROLLER_STATE_STOPPING,
+ SCIC_STOPPING,
/**
* This state indicates that the controller has successfully been stopped.
* In this state no new IO operations are permitted.
* This state is entered from the STOPPING state.
*/
- SCI_BASE_CONTROLLER_STATE_STOPPED,
+ SCIC_STOPPED,
/**
* This state indicates that the controller could not successfully be
* This state is entered from the STOPPING state.
* This state is entered from the RESETTING state.
*/
- SCI_BASE_CONTROLLER_STATE_FAILED,
-
- SCI_BASE_CONTROLLER_MAX_STATES
-
+ SCIC_FAILED,
};
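/*
 * Rough lifecycle implied by the descriptions above: SCIC_INITIAL ->
 * SCIC_RESET -> SCIC_INITIALIZING -> SCIC_INITIALIZED -> SCIC_STARTING ->
 * SCIC_READY, with SCIC_STOPPING -> SCIC_STOPPED on shutdown and
 * SCIC_RESETTING / SCIC_FAILED covering reset and error handling.
 */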
writel(0x1F4, &sci_phy->link_layer_registers->link_layer_hang_detection_timeout);
/* We can exit the initial state to the stopped state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STOPPED);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STOPPED);
return SCI_SUCCESS;
}
__func__,
sci_phy);
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
done:
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/*
* There is nothing that needs to be done in this state; just
* transition to the stopped state. */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STOPPED);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STOPPED);
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_start(struct scic_sds_phy *sci_phy)
{
- enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id;
+ enum scic_sds_phy_states state = sci_phy->sm.current_state_id;
- if (state != SCI_BASE_PHY_STATE_STOPPED) {
+ if (state != SCI_PHY_STOPPED) {
dev_dbg(sciphy_to_dev(sci_phy),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_stop(struct scic_sds_phy *sci_phy)
{
- enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id;
+ enum scic_sds_phy_states state = sci_phy->sm.current_state_id;
switch (state) {
- case SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF:
- case SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL:
- case SCI_BASE_PHY_STATE_READY:
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ case SCI_PHY_READY:
break;
default:
dev_dbg(sciphy_to_dev(sci_phy),
return SCI_FAILURE_INVALID_STATE;
}
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STOPPED);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STOPPED);
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_reset(struct scic_sds_phy *sci_phy)
{
- enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id;
+ enum scic_sds_phy_states state = sci_phy->sm.current_state_id;
- if (state != SCI_BASE_PHY_STATE_READY) {
+ if (state != SCI_PHY_READY) {
dev_dbg(sciphy_to_dev(sci_phy),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_RESETTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_RESETTING);
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_consume_power_handler(struct scic_sds_phy *sci_phy)
{
- enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id;
+ enum scic_sds_phy_states state = sci_phy->sm.current_state_id;
switch (state) {
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER: {
+ case SCI_PHY_SUB_AWAIT_SAS_POWER: {
u32 enable_spinup;
enable_spinup = readl(&sci_phy->link_layer_registers->notify_enable_spinup_control);
writel(enable_spinup, &sci_phy->link_layer_registers->notify_enable_spinup_control);
/* Change state to the final state; this substate machine has run to completion */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_FINAL);
return SCI_SUCCESS;
}
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER: {
+ case SCI_PHY_SUB_AWAIT_SATA_POWER: {
u32 scu_sas_pcfg_value;
/* Release the spinup hold state and reset the OOB state machine */
&sci_phy->link_layer_registers->phy_configuration);
/* Change state to wait for the SATA PHY notification */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
return SCI_SUCCESS;
}
writel(phy_control,
&sci_phy->link_layer_registers->phy_configuration);
- sci_base_state_machine_change_state(
- &sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN
- );
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
}
static void scic_sds_phy_start_sata_link_training(
struct scic_sds_phy *sci_phy)
{
- sci_base_state_machine_change_state(
- &sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER
- );
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
}
{
sci_phy->max_negotiated_speed = max_link_rate;
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- next_state);
+ sci_change_state(&sci_phy->sm, next_state);
}
enum sci_status scic_sds_phy_event_handler(struct scic_sds_phy *sci_phy,
u32 event_code)
{
- enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id;
+ enum scic_sds_phy_states state = sci_phy->sm.current_state_id;
switch (state) {
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
scic_sds_phy_start_sas_link_training(sci_phy);
return SCI_FAILURE;
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
/*
scic_sds_phy_complete_link_training(
sci_phy,
SAS_LINK_RATE_1_5_GBPS,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF);
+ SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SAS_30:
case SCU_EVENT_SAS_30_SSC:
scic_sds_phy_complete_link_training(
sci_phy,
SAS_LINK_RATE_3_0_GBPS,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF);
+ SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SAS_60:
case SCU_EVENT_SAS_60_SSC:
scic_sds_phy_complete_link_training(
sci_phy,
SAS_LINK_RATE_6_0_GBPS,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF);
+ SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/*
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(sci_phy),
break;
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF:
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
/* Backup the state machine */
case SCU_EVENT_LINK_FAILURE:
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(sci_phy),
return SCI_FAILURE;
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(sci_phy),
return SCI_FAILURE;
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/* These events are received every 10ms and are
return SCI_FAILURE;
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/* These events might be received since we don't know how many may be in
sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
/* We have received the SATA PHY notification; change state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
break;
case SCU_EVENT_SAS_PHY_DETECTED:
/* There has been a change in the phy type before OOB/SN for the
return SCI_FAILURE;
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SATA_PHY_DETECTED:
/*
scic_sds_phy_complete_link_training(
sci_phy,
SAS_LINK_RATE_1_5_GBPS,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF);
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_SATA_30:
case SCU_EVENT_SATA_30_SSC:
scic_sds_phy_complete_link_training(
sci_phy,
SAS_LINK_RATE_3_0_GBPS,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF);
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_SATA_60:
case SCU_EVENT_SATA_60_SSC:
scic_sds_phy_complete_link_training(
sci_phy,
SAS_LINK_RATE_6_0_GBPS,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF);
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
case SCU_EVENT_SAS_PHY_DETECTED:
/*
}
return SCI_SUCCESS;
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SATA_PHY_DETECTED:
/* Backup the state machine */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
default:
return SCI_FAILURE;
}
return SCI_SUCCESS;
- case SCI_BASE_PHY_STATE_READY:
+ case SCI_PHY_READY:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
case SCU_EVENT_BROADCAST_CHANGE:
/* Broadcast change received. Notify the port. */
return SCI_FAILURE_INVALID_STATE;
}
return SCI_SUCCESS;
- case SCI_BASE_PHY_STATE_RESETTING:
+ case SCI_PHY_RESETTING:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_HARD_RESET_TRANSMITTED:
/* Hard reset transmitted, change state back to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(sci_phy),
enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy,
u32 frame_index)
{
- enum scic_sds_phy_states state = sci_phy->state_machine.current_state_id;
+ enum scic_sds_phy_states state = sci_phy->sm.current_state_id;
struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller;
enum sci_status result;
switch (state) {
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF: {
+ case SCI_PHY_SUB_AWAIT_IAF_UF: {
u32 *frame_words;
struct sas_identify_frame iaf;
struct isci_phy *iphy = sci_phy_to_iphy(sci_phy);
* state since there are no power requirements for
* expander phys.
*/
- state = SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL;
+ state = SCI_PHY_SUB_FINAL;
} else {
/* We got the IAF, we can now go to the await spinup
* semaphore state
*/
- state = SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER;
+ state = SCI_PHY_SUB_AWAIT_SAS_POWER;
}
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- state);
+ sci_change_state(&sci_phy->sm, state);
result = SCI_SUCCESS;
} else
dev_warn(sciphy_to_dev(sci_phy),
scic_sds_controller_release_frame(scic, frame_index);
return result;
}
- case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF: {
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
struct dev_to_host_fis *frame_header;
u32 *fis_frame_data;
struct isci_phy *iphy = sci_phy_to_iphy(sci_phy);
fis_frame_data);
/* got the signature FIS, we can now go to the final substate */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_FINAL);
result = SCI_SUCCESS;
} else
static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
/* This is just a temporary state; go off to the starting state */
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
}
static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_insert(scic, sci_phy);
static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_remove(scic, sci_phy);
static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_insert(scic, sci_phy);
static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
struct scic_sds_controller *scic = sci_phy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_remove(scic, sci_phy);
static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
sci_mod_timer(&sci_phy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}
static void scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
sci_del_timer(&sci_phy->sata_timer);
}
static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
sci_mod_timer(&sci_phy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}
static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
sci_del_timer(&sci_phy->sata_timer);
}
static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
if (scic_sds_port_link_detected(sci_phy->owning_port, sci_phy)) {
static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
sci_del_timer(&sci_phy->sata_timer);
}
static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
/* State machine has run to completion so exit out and change
* the base state machine to the ready state
*/
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_READY);
+ sci_change_state(&sci_phy->sm, SCI_PHY_READY);
}
/**
static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
/*
* @todo We need to get to the controller to place this PE in a
scu_link_layer_stop_protocol_engine(sci_phy);
- if (sci_phy->state_machine.previous_state_id != SCI_BASE_PHY_STATE_INITIAL)
+ if (sci_phy->sm.previous_state_id != SCI_PHY_INITIAL)
scic_sds_controller_link_down(scic_sds_phy_get_controller(sci_phy),
phy_get_non_dummy_port(sci_phy),
sci_phy);
static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
scu_link_layer_stop_protocol_engine(sci_phy);
scu_link_layer_start_oob(sci_phy);
sci_phy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
sci_phy->bcn_received_while_port_unassigned = false;
- if (sci_phy->state_machine.previous_state_id == SCI_BASE_PHY_STATE_READY)
+ if (sci_phy->sm.previous_state_id == SCI_PHY_READY)
scic_sds_controller_link_down(scic_sds_phy_get_controller(sci_phy),
phy_get_non_dummy_port(sci_phy),
sci_phy);
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL);
+ sci_change_state(&sci_phy->sm, SCI_PHY_SUB_INITIAL);
}
static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
scic_sds_controller_link_up(scic_sds_phy_get_controller(sci_phy),
phy_get_non_dummy_port(sci_phy),
static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
scic_sds_phy_suspend(sci_phy);
}
static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), state_machine);
+ struct scic_sds_phy *sci_phy = container_of(sm, typeof(*sci_phy), sm);
/* The phy is being reset, therefore deactivate it from the port. In
* the resetting state we don't notify the user regarding link up and
/* The SCU does not need to have a discrete reset state so
* just go back to the starting state.
*/
- sci_base_state_machine_change_state(&sci_phy->state_machine,
- SCI_BASE_PHY_STATE_STARTING);
+ sci_change_state(&sci_phy->sm, SCI_PHY_STARTING);
}
}
static const struct sci_base_state scic_sds_phy_state_table[] = {
- [SCI_BASE_PHY_STATE_INITIAL] = { },
- [SCI_BASE_PHY_STATE_STOPPED] = {
+ [SCI_PHY_INITIAL] = { },
+ [SCI_PHY_STOPPED] = {
.enter_state = scic_sds_phy_stopped_state_enter,
},
- [SCI_BASE_PHY_STATE_STARTING] = {
+ [SCI_PHY_STARTING] = {
.enter_state = scic_sds_phy_starting_state_enter,
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL] = {
+ [SCI_PHY_SUB_INITIAL] = {
.enter_state = scic_sds_phy_starting_initial_substate_enter,
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN] = { },
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN] = { },
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF] = { },
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER] = {
+ [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+ [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
.enter_state = scic_sds_phy_starting_await_sas_power_substate_enter,
.exit_state = scic_sds_phy_starting_await_sas_power_substate_exit,
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER] = {
+ [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
.enter_state = scic_sds_phy_starting_await_sata_power_substate_enter,
.exit_state = scic_sds_phy_starting_await_sata_power_substate_exit
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN] = {
+ [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
.enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter,
.exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN] = {
+ [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
.enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter,
.exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF] = {
+ [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
.enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter,
.exit_state = scic_sds_phy_starting_await_sig_fis_uf_substate_exit
},
- [SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL] = {
+ [SCI_PHY_SUB_FINAL] = {
.enter_state = scic_sds_phy_starting_final_substate_enter,
},
- [SCI_BASE_PHY_STATE_READY] = {
+ [SCI_PHY_READY] = {
.enter_state = scic_sds_phy_ready_state_enter,
.exit_state = scic_sds_phy_ready_state_exit,
},
- [SCI_BASE_PHY_STATE_RESETTING] = {
+ [SCI_PHY_RESETTING] = {
.enter_state = scic_sds_phy_resetting_state_enter,
},
- [SCI_BASE_PHY_STATE_FINAL] = { },
+ [SCI_PHY_FINAL] = { },
};
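/*
 * Note that the top-level phy states and the SCI_PHY_SUB_* starting
 * substates now live in one flat state table (and one enum), so the
 * starting "substate machine" is driven by the same sci_change_state()
 * calls as the base phy states.
 */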
void scic_sds_phy_construct(struct scic_sds_phy *sci_phy,
struct scic_sds_port *owning_port, u8 phy_index)
{
- sci_base_state_machine_construct(&sci_phy->state_machine,
+ sci_base_state_machine_construct(&sci_phy->sm,
scic_sds_phy_state_table,
- SCI_BASE_PHY_STATE_INITIAL);
+ SCI_PHY_INITIAL);
- sci_base_state_machine_start(&sci_phy->state_machine);
+ sci_base_state_machine_start(&sci_phy->sm);
/* Copy the rest of the input data to our locals */
sci_phy->owning_port = owning_port;
/**
* This field contains the information for the base phy state machine.
*/
- struct sci_base_state_machine state_machine;
+ struct sci_base_state_machine sm;
/**
* This field specifies the port object that owns/contains this phy.
/**
* Simply the initial state for the base phy state machine.
*/
- SCI_BASE_PHY_STATE_INITIAL,
+ SCI_PHY_INITIAL,
/**
* This state indicates that the phy has successfully been stopped.
* This state is entered from the READY state.
* This state is entered from the RESETTING state.
*/
- SCI_BASE_PHY_STATE_STOPPED,
+ SCI_PHY_STOPPED,
/**
* This state indicates that the phy is in the process of becoming
* This state is entered from the READY state.
* This state is entered from the RESETTING state.
*/
- SCI_BASE_PHY_STATE_STARTING,
+ SCI_PHY_STARTING,
/**
* Initial state
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL,
+ SCI_PHY_SUB_INITIAL,
/**
* Wait state for the hardware OSSP event type notification
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN,
+ SCI_PHY_SUB_AWAIT_OSSP_EN,
/**
* Wait state for the PHY speed notification
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN,
+ SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
/**
* Wait state for the IAF Unsolicited frame notification
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF,
+ SCI_PHY_SUB_AWAIT_IAF_UF,
/**
* Wait state for the request to consume power
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER,
+ SCI_PHY_SUB_AWAIT_SAS_POWER,
/**
* Wait state for request to consume power
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER,
+ SCI_PHY_SUB_AWAIT_SATA_POWER,
/**
* Wait state for the SATA PHY notification
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN,
+ SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
/**
* Wait for the SATA PHY speed notification
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN,
+ SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
/**
* Wait state for the SIGNATURE FIS unsolicited frame notification
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
/**
* Exit state for this state machine
*/
- SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL,
+ SCI_PHY_SUB_FINAL,
/**
* This state indicates that the phy is now ready. Thus, the user
* is currently part of a valid port.
* This state is entered from the STARTING state.
*/
- SCI_BASE_PHY_STATE_READY,
+ SCI_PHY_READY,
/**
* This state indicates that the phy is in the process of being reset.
* In this state no new IO operations are permitted on this phy.
* This state is entered from the READY state.
*/
- SCI_BASE_PHY_STATE_RESETTING,
+ SCI_PHY_RESETTING,
/**
* Simply the final state for the base phy state machine.
*/
- SCI_BASE_PHY_STATE_FINAL,
+ SCI_PHY_FINAL,
};
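/*
 * Rough lifecycle implied by the descriptions above: SCI_PHY_INITIAL ->
 * SCI_PHY_STOPPED -> SCI_PHY_STARTING, which then walks the SCI_PHY_SUB_*
 * substates (OSSP enable, SAS/SATA speed and power waits, IAF or signature
 * FIS reception) until SCI_PHY_SUB_FINAL promotes the phy to SCI_PHY_READY;
 * SCI_PHY_RESETTING drops back to SCI_PHY_STARTING.
 */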
/**
static bool is_port_ready_state(enum scic_sds_port_states state)
{
switch (state) {
- case SCI_BASE_PORT_STATE_READY:
- case SCIC_SDS_PORT_READY_SUBSTATE_WAITING:
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
- case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING:
+ case SCI_PORT_READY:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
return true;
default:
return false;
static void port_state_machine_change(struct scic_sds_port *sci_port,
enum scic_sds_port_states state)
{
- struct sci_base_state_machine *sm = &sci_port->state_machine;
+ struct sci_base_state_machine *sm = &sci_port->sm;
enum scic_sds_port_states old_state = sm->current_state_id;
if (is_port_ready_state(old_state) && !is_port_ready_state(state))
sci_port->ready_exit = true;
- sci_base_state_machine_change_state(sm, state);
+ sci_change_state(sm, state);
sci_port->ready_exit = false;
}
if ((phy_sas_address.high == port_sas_address.high &&
phy_sas_address.low == port_sas_address.low) ||
sci_port->active_phy_mask == 0) {
- struct sci_base_state_machine *sm = &sci_port->state_machine;
+ struct sci_base_state_machine *sm = &sci_port->sm;
scic_sds_port_activate_phy(sci_port, sci_phy, do_notify_user);
- if (sm->current_state_id == SCI_BASE_PORT_STATE_RESETTING)
- port_state_machine_change(sci_port, SCI_BASE_PORT_STATE_READY);
+ if (sm->current_state_id == SCI_PORT_RESETTING)
+ port_state_machine_change(sci_port, SCI_PORT_READY);
} else
scic_sds_port_invalid_link_up(sci_port, sci_phy);
}
if (tmr->cancel)
goto done;
- current_state = sci_base_state_machine_get_state(&sci_port->state_machine);
+ current_state = sci_port->sm.current_state_id;
- if (current_state == SCI_BASE_PORT_STATE_RESETTING) {
+ if (current_state == SCI_PORT_RESETTING) {
/* if the port is still in the resetting state then the timeout
* fired before the reset completed.
*/
- port_state_machine_change(sci_port, SCI_BASE_PORT_STATE_FAILED);
- } else if (current_state == SCI_BASE_PORT_STATE_STOPPED) {
+ port_state_machine_change(sci_port, SCI_PORT_FAILED);
+ } else if (current_state == SCI_PORT_STOPPED) {
/* if the port is stopped then the start request failed. In this
* case stay in the stopped state.
*/
"%s: SCIC Port 0x%p failed to stop before tiemout.\n",
__func__,
sci_port);
- } else if (current_state == SCI_BASE_PORT_STATE_STOPPING) {
+ } else if (current_state == SCI_PORT_STOPPING) {
/* if the port is still stopping then the stop has not completed */
isci_port_stop_complete(sci_port->owning_controller,
sci_port,
static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
scic_sds_port_suspend_port_task_scheduler(sci_port);
if (sci_port->active_phy_mask != 0) {
/* At least one of the phys on the port is ready */
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL);
+ SCI_PORT_SUB_OPERATIONAL);
}
}
static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
u32 index;
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
struct scic_sds_controller *scic = sci_port->owning_controller;
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_port *iport = sci_port_to_iport(sci_port);
* @object: This is the object which is cast to a struct scic_sds_port object.
*
* This method will perform the actions required by the struct scic_sds_port on
- * exiting the SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL. This function reports
+ * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
* the port not ready and suspends the port task scheduler.
*/
static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
struct scic_sds_controller *scic = sci_port->owning_controller;
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_port *iport = sci_port_to_iport(sci_port);
static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
struct scic_sds_controller *scic = sci_port->owning_controller;
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_port *iport = sci_port_to_iport(sci_port);
isci_port_not_ready(ihost, iport);
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_WAITING);
+ SCI_PORT_SUB_WAITING);
} else if (sci_port->started_request_count == 0)
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL);
+ SCI_PORT_SUB_OPERATIONAL);
}
static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
scic_sds_port_suspend_port_task_scheduler(sci_port);
if (sci_port->ready_exit)
enum scic_sds_port_states state;
u32 phy_mask;
- state = sci_port->state_machine.current_state_id;
- if (state != SCI_BASE_PORT_STATE_STOPPED) {
+ state = sci_port->sm.current_state_id;
+ if (state != SCI_PORT_STOPPED) {
dev_warn(sciport_to_dev(sci_port),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
*/
if (scic_sds_port_is_phy_mask_valid(sci_port, phy_mask) == true) {
port_state_machine_change(sci_port,
- SCI_BASE_PORT_STATE_READY);
+ SCI_PORT_READY);
return SCI_SUCCESS;
}
{
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCI_BASE_PORT_STATE_STOPPED:
+ case SCI_PORT_STOPPED:
return SCI_SUCCESS;
- case SCIC_SDS_PORT_READY_SUBSTATE_WAITING:
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
- case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING:
- case SCI_BASE_PORT_STATE_RESETTING:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ case SCI_PORT_RESETTING:
port_state_machine_change(sci_port,
- SCI_BASE_PORT_STATE_STOPPING);
+ SCI_PORT_STOPPING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(sci_port),
enum scic_sds_port_states state;
u32 phy_index;
- state = sci_port->state_machine.current_state_id;
- if (state != SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL) {
+ state = sci_port->sm.current_state_id;
+ if (state != SCI_PORT_SUB_OPERATIONAL) {
dev_warn(sciport_to_dev(sci_port),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
sci_port->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
port_state_machine_change(sci_port,
- SCI_BASE_PORT_STATE_RESETTING);
+ SCI_PORT_RESETTING);
return SCI_SUCCESS;
}
enum sci_status status;
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCI_BASE_PORT_STATE_STOPPED: {
+ case SCI_PORT_STOPPED: {
struct sci_sas_address port_sas_address;
/* Read the port assigned SAS Address if there is one */
}
return scic_sds_port_set_phy(sci_port, sci_phy);
}
- case SCIC_SDS_PORT_READY_SUBSTATE_WAITING:
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
status = scic_sds_port_set_phy(sci_port, sci_phy);
if (status != SCI_SUCCESS)
scic_sds_port_general_link_up_handler(sci_port, sci_phy, true);
sci_port->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
- port_state_machine_change(sci_port, SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING);
+ port_state_machine_change(sci_port, SCI_PORT_SUB_CONFIGURING);
return status;
- case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING:
+ case SCI_PORT_SUB_CONFIGURING:
status = scic_sds_port_set_phy(sci_port, sci_phy);
if (status != SCI_SUCCESS)
* the port.
*/
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING);
+ SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(sci_port),
enum sci_status status;
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCI_BASE_PORT_STATE_STOPPED:
+ case SCI_PORT_STOPPED:
return scic_sds_port_clear_phy(sci_port, sci_phy);
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
+ case SCI_PORT_SUB_OPERATIONAL:
status = scic_sds_port_clear_phy(sci_port, sci_phy);
if (status != SCI_SUCCESS)
return status;
scic_sds_port_deactivate_phy(sci_port, sci_phy, true);
sci_port->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING);
+ SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
- case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING:
+ case SCI_PORT_SUB_CONFIGURING:
status = scic_sds_port_clear_phy(sci_port, sci_phy);
if (status != SCI_SUCCESS)
* the port
*/
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING);
+ SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(sci_port),
{
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCIC_SDS_PORT_READY_SUBSTATE_WAITING:
+ case SCI_PORT_SUB_WAITING:
/* Since this is the first phy going link up for the port we
* can just enable it and continue
*/
scic_sds_port_activate_phy(sci_port, sci_phy, true);
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL);
+ SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
+ case SCI_PORT_SUB_OPERATIONAL:
scic_sds_port_general_link_up_handler(sci_port, sci_phy, true);
return SCI_SUCCESS;
- case SCI_BASE_PORT_STATE_RESETTING:
+ case SCI_PORT_RESETTING:
/* TODO We should make sure that the phy that has gone
* link up is the same one on which we sent the reset. It is
* possible that the phy on which we sent the reset is not the
{
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
+ case SCI_PORT_SUB_OPERATIONAL:
scic_sds_port_deactivate_phy(sci_port, sci_phy, true);
/* If there are no active phys left in the port, then
*/
if (sci_port->active_phy_mask == 0)
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_WAITING);
+ SCI_PORT_SUB_WAITING);
return SCI_SUCCESS;
- case SCI_BASE_PORT_STATE_RESETTING:
+ case SCI_PORT_RESETTING:
/* In the resetting state we don't notify the user regarding
* link up and link down notifications. */
scic_sds_port_deactivate_phy(sci_port, sci_phy, false);
{
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCIC_SDS_PORT_READY_SUBSTATE_WAITING:
+ case SCI_PORT_SUB_WAITING:
return SCI_FAILURE_INVALID_STATE;
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
+ case SCI_PORT_SUB_OPERATIONAL:
sci_port->started_request_count++;
return SCI_SUCCESS;
default:
{
enum scic_sds_port_states state;
- state = sci_port->state_machine.current_state_id;
+ state = sci_port->sm.current_state_id;
switch (state) {
- case SCI_BASE_PORT_STATE_STOPPED:
+ case SCI_PORT_STOPPED:
dev_warn(sciport_to_dev(sci_port),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
- case SCI_BASE_PORT_STATE_STOPPING:
+ case SCI_PORT_STOPPING:
scic_sds_port_decrement_request_count(sci_port);
if (sci_port->started_request_count == 0)
port_state_machine_change(sci_port,
- SCI_BASE_PORT_STATE_STOPPED);
+ SCI_PORT_STOPPED);
break;
- case SCI_BASE_PORT_STATE_READY:
- case SCI_BASE_PORT_STATE_RESETTING:
- case SCI_BASE_PORT_STATE_FAILED:
- case SCIC_SDS_PORT_READY_SUBSTATE_WAITING:
- case SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL:
+ case SCI_PORT_READY:
+ case SCI_PORT_RESETTING:
+ case SCI_PORT_FAILED:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
scic_sds_port_decrement_request_count(sci_port);
break;
- case SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING:
+ case SCI_PORT_SUB_CONFIGURING:
scic_sds_port_decrement_request_count(sci_port);
if (sci_port->started_request_count == 0) {
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL);
+ SCI_PORT_SUB_OPERATIONAL);
}
break;
}
static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
- if (sci_port->state_machine.previous_state_id == SCI_BASE_PORT_STATE_STOPPING) {
+ if (sci_port->sm.previous_state_id == SCI_PORT_STOPPING) {
/*
* If we enter this state because of a request to stop
* the port then we want to disable the hardware's port
static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
/* Enable and suspend the port task scheduler */
scic_sds_port_enable_port_task_scheduler(sci_port);
static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
struct scic_sds_controller *scic = sci_port->owning_controller;
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_port *iport = sci_port_to_iport(sci_port);
u32 prev_state;
- prev_state = sci_port->state_machine.previous_state_id;
- if (prev_state == SCI_BASE_PORT_STATE_RESETTING)
+ prev_state = sci_port->sm.previous_state_id;
+ if (prev_state == SCI_PORT_RESETTING)
isci_port_hard_reset_complete(iport, SCI_SUCCESS);
else
isci_port_not_ready(ihost, iport);
/* Start the ready substate machine */
port_state_machine_change(sci_port,
- SCIC_SDS_PORT_READY_SUBSTATE_WAITING);
+ SCI_PORT_SUB_WAITING);
}
static void scic_sds_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
sci_del_timer(&sci_port->timer);
}
static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
sci_del_timer(&sci_port->timer);
static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), state_machine);
+ struct scic_sds_port *sci_port = container_of(sm, typeof(*sci_port), sm);
struct isci_port *iport = sci_port_to_iport(sci_port);
isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
/* --------------------------------------------------------------------------- */
static const struct sci_base_state scic_sds_port_state_table[] = {
- [SCI_BASE_PORT_STATE_STOPPED] = {
+ [SCI_PORT_STOPPED] = {
.enter_state = scic_sds_port_stopped_state_enter,
.exit_state = scic_sds_port_stopped_state_exit
},
- [SCI_BASE_PORT_STATE_STOPPING] = {
+ [SCI_PORT_STOPPING] = {
.exit_state = scic_sds_port_stopping_state_exit
},
- [SCI_BASE_PORT_STATE_READY] = {
+ [SCI_PORT_READY] = {
.enter_state = scic_sds_port_ready_state_enter,
},
- [SCIC_SDS_PORT_READY_SUBSTATE_WAITING] = {
+ [SCI_PORT_SUB_WAITING] = {
.enter_state = scic_sds_port_ready_substate_waiting_enter,
},
- [SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL] = {
+ [SCI_PORT_SUB_OPERATIONAL] = {
.enter_state = scic_sds_port_ready_substate_operational_enter,
.exit_state = scic_sds_port_ready_substate_operational_exit
},
- [SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING] = {
+ [SCI_PORT_SUB_CONFIGURING] = {
.enter_state = scic_sds_port_ready_substate_configuring_enter,
.exit_state = scic_sds_port_ready_substate_configuring_exit
},
- [SCI_BASE_PORT_STATE_RESETTING] = {
+ [SCI_PORT_RESETTING] = {
.exit_state = scic_sds_port_resetting_state_exit
},
- [SCI_BASE_PORT_STATE_FAILED] = {
+ [SCI_PORT_FAILED] = {
.enter_state = scic_sds_port_failed_state_enter,
}
};
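/*
 * For reference, a minimal sketch of how sci_change_state() is assumed to
 * drive a flat state table like the one above: run the optional exit hook of
 * the old state, record it as previous, then run the enter hook of the new
 * state. The enter_state/exit_state members match the table initializers in
 * this patch; the 'state_table' pointer name is an assumption made only for
 * this illustration.
 */
static void example_change_state(struct sci_base_state_machine *sm, u32 next)
{
	const struct sci_base_state *s = &sm->state_table[sm->current_state_id];

	if (s->exit_state)
		s->exit_state(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;

	s = &sm->state_table[sm->current_state_id];
	if (s->enter_state)
		s->enter_state(sm);
}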
void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index,
struct scic_sds_controller *scic)
{
- sci_base_state_machine_construct(&sci_port->state_machine,
+ sci_base_state_machine_construct(&sci_port->sm,
scic_sds_port_state_table,
- SCI_BASE_PORT_STATE_STOPPED);
+ SCI_PORT_STOPPED);
- sci_base_state_machine_start(&sci_port->state_machine);
+ sci_base_state_machine_start(&sci_port->sm);
sci_port->logical_port_index = SCIC_SDS_DUMMY_PORT;
sci_port->physical_port_index = index;
/**
* This field contains the information for the base port state machine.
*/
- struct sci_base_state_machine state_machine;
+ struct sci_base_state_machine sm;
bool ready_exit;
* In this state no new IO operations are permitted.
* This state is entered from the STOPPING state.
*/
- SCI_BASE_PORT_STATE_STOPPED,
+ SCI_PORT_STOPPED,
/**
* This state indicates that the port is in the process of stopping.
* operations are allowed to complete.
* This state is entered from the READY state.
*/
- SCI_BASE_PORT_STATE_STOPPING,
+ SCI_PORT_STOPPING,
/**
* This state indicates the port is now ready. Thus, the user is
* able to perform IO operations on this port.
* This state is entered from the STARTING state.
*/
- SCI_BASE_PORT_STATE_READY,
+ SCI_PORT_READY,
/**
* The substate where the port is started and ready but has no
* active phys.
*/
- SCIC_SDS_PORT_READY_SUBSTATE_WAITING,
+ SCI_PORT_SUB_WAITING,
/**
* The substate where the port is started and ready and there is
* at least one phy operational.
*/
- SCIC_SDS_PORT_READY_SUBSTATE_OPERATIONAL,
+ SCI_PORT_SUB_OPERATIONAL,
/**
* The substate where the port is started and there was an
* add/remove phy event. This state is only used in Automatic
* Port Configuration Mode (APC)
*/
- SCIC_SDS_PORT_READY_SUBSTATE_CONFIGURING,
+ SCI_PORT_SUB_CONFIGURING,
/**
* This state indicates the port is in the process of performing a hard
* port.
* This state is entered from the READY state.
*/
- SCI_BASE_PORT_STATE_RESETTING,
+ SCI_PORT_RESETTING,
/**
* This state indicates the port has failed a reset request. This state
* is entered when a port reset request times out.
* This state is entered from the RESETTING state.
*/
- SCI_BASE_PORT_STATE_FAILED,
+ SCI_PORT_FAILED,
};
scic_sds_apc_agent_configure_ports(scic, port_agent, sci_phy, true);
} else {
/* the phy is already the part of the port */
- u32 port_state = sci_port->state_machine.current_state_id;
+ u32 port_state = sci_port->sm.current_state_id;
/* if the port's state is resetting, then the link up is from
* a port hard reset; in this case, we need to tell the port
* that link up is received
*/
- BUG_ON(port_state != SCI_BASE_PORT_STATE_RESETTING);
+ BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
scic_sds_port_link_up(sci_port, sci_phy);
}
struct scic_sds_remote_device *sci_dev = _dev;
BUG_ON(sci_dev->started_request_count != 0);
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCI_BASE_REMOTE_DEVICE_STATE_STOPPED);
+ sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED);
}
static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds_remote_device *sci_dev)
enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev,
u32 timeout)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL:
- case SCI_BASE_REMOTE_DEVICE_STATE_FAILED:
- case SCI_BASE_REMOTE_DEVICE_STATE_FINAL:
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED:
+ case SCI_DEV_STOPPED:
return SCI_SUCCESS;
- case SCI_BASE_REMOTE_DEVICE_STATE_STARTING:
+ case SCI_DEV_STARTING:
/* device not started so there had better be no requests */
BUG_ON(sci_dev->started_request_count != 0);
scic_sds_remote_node_context_destruct(&sci_dev->rnc,
/* Transition to the stopping state and wait for the
* remote node to complete being posted and invalidated.
*/
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STOPPING);
+ sci_change_state(sm, SCI_DEV_STOPPING);
return SCI_SUCCESS;
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STOPPING);
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ sci_change_state(sm, SCI_DEV_STOPPING);
if (sci_dev->started_request_count == 0) {
scic_sds_remote_node_context_destruct(&sci_dev->rnc,
rnc_destruct_done, sci_dev);
} else
return scic_sds_remote_device_terminate_requests(sci_dev);
break;
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING:
+ case SCI_DEV_STOPPING:
/* All requests should have been terminated, but if there is an
* attempt to stop a device already in the stopping state, then
* try again to terminate.
*/
return scic_sds_remote_device_terminate_requests(sci_dev);
- case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING:
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STOPPING);
+ case SCI_DEV_RESETTING:
+ sci_change_state(sm, SCI_DEV_STOPPING);
return SCI_SUCCESS;
}
}
enum sci_status scic_remote_device_reset(struct scic_sds_remote_device *sci_dev)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED:
- case SCI_BASE_REMOTE_DEVICE_STATE_STARTING:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FAILED:
- case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FINAL:
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_RESETTING);
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ sci_change_state(sm, SCI_DEV_RESETTING);
return SCI_SUCCESS;
}
}
enum sci_status scic_remote_device_reset_complete(struct scic_sds_remote_device *sci_dev)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
- if (state != SCI_BASE_REMOTE_DEVICE_STATE_RESETTING) {
+ if (state != SCI_DEV_RESETTING) {
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
}
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_READY);
+ sci_change_state(sm, SCI_DEV_READY);
return SCI_SUCCESS;
}
enum sci_status scic_sds_remote_device_suspend(struct scic_sds_remote_device *sci_dev,
u32 suspend_type)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
- if (state != SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD) {
+ if (state != SCI_STP_DEV_CMD) {
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_device *sci_dev,
u32 frame_index)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
enum sci_status status;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED:
- case SCI_BASE_REMOTE_DEVICE_STATE_STARTING:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCI_BASE_REMOTE_DEVICE_STATE_FINAL:
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
/* Return the frame back to the controller */
scic_sds_controller_release_frame(scic, frame_index);
return SCI_FAILURE_INVALID_STATE;
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FAILED:
- case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING: {
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING: {
struct scic_sds_request *sci_req;
struct ssp_frame_hdr hdr;
void *frame_header;
}
break;
}
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: {
+ case SCI_STP_DEV_NCQ: {
struct dev_to_host_fis *hdr;
status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
/* TODO Check sactive and complete associated IO if any. */
- sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR);
+ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
} else if (hdr->fis_type == FIS_REGD2H &&
(hdr->status & ATA_ERR)) {
/*
* Treat this like an SDB error FIS ready reason.
*/
sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR);
+ sci_change_state(&sci_dev->sm, SCI_STP_DEV_NCQ_ERROR);
} else
status = SCI_FAILURE;
scic_sds_controller_release_frame(scic, frame_index);
break;
}
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
/* The device does not process any UF received from the hardware while
* in this state. All unsolicited frames are forwarded to the io request
* object.
static bool is_remote_device_ready(struct scic_sds_remote_device *sci_dev)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
return true;
default:
return false;
enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_device *sci_dev,
u32 event_code)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_status status;
if (status != SCI_SUCCESS)
return status;
- if (state == SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE) {
+ if (state == SCI_STP_DEV_IDLE) {
/* We pick up suspension events to handle specifically to this
* state. We resume the RNC right away.
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_port *sci_port = sci_dev->owning_port;
struct isci_request *ireq = sci_req_to_ireq(sci_req);
enum sci_status status;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED:
- case SCI_BASE_REMOTE_DEVICE_STATE_STARTING:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FAILED:
- case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FINAL:
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
+ case SCI_DEV_READY:
/* attempt to start an io request for this device object. The remote
* device object will issue the start request for the io and if
* successful it will start the request for the port object then
status = scic_sds_request_start(sci_req);
break;
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE: {
+ case SCI_STP_DEV_IDLE: {
/* handle the start io operation for a SATA device that is in
* the command idle state. - Evaluate the type of IO request to
* be started - If it's an NCQ request change to NCQ substate -
break;
if (task->ata_task.use_ncq)
- new_state = SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ;
+ new_state = SCI_STP_DEV_NCQ;
else {
sci_dev->working_request = sci_req;
- new_state = SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD;
+ new_state = SCI_STP_DEV_CMD;
}
- sci_base_state_machine_change_state(sm, new_state);
+ sci_change_state(sm, new_state);
break;
}
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ: {
+ case SCI_STP_DEV_NCQ: {
struct sas_task *task = isci_request_access_task(ireq);
if (task->ata_task.use_ncq) {
return SCI_FAILURE_INVALID_STATE;
break;
}
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
+ case SCI_STP_DEV_AWAIT_RESET:
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
+ case SCI_SMP_DEV_IDLE:
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
if (status != SCI_SUCCESS)
return status;
break;
sci_dev->working_request = sci_req;
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD);
+ sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD);
break;
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
/* device is already handling a command; it cannot accept new commands
* until this one is complete.
*/
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_port *sci_port = sci_dev->owning_port;
enum sci_status status;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED:
- case SCI_BASE_REMOTE_DEVICE_STATE_STARTING:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCI_BASE_REMOTE_DEVICE_STATE_FAILED:
- case SCI_BASE_REMOTE_DEVICE_STATE_FINAL:
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
- case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING:
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_RESETTING:
status = common_complete_io(sci_port, sci_dev, sci_req);
break;
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
status = common_complete_io(sci_port, sci_dev, sci_req);
if (status != SCI_SUCCESS)
break;
* can reach RNC state handler, these IOs will be completed by RNC with
* status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
*/
- sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET);
+ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
} else if (scic_sds_remote_device_get_request_count(sci_dev) == 0)
- sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE);
+ sci_change_state(sm, SCI_STP_DEV_IDLE);
break;
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
+ case SCI_SMP_DEV_CMD:
status = common_complete_io(sci_port, sci_dev, sci_req);
if (status != SCI_SUCCESS)
break;
- sci_base_state_machine_change_state(sm, SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE);
+ sci_change_state(sm, SCI_SMP_DEV_IDLE);
break;
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING:
+ case SCI_DEV_STOPPING:
status = common_complete_io(sci_port, sci_dev, sci_req);
if (status != SCI_SUCCESS)
break;
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_port *sci_port = sci_dev->owning_port;
enum sci_status status;
switch (state) {
- case SCI_BASE_REMOTE_DEVICE_STATE_INITIAL:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPED:
- case SCI_BASE_REMOTE_DEVICE_STATE_STARTING:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCI_BASE_REMOTE_DEVICE_STATE_STOPPING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FAILED:
- case SCI_BASE_REMOTE_DEVICE_STATE_RESETTING:
- case SCI_BASE_REMOTE_DEVICE_STATE_FINAL:
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR:
- case SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
if (status != SCI_SUCCESS)
return status;
* management request.
*/
sci_dev->working_request = sci_req;
- sci_base_state_machine_change_state(sm, SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD);
+ sci_change_state(sm, SCI_STP_DEV_CMD);
/* The remote node context must cleanup the TCi to NCQ mapping
* table. The only way to do this correctly is to either write
* post TC when RNC gets resumed.
*/
return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
- case SCI_BASE_REMOTE_DEVICE_STATE_READY:
+ case SCI_DEV_READY:
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
if (status != SCI_SUCCESS)
return status;
return;
/* go 'ready' if we are not already in a ready state */
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCI_BASE_REMOTE_DEVICE_STATE_READY);
+ sci_change_state(&sci_dev->sm, SCI_DEV_READY);
}
static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
/* For NCQ operation we do not issue an isci_remote_device_not_ready().
* As a result, avoid sending the ready notification.
*/
- if (sci_dev->state_machine.previous_state_id != SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ)
+ if (sci_dev->sm.previous_state_id != SCI_STP_DEV_NCQ)
isci_remote_device_ready(scic_to_ihost(scic), idev);
}
static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
/* Initial state is a transitional state to the stopped state */
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCI_BASE_REMOTE_DEVICE_STATE_STOPPED);
+ sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED);
}
/**
*/
static enum sci_status scic_remote_device_destruct(struct scic_sds_remote_device *sci_dev)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_controller *scic;
- if (state != SCI_BASE_REMOTE_DEVICE_STATE_STOPPED) {
+ if (state != SCI_DEV_STOPPED) {
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
scic_sds_controller_free_remote_node_context(scic, sci_dev,
sci_dev->rnc.remote_node_index);
sci_dev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_FINAL);
+ sci_change_state(sm, SCI_DEV_FINAL);
return SCI_SUCCESS;
}
static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
u32 prev_state;
/* If we are entering from the stopping state let the SCI User know that
* the stop operation has completed.
*/
- prev_state = sci_dev->state_machine.previous_state_id;
- if (prev_state == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING)
+ prev_state = sci_dev->sm.previous_state_id;
+ if (prev_state == SCI_DEV_STOPPING)
isci_remote_device_stop_complete(scic_to_ihost(scic), idev);
scic_sds_controller_remote_device_stopped(scic, sci_dev);
static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
struct domain_device *dev = idev->domain_dev;
scic->remote_device_sequence[sci_dev->rnc.remote_node_index]++;
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE);
+ sci_change_state(&sci_dev->sm, SCI_STP_DEV_IDLE);
} else if (dev_is_expander(dev)) {
- sci_base_state_machine_change_state(&sci_dev->state_machine,
- SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE);
+ sci_change_state(&sci_dev->sm, SCI_SMP_DEV_IDLE);
} else
isci_remote_device_ready(scic_to_ihost(scic), idev);
}
static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct domain_device *dev = sci_dev_to_domain(sci_dev);
if (dev->dev_type == SAS_END_DEV) {
static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
scic_sds_remote_node_context_suspend(
&sci_dev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
scic_sds_remote_node_context_resume(&sci_dev->rnc, NULL, NULL);
}
static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
sci_dev->working_request = NULL;
if (scic_sds_remote_node_context_is_ready(&sci_dev->rnc)) {
static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
BUG_ON(sci_dev->working_request == NULL);
static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
isci_remote_device_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev));
static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
BUG_ON(sci_dev->working_request == NULL);
static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), state_machine);
+ struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
sci_dev->working_request = NULL;
}
static const struct sci_base_state scic_sds_remote_device_state_table[] = {
- [SCI_BASE_REMOTE_DEVICE_STATE_INITIAL] = {
+ [SCI_DEV_INITIAL] = {
.enter_state = scic_sds_remote_device_initial_state_enter,
},
- [SCI_BASE_REMOTE_DEVICE_STATE_STOPPED] = {
+ [SCI_DEV_STOPPED] = {
.enter_state = scic_sds_remote_device_stopped_state_enter,
},
- [SCI_BASE_REMOTE_DEVICE_STATE_STARTING] = {
+ [SCI_DEV_STARTING] = {
.enter_state = scic_sds_remote_device_starting_state_enter,
},
- [SCI_BASE_REMOTE_DEVICE_STATE_READY] = {
+ [SCI_DEV_READY] = {
.enter_state = scic_sds_remote_device_ready_state_enter,
.exit_state = scic_sds_remote_device_ready_state_exit
},
- [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE] = {
+ [SCI_STP_DEV_IDLE] = {
.enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter,
},
- [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD] = {
+ [SCI_STP_DEV_CMD] = {
.enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter,
},
- [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ] = { },
- [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR] = {
+ [SCI_STP_DEV_NCQ] = { },
+ [SCI_STP_DEV_NCQ_ERROR] = {
.enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter,
},
- [SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET] = { },
- [SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE] = {
+ [SCI_STP_DEV_AWAIT_RESET] = { },
+ [SCI_SMP_DEV_IDLE] = {
.enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter,
},
- [SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD] = {
+ [SCI_SMP_DEV_CMD] = {
.enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter,
.exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit,
},
- [SCI_BASE_REMOTE_DEVICE_STATE_STOPPING] = { },
- [SCI_BASE_REMOTE_DEVICE_STATE_FAILED] = { },
- [SCI_BASE_REMOTE_DEVICE_STATE_RESETTING] = {
+ [SCI_DEV_STOPPING] = { },
+ [SCI_DEV_FAILED] = { },
+ [SCI_DEV_RESETTING] = {
.enter_state = scic_sds_remote_device_resetting_state_enter,
.exit_state = scic_sds_remote_device_resetting_state_exit
},
- [SCI_BASE_REMOTE_DEVICE_STATE_FINAL] = { },
+ [SCI_DEV_FINAL] = { },
};
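/*
 * Sketch of the hook pattern the table above relies on after the rename: every
 * enter/exit callback receives only the embedded state machine and recovers
 * its parent object through the renamed 'sm' member with container_of(). The
 * body below is illustrative only and is not part of this patch.
 */
static void example_dev_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev =
		container_of(sm, typeof(*sci_dev), sm);

	dev_dbg(scirdev_to_dev(sci_dev), "%s: state %d entered\n",
		__func__, sm->current_state_id);
}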
/**
sci_dev->owning_port = sci_port;
sci_dev->started_request_count = 0;
- sci_base_state_machine_construct(&sci_dev->state_machine,
+ sci_base_state_machine_construct(&sci_dev->sm,
scic_sds_remote_device_state_table,
- SCI_BASE_REMOTE_DEVICE_STATE_INITIAL);
+ SCI_DEV_INITIAL);
- sci_base_state_machine_start(&sci_dev->state_machine);
+ sci_base_state_machine_start(&sci_dev->sm);
scic_sds_remote_node_context_construct(&sci_dev->rnc,
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
static enum sci_status scic_remote_device_start(struct scic_sds_remote_device *sci_dev,
u32 timeout)
{
- struct sci_base_state_machine *sm = &sci_dev->state_machine;
+ struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_status status;
- if (state != SCI_BASE_REMOTE_DEVICE_STATE_STOPPED) {
+ if (state != SCI_DEV_STOPPED) {
dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
__func__, state);
return SCI_FAILURE_INVALID_STATE;
if (status != SCI_SUCCESS)
return status;
- sci_base_state_machine_change_state(sm, SCI_BASE_REMOTE_DEVICE_STATE_STARTING);
+ sci_change_state(sm, SCI_DEV_STARTING);
return SCI_SUCCESS;
}
* This field contains the information for the base remote device state
* machine.
*/
- struct sci_base_state_machine state_machine;
+ struct sci_base_state_machine sm;
/**
* This field is the programmed device port width. This value is
/**
* This field contains the started request count for the remote device. The
- * device can not reach the SCI_BASE_REMOTE_DEVICE_STATE_STOPPED until all
+ * device cannot reach SCI_DEV_STOPPED until all
* requests are complete and the rnc_posted value is false.
*/
u32 started_request_count;
/**
* Simply the initial state for the base remote device state machine.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_INITIAL,
+ SCI_DEV_INITIAL,
/**
* This state indicates that the remote device has successfully been
* This state is entered from the INITIAL state.
* This state is entered from the STOPPING state.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_STOPPED,
+ SCI_DEV_STOPPED,
/**
* This state indicates that the remote device is in the process of
* are permitted.
* This state is entered from the STOPPED state.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_STARTING,
+ SCI_DEV_STARTING,
/**
* This state indicates the remote device is now ready. Thus, the user
* is able to perform IO operations on the remote device.
* This state is entered from the STARTING state.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_READY,
+ SCI_DEV_READY,
/**
* This is the idle substate for the STP remote device. When there is no
* active IO for the device it is in this state.
*/
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_IDLE,
+ SCI_STP_DEV_IDLE,
/**
* This is the command state for the STP remote device. This state is
* entered when the device is processing a non-NCQ command. The device object
* will fail any new start IO requests until this command is complete.
*/
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_CMD,
+ SCI_STP_DEV_CMD,
/**
* This is the NCQ state for the STP remote device. This state is entered
* when the device is processing an NCQ request. It will remain in this state
* so long as there is one or more NCQ requests being processed.
*/
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ,
+ SCI_STP_DEV_NCQ,
/**
* This is the NCQ error state for the STP remote device. This state is
* NCQ state. The device object will only accept a READ LOG command while in
* this state.
*/
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_NCQ_ERROR,
+ SCI_STP_DEV_NCQ_ERROR,
/**
* This READY substate indicates the device is waiting for the RESET task
* to arrive so it can recover from certain hardware-specific errors.
*/
- SCIC_SDS_STP_REMOTE_DEVICE_READY_SUBSTATE_AWAIT_RESET,
+ SCI_STP_DEV_AWAIT_RESET,
/**
* This is the idle substate for the SMP remote device. This is the
* normal operational state when no SMP request is outstanding.
*/
- SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_IDLE,
+ SCI_SMP_DEV_IDLE,
/**
* This is the command substate for the SMP remote device. This is the state
* the device is placed in while it is processing an SMP request.
*/
- SCIC_SDS_SMP_REMOTE_DEVICE_READY_SUBSTATE_CMD,
+ SCI_SMP_DEV_CMD,
/**
* This state indicates that the remote device is in the process of
* This state is entered from the READY state.
* This state is entered from the FAILED state.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_STOPPING,
+ SCI_DEV_STOPPING,
/**
* This state indicates that the remote device has failed.
* This state is entered from the INITIALIZING state.
* This state is entered from the READY state.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_FAILED,
+ SCI_DEV_FAILED,
/**
* This state indicates the device is being reset.
* In this state no new IO operations are permitted.
* This state is entered from the READY state.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_RESETTING,
+ SCI_DEV_RESETTING,
/**
* Simply the final state for the base remote device state machine.
*/
- SCI_BASE_REMOTE_DEVICE_STATE_FINAL,
+ SCI_DEV_FINAL,
};
static inline struct scic_sds_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc)
bool scic_sds_remote_node_context_is_ready(
struct scic_sds_remote_node_context *sci_rnc)
{
- u32 current_state = sci_base_state_machine_get_state(&sci_rnc->state_machine);
+ u32 current_state = sci_rnc->sm.current_state_id;
- if (current_state == SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE) {
+ if (current_state == SCI_RNC_READY) {
return true;
}
static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine);
+ struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
/* Check to see if we have gotten back to the initial state because
* someone requested to destroy the remote node context object.
*/
- if (sm->previous_state_id == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE) {
+ if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
scic_sds_remote_node_context_notify_user(rnc);
}
static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), state_machine);
+ struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
scic_sds_remote_node_context_validate_context_buffer(sci_rnc);
}
static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine);
+ struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
scic_sds_remote_node_context_invalidate_context_buffer(rnc);
}
static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine);
+ struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct scic_sds_remote_device *sci_dev;
struct domain_device *dev;
static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine);
+ struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine);
+ struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
scic_sds_remote_node_context_continue_state_transitions(rnc);
}
static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), state_machine);
+ struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
scic_sds_remote_node_context_continue_state_transitions(rnc);
}
static const struct sci_base_state scic_sds_remote_node_context_state_table[] = {
- [SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE] = {
+ [SCI_RNC_INITIAL] = {
.enter_state = scic_sds_remote_node_context_initial_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE] = {
+ [SCI_RNC_POSTING] = {
.enter_state = scic_sds_remote_node_context_posting_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE] = {
+ [SCI_RNC_INVALIDATING] = {
.enter_state = scic_sds_remote_node_context_invalidating_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE] = {
+ [SCI_RNC_RESUMING] = {
.enter_state = scic_sds_remote_node_context_resuming_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE] = {
+ [SCI_RNC_READY] = {
.enter_state = scic_sds_remote_node_context_ready_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE] = {
+ [SCI_RNC_TX_SUSPENDED] = {
.enter_state = scic_sds_remote_node_context_tx_suspended_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE] = {
+ [SCI_RNC_TX_RX_SUSPENDED] = {
.enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter,
},
- [SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE] = { },
+ [SCI_RNC_AWAIT_SUSPENSION] = { },
};
void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc,
rnc->remote_node_index = remote_node_index;
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
- sci_base_state_machine_construct(&rnc->state_machine,
+ sci_base_state_machine_construct(&rnc->sm,
scic_sds_remote_node_context_state_table,
- SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE);
+ SCI_RNC_INITIAL);
- sci_base_state_machine_start(&rnc->state_machine);
+ sci_base_state_machine_start(&rnc->sm);
}
enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc,
{
enum scis_sds_remote_node_context_states state;
- state = sci_rnc->state_machine.current_state_id;
+ state = sci_rnc->sm.current_state_id;
switch (state) {
- case SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE:
+ case SCI_RNC_POSTING:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_POST_RNC_COMPLETE:
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
break;
default:
goto out;
}
break;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE:
+ case SCI_RNC_INVALIDATING:
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
- state = SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE;
+ state = SCI_RNC_INITIAL;
else
- state = SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE;
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- state);
+ state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, state);
} else {
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
}
}
break;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE:
+ case SCI_RNC_RESUMING:
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
} else {
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
}
}
break;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE:
+ case SCI_RNC_READY:
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
break;
default:
goto out;
}
break;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE:
+ case SCI_RNC_AWAIT_SUSPENSION:
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
sci_rnc->suspension_code = scu_get_event_specifier(event_code);
break;
default:
{
enum scis_sds_remote_node_context_states state;
- state = sci_rnc->state_machine.current_state_id;
+ state = sci_rnc->sm.current_state_id;
switch (state) {
- case SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE:
+ case SCI_RNC_INVALIDATING:
scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE:
+ case SCI_RNC_POSTING:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
return SCI_SUCCESS;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE:
+ case SCI_RNC_INITIAL:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %d\n", __func__, state);
/* We have decided that the destruct request on the remote node context
{
enum scis_sds_remote_node_context_states state;
- state = sci_rnc->state_machine.current_state_id;
- if (state != SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE) {
+ state = sci_rnc->sm.current_state_id;
+ if (state != SCI_RNC_READY) {
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
}
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
return SCI_SUCCESS;
}
{
enum scis_sds_remote_node_context_states state;
- state = sci_rnc->state_machine.current_state_id;
+ state = sci_rnc->sm.current_state_id;
switch (state) {
- case SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE:
+ case SCI_RNC_INITIAL:
if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_STATE;
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
scic_sds_remote_node_context_construct_buffer(sci_rnc);
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
return SCI_SUCCESS;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE:
+ case SCI_RNC_POSTING:
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_RESUMING:
if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
return SCI_FAILURE_INVALID_STATE;
sci_rnc->user_callback = cb_fn;
sci_rnc->user_cookie = cb_p;
return SCI_SUCCESS;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE: {
+ case SCI_RNC_TX_SUSPENDED: {
struct scic_sds_remote_device *sci_dev = rnc_to_dev(sci_rnc);
struct domain_device *dev = sci_dev_to_domain(sci_dev);
/* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
if (sci_dev->is_direct_attached) {
/* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
} else {
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
}
} else
return SCI_FAILURE;
return SCI_SUCCESS;
}
- case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE:
+ case SCI_RNC_TX_RX_SUSPENDED:
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
- sci_base_state_machine_change_state(&sci_rnc->state_machine,
- SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
return SCI_FAILURE_INVALID_STATE;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE:
+ case SCI_RNC_AWAIT_SUSPENSION:
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
default:
{
enum scis_sds_remote_node_context_states state;
- state = sci_rnc->state_machine.current_state_id;
- if (state != SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE) {
+ state = sci_rnc->sm.current_state_id;
+ if (state != SCI_RNC_READY) {
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %d\n", __func__, state);
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
{
enum scis_sds_remote_node_context_states state;
- state = sci_rnc->state_machine.current_state_id;
+ state = sci_rnc->sm.current_state_id;
switch (state) {
- case SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_AWAIT_SUSPENSION:
return SCI_SUCCESS;
- case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE:
- case SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL);
return SCI_SUCCESS;
default:
* This state is the initial state for a remote node context. On a resume
* request the remote node context will transition to the posting state.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_INITIAL_STATE,
+ SCI_RNC_INITIAL,
/**
* This is a transition state that posts the RNi to the hardware. Once the RNC
* is posted the remote node context will be made ready.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_POSTING_STATE,
+ SCI_RNC_POSTING,
/**
* This is a transition state that will post an RNC invalidate to the
* hardware. Once the invalidate is complete the remote node context will
* transition to the posting state.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_INVALIDATING_STATE,
+ SCI_RNC_INVALIDATING,
/**
* This is a transition state that will post an RNC resume to the hardware.
* Once the event notification of resume complete is received the remote node
* context will transition to the ready state.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_RESUMING_STATE,
+ SCI_RNC_RESUMING,
/**
* This is the state that the remote node context must be in to accept io
* request operations.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_READY_STATE,
+ SCI_RNC_READY,
/**
* This is the state that the remote node context transitions to when it gets
* a TX suspend notification from the hardware.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_TX_SUSPENDED_STATE,
+ SCI_RNC_TX_SUSPENDED,
/**
* This is the state that the remote node context transitions to when it gets
* a TX RX suspend notification from the hardware.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_TX_RX_SUSPENDED_STATE,
+ SCI_RNC_TX_RX_SUSPENDED,
/**
* This state is a wait state for the remote node context that waits for a
* there is a request to suspend the remote node context or when there is a TC
* completion where the remote node will be suspended by the hardware.
*/
- SCIC_SDS_REMOTE_NODE_CONTEXT_AWAIT_SUSPENSION_STATE
+ SCI_RNC_AWAIT_SUSPENSION
};
/**
/**
* This field contains the data for the object's state machine.
*/
- struct sci_base_state_machine state_machine;
+ struct sci_base_state_machine sm;
};
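/*
 * The fields of struct sci_base_state_machine that this rename depends on,
 * inferred from the accesses in the hunks above; member names and types other
 * than current_state_id and previous_state_id are assumptions made for
 * illustration only.
 */
struct example_base_state_machine {
	const struct sci_base_state *state_table;	/* assumed name */
	u32 initial_state_id;				/* assumed */
	u32 current_state_id;				/* read throughout this patch */
	u32 previous_state_id;				/* read in *_state_enter hooks */
};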
void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc,
scic_sds_io_request_build_ssp_command_iu(sci_req);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_CONSTRUCTED);
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
/* Fill in the SSP Task IU */
scic_sds_task_request_build_ssp_task_iu(sci_req);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_CONSTRUCTED);
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
copy);
if (status == SCI_SUCCESS)
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_CONSTRUCTED);
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
return status;
}
if (status != SCI_SUCCESS)
return status;
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_CONSTRUCTED);
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
return status;
}
scic_sds_remote_device_get_sequence(sci_req->target_device))
return SCI_FAILURE;
- state = sci_req->state_machine.current_state_id;
- if (state != SCI_BASE_REQUEST_STATE_CONSTRUCTED) {
+ state = sci_req->sm.current_state_id;
+ if (state != SCI_REQ_CONSTRUCTED) {
dev_warn(scic_to_dev(scic),
"%s: SCIC IO Request requested to start while in wrong "
"state %d\n", __func__, state);
sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag);
/* Everything is good go ahead and change state */
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_STARTED);
+ sci_change_state(&sci_req->sm, SCI_REQ_STARTED);
return SCI_SUCCESS;
}
{
enum sci_base_request_states state;
- state = sci_req->state_machine.current_state_id;
+ state = sci_req->sm.current_state_id;
switch (state) {
- case SCI_BASE_REQUEST_STATE_CONSTRUCTED:
+ case SCI_REQ_CONSTRUCTED:
scic_sds_request_set_status(sci_req,
SCU_TASK_DONE_TASK_ABORT,
SCI_FAILURE_IO_TERMINATED);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
- case SCI_BASE_REQUEST_STATE_STARTED:
- case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION:
- case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE:
- case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION:
- case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE:
- case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE:
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_ABORTING);
+ case SCI_REQ_STARTED:
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ case SCI_REQ_SMP_WAIT_RESP:
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ case SCI_REQ_STP_PIO_WAIT_FRAME:
+ case SCI_REQ_STP_PIO_DATA_IN:
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+ sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
return SCI_SUCCESS;
- case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_ABORTING);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_change_state(&sci_req->sm, SCI_REQ_ABORTING);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
- case SCI_BASE_REQUEST_STATE_ABORTING:
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ case SCI_REQ_ABORTING:
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
- case SCI_BASE_REQUEST_STATE_COMPLETED:
+ case SCI_REQ_COMPLETED:
default:
dev_warn(scic_to_dev(sci_req->owning_controller),
"%s: SCIC IO Request requested to abort while in wrong "
"state %d\n",
__func__,
- sci_base_state_machine_get_state(&sci_req->state_machine));
+ sci_req->sm.current_state_id);
break;
}
enum sci_base_request_states state;
struct scic_sds_controller *scic = sci_req->owning_controller;
- state = sci_req->state_machine.current_state_id;
- if (WARN_ONCE(state != SCI_BASE_REQUEST_STATE_COMPLETED,
+ state = sci_req->sm.current_state_id;
+ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
"isci: request completion from wrong state (%d)\n", state))
return SCI_FAILURE_INVALID_STATE;
sci_req->saved_rx_frame_index);
/* XXX can we just stop the machine and remove the 'final' state? */
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_FINAL);
+ sci_change_state(&sci_req->sm, SCI_REQ_FINAL);
return SCI_SUCCESS;
}
enum sci_base_request_states state;
struct scic_sds_controller *scic = sci_req->owning_controller;
- state = sci_req->state_machine.current_state_id;
+ state = sci_req->sm.current_state_id;
- if (state != SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE) {
+ if (state != SCI_REQ_STP_PIO_DATA_IN) {
dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n",
__func__, event_code, state);
/* We are waiting for data and the SCU has R_ERR'd the data frame.
* Go back to waiting for the D2H Register FIS
*/
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
return SCI_SUCCESS;
default:
dev_err(scic_to_dev(scic),
memcpy(resp_buf, ssp_response->resp_data, len);
}
-static enum sci_status request_started_state_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+request_started_state_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
struct ssp_response_iu *resp_iu;
u8 datapres;
*/
/* In all cases we will treat this as the completion of the IO req. */
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
}
-static enum sci_status request_aborting_state_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+request_aborting_state_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
SCI_FAILURE_IO_TERMINATED);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
default:
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
+ sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
/* Currently, the decision is to simply allow the task request
"ACK/NAK timeout\n", __func__, sci_req,
completion_code);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
+ sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP);
break;
default:
- /* All other completion status cause the IO to be complete. If a NAK
- * was received, then it is up to the user to retry the request.
+ /*
+ * All other completion status values cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
*/
scic_sds_request_set_status(sci_req,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
-static enum sci_status smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
SCI_FAILURE_RETRY_REQUIRED);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
default:
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
-static enum sci_status smp_request_await_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+smp_request_await_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
default:
/* All other completion status values cause the IO to be
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
return current_sgl;
}
-static enum sci_status stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
break;
default:
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
return status;
}
-static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
enum sci_status status = SCI_SUCCESS;
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_GOOD,
+ SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
break;
default:
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
return status;
}
-static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
enum sci_status status = SCI_SUCCESS;
bool all_frames_transferred = false;
/* all data transferred. */
if (all_frames_transferred) {
/*
- * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
+ * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
* and wait for a PIO_SETUP or D2H Register FIS. */
- sci_base_state_machine_change_state(
- &sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
- );
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
}
break;
+
default:
/*
- * All other completion status cause the IO to be complete. If a NAK
- * was received, then it is up to the user to retry the request. */
+ * All other completion status values cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
scic_sds_request_set_status(
sci_req,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
- SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
- );
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(
- &sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED
- );
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
enum sci_status sci_status)
{
scic_sds_request_set_status(request, scu_status, sci_status);
- sci_base_state_machine_change_state(&request->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&request->sm, SCI_REQ_COMPLETED);
}
static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
return status;
}
-enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
- u32 frame_index)
+enum sci_status
+scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
+ u32 frame_index)
{
struct scic_sds_controller *scic = sci_req->owning_controller;
struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
enum sci_status status;
ssize_t word_cnt;
- state = sci_req->state_machine.current_state_id;
+ state = sci_req->sm.current_state_id;
switch (state) {
- case SCI_BASE_REQUEST_STATE_STARTED: {
+ case SCI_REQ_STARTED: {
struct ssp_frame_hdr ssp_hdr;
void *frame_header;
}
/*
- * In any case we are done with this frame buffer return it to the
- * controller
+ * In any case we are done with this frame buffer; return it to
+ * the controller.
*/
scic_sds_controller_release_frame(scic, frame_index);
return SCI_SUCCESS;
}
- case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
+
+ case SCI_REQ_TASK_WAIT_TC_RESP:
scic_sds_io_request_copy_response(sci_req);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
scic_sds_controller_release_frame(scic, frame_index);
return SCI_SUCCESS;
- case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: {
+
+ case SCI_REQ_SMP_WAIT_RESP: {
struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
void *frame_header;
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
+ sci_change_state(&sci_req->sm, SCI_REQ_SMP_WAIT_TC_COMP);
} else {
- /* This was not a response frame why did it get forwarded? */
+ /*
+ * This was not a response frame, so why did it get
+ * forwarded?
+ */
dev_err(scic_to_dev(scic),
- "%s: SCIC SMP Request 0x%p received unexpected frame "
- "%d type 0x%02x\n", __func__, sci_req,
- frame_index, rsp_hdr->frame_type);
+ "%s: SCIC SMP Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n",
+ __func__,
+ sci_req,
+ frame_index,
+ rsp_hdr->frame_type);
scic_sds_request_set_status(sci_req,
SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
}
scic_sds_controller_release_frame(scic, frame_index);
return SCI_SUCCESS;
}
- case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
- return scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
- case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return scic_sds_stp_request_udma_general_frame_handler(sci_req,
+ frame_index);
+
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
/* Use the general frame handler to copy the response data */
- status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
+ status = scic_sds_stp_request_udma_general_frame_handler(sci_req,
+ frame_index);
if (status != SCI_SUCCESS)
return status;
scic_sds_stp_request_udma_complete_request(sci_req,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
+
return SCI_SUCCESS;
- case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: {
+
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
if (status != SCI_SUCCESS) {
dev_err(scic_to_dev(scic),
- "%s: SCIC IO Request 0x%p could not get frame header "
- "for frame index %d, status %x\n",
- __func__, stp_req, frame_index, status);
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
return status;
}
break;
}
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
/* Frame has been decoded return it to the controller */
scic_sds_controller_release_frame(scic, frame_index);
return status;
}
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: {
+
+ case SCI_REQ_STP_PIO_WAIT_FRAME: {
struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(ireq);
struct dev_to_host_fis *frame_header;
if (status != SCI_SUCCESS) {
dev_err(scic_to_dev(scic),
- "%s: SCIC IO Request 0x%p could not get frame header "
- "for frame index %d, status %x\n",
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
__func__, stp_req, frame_index, status);
return status;
}
frame_index,
(void **)&frame_buffer);
- /* Get the data from the PIO Setup The SCU Hardware returns
- * first word in the frame_header and the rest of the data is in
- * the frame buffer so we need to back up one dword
+ /* Get the data from the PIO Setup. The SCU hardware
+ * returns the first word in the frame_header and the rest
+ * of the data is in the frame buffer, so we need to
+ * back up one dword.
*/
/* transfer_count: first 16bits in the 4th dword */
* request was PIO Data-in or Data out
*/
if (task->data_dir == DMA_FROM_DEVICE) {
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_IN);
} else if (task->data_dir == DMA_TO_DEVICE) {
/* Transmit data */
status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
if (status != SCI_SUCCESS)
break;
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_DATA_OUT);
}
break;
+
case FIS_SETDEVBITS:
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
break;
+
case FIS_REGD2H:
if (frame_header->status & ATA_BUSY) {
- /* Now why is the drive sending a D2H Register FIS when
- * it is still busy? Do nothing since we are still in
- * the right state.
+ /*
+ * Now why is the drive sending a D2H Register
+ * FIS when it is still busy? Do nothing since
+ * we are still in the right state.
*/
dev_dbg(scic_to_dev(scic),
"%s: SCIC PIO Request 0x%p received "
"D2H Register FIS with BSY status "
- "0x%x\n", __func__, stp_req,
+ "0x%x\n",
+ __func__,
+ stp_req,
frame_header->status);
break;
}
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
+
default:
/* FIXME: what do we do here? */
break;
return status;
}
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: {
+
+ case SCI_REQ_STP_PIO_DATA_IN: {
struct dev_to_host_fis *frame_header;
struct sata_fis_data *frame_buffer;
if (status != SCI_SUCCESS) {
dev_err(scic_to_dev(scic),
- "%s: SCIC IO Request 0x%p could not get frame header "
- "for frame index %d, status %x\n",
- __func__, stp_req, frame_index, status);
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
return status;
}
dev_err(scic_to_dev(scic),
"%s: SCIC PIO Request 0x%p received frame %d "
"with fis type 0x%02x when expecting a data "
- "fis.\n", __func__, stp_req, frame_index,
+ "fis.\n",
+ __func__,
+ stp_req,
+ frame_index,
frame_header->fis_type);
scic_sds_request_set_status(sci_req,
SCU_TASK_DONE_GOOD,
SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
/* Frame is decoded return it to the controller */
scic_sds_controller_release_frame(scic, frame_index);
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
} else {
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
}
return status;
}
- case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: {
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(scic_to_dev(scic),
- "%s: SCIC IO Request 0x%p could not get frame header "
- "for frame index %d, status %x\n",
- __func__, stp_req, frame_index, status);
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
return status;
}
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
break;
+
default:
dev_warn(scic_to_dev(scic),
"%s: IO Request:0x%p Frame Id:%d protocol "
- "violation occurred\n", __func__, stp_req,
+ "violation occurred\n",
+ __func__,
+ stp_req,
frame_index);
- scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
+ scic_sds_request_set_status(sci_req,
+ SCU_TASK_DONE_UNEXP_FIS,
SCI_FAILURE_PROTOCOL_VIOLATION);
break;
}
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
/* Frame has been decoded return it to the controller */
scic_sds_controller_release_frame(scic, frame_index);
return status;
}
- case SCI_BASE_REQUEST_STATE_ABORTING:
- /* TODO: Is it even possible to get an unsolicited frame in the
+ case SCI_REQ_ABORTING:
+ /*
+ * TODO: Is it even possible to get an unsolicited frame in the
* aborting state?
*/
scic_sds_controller_release_frame(scic, frame_index);
return SCI_SUCCESS;
+
default:
dev_warn(scic_to_dev(scic),
- "%s: SCIC IO Request given unexpected frame %x while in "
- "state %d\n", __func__, frame_index, state);
+ "%s: SCIC IO Request given unexpected frame %x while "
+ "in state %d\n",
+ __func__,
+ frame_index,
+ state);
scic_sds_controller_release_frame(scic, frame_index);
return SCI_FAILURE_INVALID_STATE;
* the device so we must change state to wait
* for it
*/
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
}
break;
return status;
}
-static enum sci_status stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
break;
default:
/*
- * All other completion status cause the IO to be complete. If a NAK
- * was received, then it is up to the user to retry the request. */
+ * All other completion status values cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
scic_sds_request_set_status(sci_req,
- SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
- SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
-static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event(
- struct scic_sds_request *sci_req,
- u32 completion_code)
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
+ sci_change_state(&sci_req->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
break;
default:
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_COMPLETED);
+ sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED);
break;
}
}
enum sci_status
-scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 completion_code)
+scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req,
+ u32 completion_code)
{
enum sci_base_request_states state;
struct scic_sds_controller *scic = sci_req->owning_controller;
- state = sci_req->state_machine.current_state_id;
+ state = sci_req->sm.current_state_id;
switch (state) {
- case SCI_BASE_REQUEST_STATE_STARTED:
- return request_started_state_tc_event(sci_req, completion_code);
- case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION:
- return ssp_task_request_await_tc_event(sci_req, completion_code);
- case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE:
- return smp_request_await_response_tc_event(sci_req, completion_code);
- case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION:
- return smp_request_await_tc_event(sci_req, completion_code);
- case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
- return stp_request_udma_await_tc_event(sci_req, completion_code);
- case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE:
- return stp_request_non_data_await_h2d_tc_event(sci_req, completion_code);
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE:
- return stp_request_pio_await_h2d_completion_tc_event(sci_req, completion_code);
- case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE:
- return pio_data_out_tx_done_tc_event(sci_req, completion_code);
- case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE:
- return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, completion_code);
- case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE:
- return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, completion_code);
- case SCI_BASE_REQUEST_STATE_ABORTING:
- return request_aborting_state_tc_event(sci_req, completion_code);
- default:
- dev_warn(scic_to_dev(scic),
- "%s: SCIC IO Request given task completion notification %x "
- "while in wrong state %d\n", __func__, completion_code,
- state);
- return SCI_FAILURE_INVALID_STATE;
+ case SCI_REQ_STARTED:
+ return request_started_state_tc_event(sci_req, completion_code);
+
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ return ssp_task_request_await_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_RESP:
+ return smp_request_await_response_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ return smp_request_await_tc_event(sci_req, completion_code);
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return stp_request_udma_await_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ return stp_request_non_data_await_h2d_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ return stp_request_pio_await_h2d_completion_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ return pio_data_out_tx_done_tc_event(sci_req, completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req,
+ completion_code);
+
+ case SCI_REQ_ABORTING:
+ return request_aborting_state_tc_event(sci_req,
+ completion_code);
+
+ default:
+ dev_warn(scic_to_dev(scic),
+ "%s: SCIC IO Request given task completion "
+ "notification %x while in wrong state %d\n",
+ __func__,
+ completion_code,
+ state);
+ return SCI_FAILURE_INVALID_STATE;
}
}
static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
struct sas_task *task;
* substates
*/
if (!task && dev->dev_type == SAS_END_DEV) {
- sci_base_state_machine_change_state(sm,
- SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
+ sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
} else if (!task &&
(isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
- sci_base_state_machine_change_state(sm,
- SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
+ sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
- sci_base_state_machine_change_state(sm,
- SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
+ sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
} else if (task && sas_protocol_ata(task->task_proto) &&
!task->ata_task.use_ncq) {
u32 state;
if (task->data_dir == DMA_NONE)
- state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
+ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
else if (task->ata_task.dma_xfer)
- state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
+ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
else /* PIO */
- state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;
+ state = SCI_REQ_STP_PIO_WAIT_H2D;
- sci_base_state_machine_change_state(sm, state);
+ sci_change_state(sm, state);
}
}
static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
struct scic_sds_controller *scic = sci_req->owning_controller;
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_request *ireq = sci_req_to_ireq(sci_req);
static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
/* Setting the abort bit in the Task Context is required by the silicon. */
sci_req->task_context_buffer->abort = 1;
static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
scic_sds_remote_device_set_working_request(sci_req->target_device,
sci_req);
static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
scic_sds_remote_device_set_working_request(sci_req->target_device,
sci_req);
static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
scic_sds_remote_device_set_working_request(sci_req->target_device,
sci_req);
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), state_machine);
+ struct scic_sds_request *sci_req = container_of(sm, typeof(*sci_req), sm);
struct scu_task_context *task_context;
struct host_to_dev_fis *h2d_fis;
enum sci_status status;
}
static const struct sci_base_state scic_sds_request_state_table[] = {
- [SCI_BASE_REQUEST_STATE_INITIAL] = { },
- [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = { },
- [SCI_BASE_REQUEST_STATE_STARTED] = {
+ [SCI_REQ_INIT] = { },
+ [SCI_REQ_CONSTRUCTED] = { },
+ [SCI_REQ_STARTED] = {
.enter_state = scic_sds_request_started_state_enter,
},
- [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
+ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
},
- [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { },
- [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
+ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+ [SCI_REQ_STP_PIO_WAIT_H2D] = {
.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
},
- [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { },
- [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = { },
- [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { },
- [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { },
- [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { },
- [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
+ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+ [SCI_REQ_STP_PIO_DATA_IN] = { },
+ [SCI_REQ_STP_PIO_DATA_OUT] = { },
+ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
},
- [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
},
- [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { },
- [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { },
- [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { },
- [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { },
- [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { },
- [SCI_BASE_REQUEST_STATE_COMPLETED] = {
+ [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+ [SCI_REQ_SMP_WAIT_RESP] = { },
+ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_COMPLETED] = {
.enter_state = scic_sds_request_completed_state_enter,
},
- [SCI_BASE_REQUEST_STATE_ABORTING] = {
+ [SCI_REQ_ABORTING] = {
.enter_state = scic_sds_request_aborting_state_enter,
},
- [SCI_BASE_REQUEST_STATE_FINAL] = { },
+ [SCI_REQ_FINAL] = { },
};
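/*
 * Illustrative sketch (an assumption about the mechanism, not code from this
 * patch): sci_change_state() is expected to run the .enter_state hook
 * registered for the destination state in a table like the one above,
 * roughly as follows (exit hooks and bookkeeping omitted).
 */
static void example_run_enter_hook(struct sci_base_state_machine *sm)
{
	const struct sci_base_state *s =
		&scic_sds_request_state_table[sm->current_state_id];

	if (s->enter_state)
		s->enter_state(sm); /* e.g. scic_sds_request_started_state_enter() */
}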
-static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
- struct scic_sds_remote_device *sci_dev,
- u16 io_tag, struct scic_sds_request *sci_req)
+static void
+scic_sds_general_request_construct(struct scic_sds_controller *scic,
+ struct scic_sds_remote_device *sci_dev,
+ u16 io_tag,
+ struct scic_sds_request *sci_req)
{
- sci_base_state_machine_construct(&sci_req->state_machine,
+ sci_base_state_machine_construct(&sci_req->sm,
scic_sds_request_state_table,
- SCI_BASE_REQUEST_STATE_INITIAL);
- sci_base_state_machine_start(&sci_req->state_machine);
+ SCI_REQ_INIT);
+ sci_base_state_machine_start(&sci_req->sm);
sci_req->io_tag = io_tag;
sci_req->owning_controller = scic;
scu_smp_request_construct_task_context(sci_req, smp_req->req_len);
- sci_base_state_machine_change_state(&sci_req->state_machine,
- SCI_BASE_REQUEST_STATE_CONSTRUCTED);
+ sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
u32 udma;
struct scic_sds_stp_pio_request {
- /**
- * Total transfer for the entire PIO request recorded at request constuction
- * time.
+ /*
+ * Total transfer for the entire PIO request recorded
+ * at request construction time.
*
- * @todo Should we just decrement this value for each byte of data transitted
- * or received to elemenate the current_transfer_bytes field?
+ * @todo Should we just decrement this value for each
+ * byte of data transmitted or received to eliminate
+ * the current_transfer_bytes field?
*/
u32 total_transfer_bytes;
- /**
- * Total number of bytes received/transmitted in data frames since the start
- * of the IO request. At the end of the IO request this should equal the
+ /*
+ * Total number of bytes received/transmitted in data
+ * frames since the start of the IO request. At the
+ * end of the IO request this should equal the
* total_transfer_bytes.
*/
u32 current_transfer_bytes;
- /**
- * The number of bytes requested in the in the PIO setup.
+ /*
+ * The number of bytes requested in the PIO
+ * setup.
*/
u32 pio_transfer_bytes;
- /**
- * PIO Setup ending status value to tell us if we need to wait for another FIS
- * or if the transfer is complete. On the receipt of a D2H FIS this will be
+ /*
+ * PIO Setup ending status value to tell us if we need
+ * to wait for another FIS or if the transfer is
+ * complete. On the receipt of a D2H FIS this will be
* the status field of that FIS.
*/
u8 ending_status;
- /**
- * On receipt of a D2H FIS this will be the ending error field if the
- * ending_status has the SATA_STATUS_ERR bit set.
+ /*
+ * On receipt of a D2H FIS this will be the ending
+ * error field if the ending_status has the
+ * SATA_STATUS_ERR bit set.
*/
u8 ending_error;
} pio;
struct {
- /**
- * The number of bytes requested in the PIO setup before CDB data frame.
+ /*
+ * The number of bytes requested in the PIO setup
+ * before CDB data frame.
*/
u32 device_preferred_cdb_length;
} packet;
};
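/*
 * Illustrative sketch (hypothetical helper, not in the driver): the PIO byte
 * accounting described by the field comments above amounts to advancing
 * current_transfer_bytes for each data frame until it reaches
 * total_transfer_bytes.
 */
static bool example_pio_account_frame(struct scic_sds_stp_pio_request *pio,
				      u32 frame_bytes)
{
	pio->current_transfer_bytes += frame_bytes;
	return pio->current_transfer_bytes >= pio->total_transfer_bytes;
}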
struct scic_sds_request {
- /**
- * This field contains the information for the base request state machine.
+ /*
+ * This field contains the information for the base request state
+ * machine.
*/
- struct sci_base_state_machine state_machine;
+ struct sci_base_state_machine sm;
- /**
+ /*
* This field simply points to the controller to which this IO request
* is associated.
*/
struct scic_sds_controller *owning_controller;
- /**
- * This field simply points to the remote device to which this IO request
- * is associated.
+ /*
+ * This field simply points to the remote device to which this IO
+ * request is associated.
*/
struct scic_sds_remote_device *target_device;
- /**
+ /*
* This field is utilized to determine if the SCI user is managing
* the IO tag for this request or if the core is managing it.
*/
bool was_tag_assigned_by_user;
- /**
+ /*
* This field indicates the IO tag for this request. The IO tag is
* comprised of the task_index and a sequence count. The sequence count
* is utilized to help identify tasks from one life to another.
*/
u16 io_tag;
- /**
+ /*
* This field specifies the protocol being utilized for this
* IO request.
*/
enum sci_request_protocol protocol;
- /**
+ /*
* This field indicates the completion status taken from the SCUs
- * completion code. It indicates the completion result for the SCU hardware.
+ * completion code. It indicates the completion result for the SCU
+ * hardware.
*/
u32 scu_status;
- /**
- * This field indicates the completion status returned to the SCI user. It
- * indicates the users view of the io request completion.
+ /*
+ * This field indicates the completion status returned to the SCI user.
+ * It indicates the user's view of the io request completion.
*/
u32 sci_status;
- /**
- * This field contains the value to be utilized when posting (e.g. Post_TC,
- * Post_TC_Abort) this request to the silicon.
+ /*
+ * This field contains the value to be utilized when posting
+ * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
*/
u32 post_context;
#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
- /**
+ /*
* This field indicates if this request is a task management request or
* normal IO request.
*/
bool is_task_management_request;
- /**
- * This field is a pointer to the stored rx frame data. It is used in STP
- * internal requests and SMP response frames. If this field is non-NULL the
- * saved frame must be released on IO request completion.
+ /*
+ * This field is a pointer to the stored rx frame data. It is used in
+ * STP internal requests and SMP response frames. If this field is
+ * non-NULL the saved frame must be released on IO request completion.
*
* @todo In the future do we want to keep a list of RX frame buffers?
*/
u32 saved_rx_frame_index;
- /**
- * This field in the recorded device sequence for the io request. This is
- * recorded during the build operation and is compared in the start
- * operation. If the sequence is different then there was a change of
- * devices from the build to start operations.
+ /*
+ * This field is the recorded device sequence for the io request.
+ * This is recorded during the build operation and is compared in the
+ * start operation. If the sequence is different then there was a
+ * change of devices from the build to start operations.
*/
u8 device_sequence;
dma_addr_t request_daddr;
dma_addr_t zero_scatter_daddr;
- unsigned int num_sg_entries; /* returned by pci_alloc_sg */
+ unsigned int num_sg_entries; /* returned by pci_alloc_sg */
/** Note: "io_request_completion" is completed in two different ways
* depending on whether this is a TMF or regular request.
*
*/
enum sci_base_request_states {
- /**
+ /*
* Simply the initial state for the base request state machine.
*/
- SCI_BASE_REQUEST_STATE_INITIAL,
+ SCI_REQ_INIT,
- /**
- * This state indicates that the request has been constructed. This state
- * is entered from the INITIAL state.
+ /*
+ * This state indicates that the request has been constructed.
+ * This state is entered from the INITIAL state.
*/
- SCI_BASE_REQUEST_STATE_CONSTRUCTED,
+ SCI_REQ_CONSTRUCTED,
- /**
- * This state indicates that the request has been started. This state is
- * entered from the CONSTRUCTED state.
+ /*
+ * This state indicates that the request has been started. This state
+ * is entered from the CONSTRUCTED state.
*/
- SCI_BASE_REQUEST_STATE_STARTED,
+ SCI_REQ_STARTED,
- SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
- SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,
+ SCI_REQ_STP_UDMA_WAIT_TC_COMP,
+ SCI_REQ_STP_UDMA_WAIT_D2H,
- SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
- SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,
+ SCI_REQ_STP_NON_DATA_WAIT_H2D,
+ SCI_REQ_STP_NON_DATA_WAIT_D2H,
- SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
- SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
- SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
+ SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
- /**
- * While in this state the IO request object is waiting for the TC completion
- * notification for the H2D Register FIS
+ /*
+ * While in this state the IO request object is waiting for the TC
+ * completion notification for the H2D Register FIS
*/
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,
+ SCI_REQ_STP_PIO_WAIT_H2D,
- /**
- * While in this state the IO request object is waiting for either a PIO Setup
- * FIS or a D2H register FIS. The type of frame received is based on the
- * result of the prior frame and line conditions.
+ /*
+ * While in this state the IO request object is waiting for either a
+ * PIO Setup FIS or a D2H register FIS. The type of frame received is
+ * based on the result of the prior frame and line conditions.
*/
- SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,
+ SCI_REQ_STP_PIO_WAIT_FRAME,
- /**
- * While in this state the IO request object is waiting for a DATA frame from
- * the device.
+ /*
+ * While in this state the IO request object is waiting for a DATA
+ * frame from the device.
*/
- SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,
+ SCI_REQ_STP_PIO_DATA_IN,
- /**
- * While in this state the IO request object is waiting to transmit the next data
- * frame to the device.
+ /*
+ * While in this state the IO request object is waiting to transmit
+ * the next data frame to the device.
*/
- SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,
+ SCI_REQ_STP_PIO_DATA_OUT,
- /**
+ /*
* The AWAIT_TC_COMPLETION sub-state indicates that the started raw
* task management request is waiting for the transmission of the
* initial frame (i.e. command, task, etc.).
*/
- SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,
+ SCI_REQ_TASK_WAIT_TC_COMP,
- /**
+ /*
* This sub-state indicates that the started task management request
* is waiting for the reception of an unsolicited frame
* (i.e. response IU).
*/
- SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,
+ SCI_REQ_TASK_WAIT_TC_RESP,
- /**
+ /*
+ * This sub-state indicates that the started SMP request
+ * is waiting for the reception of an unsolicited frame
* (i.e. response IU).
*/
- SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,
+ SCI_REQ_SMP_WAIT_RESP,
- /**
- * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is
- * waiting for the transmission of the initial frame (i.e. command, task, etc.).
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
+ * request is waiting for the transmission of the initial frame
+ * (i.e. command, task, etc.).
*/
- SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,
+ SCI_REQ_SMP_WAIT_TC_COMP,
- /**
+ /*
* This state indicates that the request has completed.
- * This state is entered from the STARTED state. This state is entered from
- * the ABORTING state.
+ * This state is entered from the STARTED state. This state is entered
+ * from the ABORTING state.
*/
- SCI_BASE_REQUEST_STATE_COMPLETED,
+ SCI_REQ_COMPLETED,
- /**
+ /*
* This state indicates that the request is in the process of being
* terminated/aborted.
* This state is entered from the CONSTRUCTED state.
* This state is entered from the STARTED state.
*/
- SCI_BASE_REQUEST_STATE_ABORTING,
+ SCI_REQ_ABORTING,
- /**
+ /*
* Simply the final state for the base request state machine.
*/
- SCI_BASE_REQUEST_STATE_FINAL,
+ SCI_REQ_FINAL,
};
/**
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
-enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
- u32 event_code);
-enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
- u32 frame_index);
-enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
-extern enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req);
-extern enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
+enum sci_status
+scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
+ u32 event_code);
+enum sci_status
+scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
+ u32 frame_index);
+enum sci_status
+scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
+extern enum sci_status
+scic_sds_request_complete(struct scic_sds_request *sci_req);
+extern enum sci_status
+scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
}
/* XXX open code in caller */
-static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
- void *virt_addr)
+static inline dma_addr_t
+scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
{
struct isci_request *ireq = sci_req_to_ireq(sci_req);
*
* status of the object as a isci_request_status enum.
*/
-static inline
-enum isci_request_status isci_request_get_state(
- struct isci_request *isci_request)
+static inline enum isci_request_status
+isci_request_get_state(struct isci_request *isci_request)
{
BUG_ON(isci_request == NULL);
* @status: This Parameter is the new status of the object
*
*/
-static inline enum isci_request_status isci_request_change_state(
- struct isci_request *isci_request,
- enum isci_request_status status)
+static inline enum isci_request_status
+isci_request_change_state(struct isci_request *isci_request,
+ enum isci_request_status status)
{
enum isci_request_status old_state;
unsigned long flags;
*
* state previous to any change.
*/
-static inline enum isci_request_status isci_request_change_started_to_newstate(
- struct isci_request *isci_request,
- struct completion *completion_ptr,
- enum isci_request_status newstate)
+static inline enum isci_request_status
+isci_request_change_started_to_newstate(struct isci_request *isci_request,
+ struct completion *completion_ptr,
+ enum isci_request_status newstate)
{
enum isci_request_status old_state;
unsigned long flags;
isci_request->io_request_completion = completion_ptr;
isci_request->status = newstate;
}
+
spin_unlock_irqrestore(&isci_request->state_lock, flags);
dev_dbg(&isci_request->isci_host->pdev->dev,
*
* state previous to any change.
*/
-static inline enum isci_request_status isci_request_change_started_to_aborted(
- struct isci_request *isci_request,
- struct completion *completion_ptr)
+static inline enum isci_request_status
+isci_request_change_started_to_aborted(struct isci_request *isci_request,
+ struct completion *completion_ptr)
{
- return isci_request_change_started_to_newstate(
- isci_request, completion_ptr, aborted
- );
+ return isci_request_change_started_to_newstate(isci_request,
+ completion_ptr,
+ aborted);
}
/**
* isci_request_free() - This function frees the request object.
* @isci_request: This parameter points to the isci_request object
*
*/
-static inline void isci_request_free(
- struct isci_host *isci_host,
- struct isci_request *isci_request)
+static inline void isci_request_free(struct isci_host *isci_host,
+ struct isci_request *isci_request)
{
if (!isci_request)
return;
/* release the dma memory if we fail. */
- dma_pool_free(isci_host->dma_pool, isci_request,
+ dma_pool_free(isci_host->dma_pool,
+ isci_request,
isci_request->request_daddr);
}
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
-/* #define ISCI_REQUEST_VALIDATE_ACCESS
- */
-
-#ifdef ISCI_REQUEST_VALIDATE_ACCESS
-
-static inline
-struct sas_task *isci_request_access_task(struct isci_request *isci_request)
-{
- BUG_ON(isci_request->ttype != io_task);
- return isci_request->ttype_ptr.io_task_ptr;
-}
-
-static inline
-struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
-{
- BUG_ON(isci_request->ttype != tmf_task);
- return isci_request->ttype_ptr.tmf_task_ptr;
-}
-
-#else /* not ISCI_REQUEST_VALIDATE_ACCESS */
-
-#define isci_request_access_task(RequestPtr) \
- ((RequestPtr)->ttype_ptr.io_task_ptr)
-
-#define isci_request_access_tmf(RequestPtr) \
- ((RequestPtr)->ttype_ptr.tmf_task_ptr)
-
-#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
-
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
-int isci_request_alloc_tmf(
- struct isci_host *isci_host,
- struct isci_tmf *isci_tmf,
- struct isci_request **isci_request,
- struct isci_remote_device *isci_device,
- gfp_t gfp_flags);
+int isci_request_alloc_tmf(struct isci_host *isci_host,
+ struct isci_tmf *isci_tmf,
+ struct isci_request **isci_request,
+ struct isci_remote_device *isci_device,
+ gfp_t gfp_flags);
-int isci_request_execute(
- struct isci_host *isci_host,
- struct sas_task *task,
- struct isci_request **request,
- gfp_t gfp_flags);
+int isci_request_execute(struct isci_host *isci_host,
+ struct sas_task *task,
+ struct isci_request **request,
+ gfp_t gfp_flags);
/**
* isci_request_unmap_sgl() - This function unmaps the DMA address of a given
* @*pdev: This Parameter is the pci_device struct for the controller
*
*/
-static inline void isci_request_unmap_sgl(
- struct isci_request *request,
- struct pci_dev *pdev)
+static inline void
+isci_request_unmap_sgl(struct isci_request *request, struct pci_dev *pdev)
{
struct sas_task *task = isci_request_access_task(request);
*
* pointer to the next sge for specified request.
*/
-static inline void *isci_request_io_request_get_next_sge(
- struct isci_request *request,
- void *current_sge_address)
+static inline void *
+isci_request_io_request_get_next_sge(struct isci_request *request,
+ void *current_sge_address)
{
struct sas_task *task = isci_request_access_task(request);
void *ret = NULL;
return ret;
}
-void isci_terminate_pending_requests(struct isci_host *isci_host,
- struct isci_remote_device *isci_device,
- enum isci_request_status new_request_state);
-enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
- struct scic_sds_remote_device *sci_dev,
- u16 io_tag,
- struct scic_sds_request *sci_req);
-enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
-enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
-void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
+void
+isci_terminate_pending_requests(struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ enum isci_request_status new_request_state);
+enum sci_status
+scic_task_request_construct(struct scic_sds_controller *scic,
+ struct scic_sds_remote_device *sci_dev,
+ u16 io_tag,
+ struct scic_sds_request *sci_req);
+enum sci_status
+scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
+enum sci_status
+scic_task_request_construct_sata(struct scic_sds_request *sci_req);
+void
+scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
#endif /* !defined(_ISCI_REQUEST_H_) */
sci_state_machine_exit_state(sm);
}
-/**
- * This method performs an update to the current state of the state machine.
- * @sm: This parameter specifies the state machine for which
- * the caller wishes to perform a state change.
- * @next_state: This parameter specifies the new state for the state machine.
- *
- */
-void sci_base_state_machine_change_state(
- struct sci_base_state_machine *sm,
- u32 next_state)
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
sci_state_machine_exit_state(sm);
sci_state_machine_enter_state(sm);
}
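/*
 * Minimal usage sketch of the renamed API (illustration only), mirroring the
 * construct/start/change-state sequence already used by the request code
 * earlier in this patch.
 */
static void example_sm_usage(struct scic_sds_request *sci_req)
{
	sci_base_state_machine_construct(&sci_req->sm,
					 scic_sds_request_state_table,
					 SCI_REQ_INIT);
	sci_base_state_machine_start(&sci_req->sm);
	sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED);
}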
-
-/**
- * This method simply returns the current state of the state machine to the
- * caller.
- * @sm: This parameter specifies the state machine for which to
- * retrieve the current state.
- *
- * This method returns a u32 value indicating the current state for the
- * supplied state machine.
- */
-u32 sci_base_state_machine_get_state(struct sci_base_state_machine *sm)
-{
- return sm->current_state_id;
-}
-
u32 initial_state);
void sci_base_state_machine_start(struct sci_base_state_machine *sm);
void sci_base_state_machine_stop(struct sci_base_state_machine *sm);
-void sci_base_state_machine_change_state(struct sci_base_state_machine *sm,
- u32 next_state);
-u32 sci_base_state_machine_get_state(struct sci_base_state_machine *sm);
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
#endif /* _SCI_BASE_STATE_MACHINE_H_ */