void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t phys_addr)
{
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
dma_addr_t offset;
BUG_ON(phys_addr < ireq->request_daddr);
offset = phys_addr - ireq->request_daddr;
- BUG_ON(offset >= ireq->request_alloc_size);
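+ /* the core request is embedded in the isci_request allocation, so
+  * any translated address must fall within sizeof(*ireq)
+  */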
+ BUG_ON(offset >= sizeof(*ireq));
return (char *)ireq + offset;
}
dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sds_request,
void *virt_addr)
{
- struct isci_request *isci_request = sds_request->ireq;
+ struct isci_request *isci_request = sci_req_to_ireq(sds_request);
char *requested_addr = (char *)virt_addr;
char *base_addr = (char *)isci_request;
BUG_ON(requested_addr < base_addr);
- BUG_ON((requested_addr - base_addr) >=
- isci_request->request_alloc_size);
+ BUG_ON((requested_addr - base_addr) >= sizeof(*isci_request));
return isci_request->request_daddr + (requested_addr - base_addr);
}
| ((char_buffer)[3]) \
)
-#define SCI_FIELD_OFFSET(type, field) ((unsigned long)&(((type *)0)->field))
-
-
#define sci_cb_make_physical_address(physical_addr, addr_upper, addr_lower) \
((physical_addr) = (addr_lower) | ((u64)addr_upper) << 32)
} SCIC_TRANSPORT_PROTOCOL;
-
-/**
- * scic_io_request_construct() - This method is called by the SCI user to
- * construct all SCI Core IO requests. Memory initialization and
- * functionality common to all IO request types is performed in this method.
- * @scic_controller: the handle to the core controller object for which to
- * build an IO request.
- * @scic_remote_device: the handle to the core remote device object for which
- * to build an IO request.
- * @io_tag: This parameter specifies the IO tag to be associated with this
- * request. If SCI_CONTROLLER_INVALID_IO_TAG is passed, then a copy of the
- * request is built internally. The request will be copied into the actual
- * controller request memory when the IO tag is allocated internally during
- * the scic_controller_start_io() method.
- * @user_io_request_object: This parameter specifies the user IO request to be
- * utilized during IO construction. This IO pointer will become the
- * associated object for the core IO request object.
- * @scic_io_request_memory: This parameter specifies the memory location to be
- * utilized when building the core request.
- * @new_scic_io_request_handle: This parameter specifies a pointer to the
- * handle the core will expect in further interactions with the core IO
- * request object.
- *
- * The SCI core implementation will create an association between the user IO
- * request object and the core IO request object. Indicate if the controller
- * successfully built the IO request. SCI_SUCCESS This value is returned if the
- * IO request was successfully built.
- */
enum sci_status scic_io_request_construct(
struct scic_sds_controller *scic_controller,
struct scic_sds_remote_device *scic_remote_device,
- u16 io_tag,
- void *user_io_request_object,
- struct scic_sds_request *scic_io_request_memory,
- struct scic_sds_request **new_scic_io_request_handle);
+ u16 io_tag, struct scic_sds_request *sci_req);
/**
* scic_io_request_construct_basic_ssp() - This method is called by the SCI
memcpy(task_context_buffer,
sci_req->task_context_buffer,
- SCI_FIELD_OFFSET(struct scu_task_context, sgl_snapshot_ac));
+ offsetof(struct scu_task_context, sgl_snapshot_ac));
/*
* Now that the soft copy of the TC has been copied into the TC
*/
void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
- struct isci_request *isci_request = sds_request->ireq;
+ struct isci_request *isci_request = sci_req_to_ireq(sds_request);
struct isci_host *isci_host = isci_request->isci_host;
struct sas_task *task = isci_request_access_task(isci_request);
struct scatterlist *sg = NULL;
static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
struct ssp_cmd_iu *cmd_iu;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(ireq);
cmd_iu = &sci_req->ssp.cmd;
static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
struct ssp_task_iu *task_iu;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(ireq);
struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
bool copy)
{
enum sci_status status = SCI_SUCCESS;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(ireq);
/* check for management protocols */
enum sci_status scic_io_request_construct_basic_ssp(
struct scic_sds_request *sci_req)
{
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(ireq);
sci_req->protocol = SCIC_SSP_PROTOCOL;
enum sci_status status;
struct scic_sds_stp_request *stp_req;
bool copy = false;
- struct isci_request *isci_request = sci_req->ireq;
+ struct isci_request *isci_request = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(isci_request);
stp_req = &sci_req->stp.req;
}
-enum sci_status scic_task_request_construct_sata(
- struct scic_sds_request *sci_req)
+enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
enum sci_status status = SCI_SUCCESS;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
/* check for management protocols */
if (ireq->ttype == tmf_task) {
void *resp_buf;
u32 len;
struct ssp_response_iu *ssp_response;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
ssp_response = &sci_req->ssp.rsp;
struct scic_sds_controller *scic =
scic_sds_request_get_controller(sci_req);
struct isci_host *ihost = scic_to_ihost(scic);
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
SET_STATE_HANDLER(sci_req,
scic_sds_request_state_handler_table,
static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
- u16 io_tag,
- void *user_io_request_object,
- struct scic_sds_request *sci_req)
+ u16 io_tag, struct scic_sds_request *sci_req)
{
sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
sci_base_state_machine_start(&sci_req->state_machine);
sci_req->io_tag = io_tag;
- sci_req->user_request = user_io_request_object;
sci_req->owning_controller = scic;
sci_req->target_device = sci_dev;
sci_req->has_started_substate_machine = false;
enum sci_status
scic_io_request_construct(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
- u16 io_tag,
- void *user_req,
- struct scic_sds_request *sci_req,
- struct scic_sds_request **new_sci_req)
+ u16 io_tag, struct scic_sds_request *sci_req)
{
struct domain_device *dev = sci_dev_to_domain(sci_dev);
enum sci_status status = SCI_SUCCESS;
/* Build the common part of the request */
- scic_sds_general_request_construct(scic,
- sci_dev,
- io_tag,
- user_req,
- sci_req);
+ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
if (sci_dev->rnc.remote_node_index ==
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
if (status == SCI_SUCCESS) {
- memset(sci_req->task_context_buffer,
- 0,
- SCI_FIELD_OFFSET(struct scu_task_context, sgl_pair_ab));
- *new_sci_req = sci_req;
+ memset(sci_req->task_context_buffer, 0,
+ offsetof(struct scu_task_context, sgl_pair_ab));
}
return status;
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
- u16 io_tag,
- void *user_io_request_object,
- struct scic_sds_request *sci_req,
- struct scic_sds_request **new_sci_req)
+ u16 io_tag, struct scic_sds_request *sci_req)
{
struct domain_device *dev = sci_dev_to_domain(sci_dev);
enum sci_status status = SCI_SUCCESS;
/* Build the common part of the request */
- scic_sds_general_request_construct(scic, sci_dev, io_tag,
- user_io_request_object,
- sci_req);
+ scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
if (dev->dev_type == SAS_END_DEV) {
scic_sds_ssp_task_request_assign_buffers(sci_req);
if (status == SCI_SUCCESS) {
sci_req->is_task_management_request = true;
memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
- *new_sci_req = sci_req;
}
return status;
SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,
};
-struct isci_request;
-/**
- * struct scic_sds_request - This structure contains or references all of
- * the data necessary to process a task management or normal IO request.
- *
- *
- */
struct scic_sds_request {
- /**
- * The field specifies that the peer object for the request object.
- */
- struct isci_request *ireq;
-
/**
* This field contains the information for the base request state machine.
*/
struct sci_base_state_machine state_machine;
- void *user_request;
-
/**
* This field simply points to the controller to which this IO request
* is associated.
int total_len = len;
sci_req = to_sci_req(stp_req);
- ireq = scic_sds_request_get_user_request(sci_req);
+ ireq = sci_req_to_ireq(sci_req);
task = isci_request_access_task(ireq);
src_addr = data_buf;
{
struct scic_sds_controller *scic = sci_req->owning_controller;
struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
struct sas_task *task = isci_request_access_task(ireq);
struct dev_to_host_fis *frame_header;
enum sci_status status;
struct scic_sds_controller;
-/**
- * scic_task_request_construct() - This method is called by the SCI user to
- * construct all SCI Core task management requests, regardless of protocol.
- * Memory initialization and functionality common to all task request types
- * is performed in this method.
- * @scic_controller: the handle to the core controller object for which to
- * build the task managmement request.
- * @scic_remote_device: the handle to the core remote device object for which
- * to build the task management request. passed, then a copy of the request
- * is built internally. The request will be copied into the actual
- * controller request memory when the task is allocated internally during
- * the scic_controller_start_task() method.
- * @io_tag: This parameter specifies the IO tag to be associated with this
- * request. If SCI_CONTROLLER_INVALID_IO_TAG is passed, then a copy of the
- * request is built internally. The request will be copied into the actual
- * controller request memory when the IO tag is allocated internally during
- * the scic_controller_start_io() method.
- * @user_task_request_object: This parameter specifies the user task request to
- * be utilized during construction. This task pointer will become the
- * associated object for the core task request object.
- * @scic_task_request_memory: This parameter specifies the memory location to
- * be utilized when building the core request.
- * @new_scic_task_request_handle: This parameter specifies a pointer to the
- * handle the core will expect in further interactions with the core task
- * request object.
- *
- * The SCI core implementation will create an association between the user task
- * request object and the core task request object. Indicate if the controller
- * successfully built the task request. SCI_SUCCESS This value is returned if
- * the task request was successfully built.
- */
enum sci_status scic_task_request_construct(
struct scic_sds_controller *scic_controller,
struct scic_sds_remote_device *scic_remote_device,
- u16 io_tag,
- void *user_task_request_object,
- void *scic_task_request_memory,
- struct scic_sds_request **new_scic_task_request_handle);
+ u16 io_tag, struct scic_sds_request *sci_req);
/**
* scic_task_request_construct_ssp() - This method is called by the SCI user to
if (err)
return err;
- /*
- * keep the pool alloc size around, will use it for a bounds checking
- * when trying to convert virtual addresses to physical addresses
- */
- isci_host->dma_pool_alloc_size = sizeof(struct isci_request) +
- sizeof(struct scic_sds_request);
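+ /* each pool element holds a full isci_request, including the embedded core request */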
isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
- isci_host->dma_pool_alloc_size,
+ sizeof(struct isci_request),
SLAB_HWCACHE_ALIGN, 0);
if (!isci_host->dma_pool)
struct list_head timers;
void *core_ctrl_memory;
struct dma_pool *dma_pool;
- unsigned int dma_pool_alloc_size;
struct isci_phy phys[SCI_MAX_PHYS];
struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
struct sas_ha_struct sas_ha;
struct sci_base_state_machine *sm = &sci_dev->state_machine;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_port *sci_port = sci_dev->owning_port;
- struct isci_request *ireq = sci_req->ireq;
+ struct isci_request *ireq = sci_req_to_ireq(sci_req);
enum sci_status status;
switch (state) {
"%s: request = %p\n",
__func__,
request);
- status = scic_io_request_construct_basic_ssp(
- request->sci_request_handle
- );
+ status = scic_io_request_construct_basic_ssp(&request->sci);
return status;
}
*/
register_fis = isci_sata_task_to_fis_copy(task);
- status = scic_io_request_construct_basic_sata(
- request->sci_request_handle
- );
+ status = scic_io_request_construct_basic_sata(&request->sci);
/* Set the ncq tag in the fis, from the queue
* command in the task.
{
enum sci_status status = SCI_FAILURE;
struct sas_task *task = isci_request_access_task(ireq);
- struct scic_sds_request *sci_req = ireq->sci_request_handle;
+ struct scic_sds_request *sci_req = &ireq->sci;
dev_dbg(&ireq->isci_host->pdev->dev,
"%s: request = %p\n", __func__, ireq);
*/
status = scic_io_request_construct(&isci_host->sci, sci_device,
SCI_CONTROLLER_INVALID_IO_TAG,
- request, request->sci_req,
- &request->sci_request_handle);
+ &request->sci);
if (status != SCI_SUCCESS) {
dev_warn(&isci_host->pdev->dev,
return SCI_FAILURE;
}
- request->sci_request_handle->ireq = request;
-
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
status = isci_smp_request_build(request);
request->isci_host = isci_host;
request->isci_device = isci_device;
request->io_request_completion = NULL;
+ request->terminated = false;
- request->request_alloc_size = isci_host->dma_pool_alloc_size;
request->num_sg_entries = 0;
request->complete_in_target = false;
goto out;
status = isci_io_request_build(isci_host, request, isci_device);
- if (status == SCI_SUCCESS) {
-
- spin_lock_irqsave(&isci_host->scic_lock, flags);
-
- /* send the request, let the core assign the IO TAG. */
- status = scic_controller_start_io(
- &isci_host->sci,
- sci_device,
- request->sci_request_handle,
- SCI_CONTROLLER_INVALID_IO_TAG
- );
-
- if (status == SCI_SUCCESS ||
- status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
-
- /* Either I/O started OK, or the core has signaled that
- * the device needs a target reset.
- *
- * In either case, hold onto the I/O for later.
- *
- * Update it's status and add it to the list in the
- * remote device object.
- */
- isci_request_change_state(request, started);
- list_add(&request->dev_node,
- &isci_device->reqs_in_process);
-
- if (status == SCI_SUCCESS) {
- /* Save the tag for possible task mgmt later. */
- request->io_tag = scic_io_request_get_io_tag(
- request->sci_request_handle);
- } else {
- /* The request did not really start in the
- * hardware, so clear the request handle
- * here so no terminations will be done.
- */
- request->sci_request_handle = NULL;
- }
+ if (status != SCI_SUCCESS) {
+ dev_warn(&isci_host->pdev->dev,
+ "%s: request_construct failed - status = 0x%x\n",
+ __func__,
+ status);
+ goto out;
+ }
- } else
- dev_warn(&isci_host->pdev->dev,
- "%s: failed request start (0x%x)\n",
- __func__, status);
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ /* send the request, let the core assign the IO TAG. */
+ status = scic_controller_start_io(&isci_host->sci, sci_device,
+ &request->sci,
+ SCI_CONTROLLER_INVALID_IO_TAG);
+ if (status != SCI_SUCCESS &&
+ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ dev_warn(&isci_host->pdev->dev,
+ "%s: failed request start (0x%x)\n",
+ __func__, status);
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ goto out;
+ }
- if (status ==
- SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
- /* Signal libsas that we need the SCSI error
- * handler thread to work on this I/O and that
- * we want a device reset.
- */
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- /* Cause this task to be scheduled in the SCSI error
- * handler thread.
- */
- isci_execpath_callback(isci_host, task,
- sas_task_abort);
-
- /* Change the status, since we are holding
- * the I/O until it is managed by the SCSI
- * error handler.
- */
- status = SCI_SUCCESS;
- }
+ /* Either I/O started OK, or the core has signaled that
+ * the device needs a target reset.
+ *
+ * In either case, hold onto the I/O for later.
+ *
+ * Update its status and add it to the list in the
+ * remote device object.
+ */
+ isci_request_change_state(request, started);
+ list_add(&request->dev_node, &isci_device->reqs_in_process);
- } else
- dev_warn(&isci_host->pdev->dev,
- "%s: request_construct failed - status = 0x%x\n",
- __func__,
- status);
+ if (status == SCI_SUCCESS) {
+ /* Save the tag for possible task mgmt later. */
+ request->io_tag = scic_io_request_get_io_tag(&request->sci);
+ } else {
+ /* The request did not really start in the
+ * hardware, so mark it as terminated here
+ * to prevent any termination attempts.
+ */
+ request->terminated = true;
+ }
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* Signal libsas that we need the SCSI error
+ * handler thread to work on this I/O and that
+ * we want a device reset.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Cause this task to be scheduled in the SCSI error
+ * handler thread.
+ */
+ isci_execpath_callback(isci_host, task,
+ sas_task_abort);
+
+ /* Change the status, since we are holding
+ * the I/O until it is managed by the SCSI
+ * error handler.
+ */
+ status = SCI_SUCCESS;
+ }
out:
if (status != SCI_SUCCESS) {
{
unsigned int cstatus;
- cstatus = scic_request_get_controller_status(
- request->sci_request_handle
- );
+ cstatus = scic_request_get_controller_status(&request->sci);
dev_dbg(&request->isci_host->pdev->dev,
"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
task);
if (sas_protocol_ata(task->task_proto)) {
- resp_buf = &request->sci_request_handle->stp.rsp;
+ resp_buf = &request->sci.stp.rsp;
isci_request_process_stp_response(task,
resp_buf);
} else if (SAS_PROTOCOL_SSP == task->task_proto) {
/* crack the iu response buffer. */
- resp_iu = &request->sci_request_handle->ssp.rsp;
+ resp_iu = &request->sci.ssp.rsp;
isci_request_process_response_iu(task, resp_iu,
&isci_host->pdev->dev);
request->complete_in_target = true;
if (task->task_proto == SAS_PROTOCOL_SMP) {
- void *rsp = &request->sci_request_handle->smp.rsp;
+ void *rsp = &request->sci.smp.rsp;
dev_dbg(&isci_host->pdev->dev,
"%s: SMP protocol completion\n",
* the maximum was transferred.
*/
u32 transferred_length
- = scic_io_request_get_number_of_bytes_transferred(
- request->sci_request_handle);
+ = scic_io_request_get_number_of_bytes_transferred(&request->sci);
task->task_status.residual
= task->total_xfer_len - transferred_length;
/* complete the io request to the core. */
scic_controller_complete_io(&isci_host->sci,
&isci_device->sci,
- request->sci_request_handle);
- /* NULL the request handle so it cannot be completed or
+ &request->sci);
+ /* set the 'terminated' flag so it cannot be completed or
* terminated again, and to cause any calls into abort
* task to recognize the already completed case.
*/
- request->sci_request_handle = NULL;
+ request->terminated = true;
isci_host_can_dequeue(isci_host, 1);
}
};
struct isci_request {
- struct scic_sds_request *sci_request_handle;
enum isci_request_status status;
enum task_type ttype;
unsigned short io_tag;
bool complete_in_target;
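+ /* when true, the request must not be terminated or completed to the core again */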
+ bool terminated;
union ttype_ptr_union {
struct sas_task *io_task_ptr; /* When ttype==io_task */
dma_addr_t zero_scatter_daddr;
unsigned int num_sg_entries; /* returned by pci_alloc_sg */
- unsigned int request_alloc_size; /* size of block from dma_pool_alloc */
/** Note: "io_request_completion" is completed in two different ways
* depending on whether this is a TMF or regular request.
* TMF was aborting is guaranteed to have completed.
*/
struct completion *io_request_completion;
- struct scic_sds_request sci_req[0] ____cacheline_aligned;
+ struct scic_sds_request sci;
};
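+/* recover the isci_request that embeds a given core request */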
+static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
+{
+ struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
+
+ return ireq;
+}
+
/**
* This function gets the status of the request object.
* @request: This parameter points to the isci_request object
struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task)
{
struct isci_request *ireq = task->lldd_task;
- struct host_to_dev_fis *fis = &ireq->sci_request_handle->stp.cmd;
+ struct host_to_dev_fis *fis = &ireq->sci.stp.cmd;
memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
struct isci_request *request = task->lldd_task;
register_fis->sector_count = qc->tag << 3;
- scic_stp_io_request_set_ncq_tag(request->sci_request_handle, qc->tag);
+ scic_stp_io_request_set_ncq_tag(&request->sci, qc->tag);
}
/**
enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
{
- struct scic_sds_request *sci_req = ireq->sci_request_handle;
+ struct scic_sds_request *sci_req = &ireq->sci;
struct isci_tmf *isci_tmf;
enum sci_status status;
/* core builds the protocol specific request
* based on the h2d fis.
*/
- status = scic_task_request_construct_sata(
- ireq->sci_request_handle
- );
+ status = scic_task_request_construct_sata(&ireq->sci);
return status;
}
/* let the core do it's construct. */
status = scic_task_request_construct(&isci_host->sci, sci_device,
SCI_CONTROLLER_INVALID_IO_TAG,
- request, &request->sci_req,
- &request->sci_request_handle);
+ &request->sci);
if (status != SCI_SUCCESS) {
dev_warn(&isci_host->pdev->dev,
goto errout;
}
- request->sci_request_handle->ireq = request;
-
/* XXX convert to get this from task->tproto like other drivers */
if (dev->dev_type == SAS_END_DEV) {
isci_tmf->proto = SAS_PROTOCOL_SSP;
- status = scic_task_request_construct_ssp(
- request->sci_request_handle
- );
+ status = scic_task_request_construct_ssp(&request->sci);
if (status != SCI_SUCCESS)
goto errout;
}
status = scic_controller_terminate_request(
&request->isci_host->sci,
&request->isci_device->sci,
- request->sci_request_handle
- );
+ &request->sci);
dev_dbg(&request->isci_host->pdev->dev,
"%s: tmf_request = %p; tmf = %p; status = %d\n",
status = scic_controller_start_task(
&isci_host->sci,
sci_device,
- request->sci_request_handle,
- SCI_CONTROLLER_INVALID_IO_TAG
- );
+ &request->sci,
+ SCI_CONTROLLER_INVALID_IO_TAG);
if (status != SCI_TASK_SUCCESS) {
dev_warn(&isci_host->pdev->dev,
- * device condition (if the request handle is NULL, then the
- * request completed but needed additional handling here).
+ * device condition (if the request is already terminated, then
+ * it completed but needed additional handling here).
*/
- if (isci_request->sci_request_handle != NULL) {
+ if (!isci_request->terminated) {
was_terminated = true;
needs_cleanup_handling = true;
status = scic_controller_terminate_request(
&isci_host->sci,
&isci_device->sci,
- isci_request->sci_request_handle);
+ &isci_request->sci);
}
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
enum isci_request_status old_state;
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
struct completion *tmf_complete;
- struct scic_sds_request *sci_req = ireq->sci_request_handle;
+ struct scic_sds_request *sci_req = &ireq->sci;
dev_dbg(&ihost->pdev->dev,
"%s: request = %p, status=%d\n",
/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
tmf_complete = tmf->complete;
- scic_controller_complete_io(&ihost->sci, &idev->sci,
- ireq->sci_request_handle);
- /* NULL the request handle to make sure it cannot be terminated
+ scic_controller_complete_io(&ihost->sci, &idev->sci, &ireq->sci);
+ /* set the 'terminated' flag to make sure the request cannot be terminated
* or completed again.
*/
- ireq->sci_request_handle = NULL;
+ ireq->terminated = true;
isci_request_change_state(ireq, unallocated);
list_del_init(&ireq->dev_node);