static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
struct sbp2_command_info *cmd)
{
- struct hpsb_host *host = lu->ud->ne->host;
-
- if (cmd->cmd_dma) {
- if (cmd->dma_type == CMD_DMA_SINGLE)
- dma_unmap_single(host->device.parent, cmd->cmd_dma,
- cmd->dma_size, cmd->dma_dir);
- else if (cmd->dma_type == CMD_DMA_PAGE)
- dma_unmap_page(host->device.parent, cmd->cmd_dma,
- cmd->dma_size, cmd->dma_dir);
- /* XXX: Check for CMD_DMA_NONE bug */
- cmd->dma_type = CMD_DMA_NONE;
- cmd->cmd_dma = 0;
- }
- if (cmd->sge_buffer) {
- dma_unmap_sg(host->device.parent, cmd->sge_buffer,
- cmd->dma_size, cmd->dma_dir);
- cmd->sge_buffer = NULL;
- }
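+ /* undo the dma_map_sg() done in sbp2_prep_command_orb_sg() */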
+ if (scsi_sg_count(cmd->Current_SCpnt))
+ dma_unmap_sg(lu->ud->ne->host->device.parent,
+ scsi_sglist(cmd->Current_SCpnt),
+ scsi_sg_count(cmd->Current_SCpnt),
+ cmd->Current_SCpnt->sc_data_direction);
list_move_tail(&cmd->list, &lu->cmd_orb_completed);
}
#endif
}
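+ /*
+  * Clamp the controller's maximum s/g segment size so that dma_map_sg()
+  * never yields a segment larger than a single SBP-2 page table entry
+  * (16-bit length field) can describe.
+  */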
+ if (dma_get_max_seg_size(hi->host->device.parent) > SBP2_MAX_SEG_SIZE)
+ BUG_ON(dma_set_max_seg_size(hi->host->device.parent,
+ SBP2_MAX_SEG_SIZE));
+
/* Prevent unloading of the 1394 host */
if (!try_module_get(hi->host->driver->owner)) {
SBP2_ERR("failed to get a reference on 1394 host driver");
static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
struct sbp2_fwhost_info *hi,
struct sbp2_command_info *cmd,
- unsigned int scsi_use_sg,
+ unsigned int sg_count,
struct scatterlist *sg,
u32 orb_direction,
enum dma_data_direction dma_dir)
{
struct device *dmadev = hi->host->device.parent;
+ struct sbp2_unrestricted_page_table *pt;
+ int i, n;
+
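+ /* map the scatterlist; entries may get coalesced, so only the returned
+  * count n is meaningful from here on */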
+ n = dma_map_sg(dmadev, sg, sg_count, dma_dir);
+ if (n == 0)
+ return -ENOMEM;
- cmd->dma_dir = dma_dir;
orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
orb->misc |= ORB_SET_DIRECTION(orb_direction);
/* special case if only one element (and less than 64KB in size) */
- if (scsi_use_sg == 1 && sg->length <= SBP2_MAX_SG_ELEMENT_LENGTH) {
-
- cmd->dma_size = sg->length;
- cmd->dma_type = CMD_DMA_PAGE;
- cmd->cmd_dma = dma_map_page(dmadev, sg_page(sg), sg->offset,
- cmd->dma_size, cmd->dma_dir);
- if (dma_mapping_error(dmadev, cmd->cmd_dma)) {
- cmd->cmd_dma = 0;
- return -ENOMEM;
- }
-
- orb->data_descriptor_lo = cmd->cmd_dma;
- orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
-
+ if (n == 1) {
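+ /* a single mapped segment fits directly into the ORB's data
+  * descriptor, no page table needed */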
+ orb->misc |= ORB_SET_DATA_SIZE(sg_dma_len(sg));
+ orb->data_descriptor_lo = sg_dma_address(sg);
} else {
- struct sbp2_unrestricted_page_table *sg_element =
- &cmd->scatter_gather_element[0];
- u32 sg_count, sg_len;
- dma_addr_t sg_addr;
- int i, count = dma_map_sg(dmadev, sg, scsi_use_sg, dma_dir);
-
- cmd->dma_size = scsi_use_sg;
- cmd->sge_buffer = sg;
-
- /* use page tables (s/g) */
- orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
- orb->data_descriptor_lo = cmd->sge_dma;
+ pt = &cmd->scatter_gather_element[0];
dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_TO_DEVICE);
- /* loop through and fill out our SBP-2 page tables
- * (and split up anything too large) */
- for (i = 0, sg_count = 0; i < count; i++, sg = sg_next(sg)) {
- sg_len = sg_dma_len(sg);
- sg_addr = sg_dma_address(sg);
- while (sg_len) {
- sg_element[sg_count].segment_base_lo = sg_addr;
- if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
- sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
- sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
- } else {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
- sg_len = 0;
- }
- sg_count++;
- }
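+ /* fill the SBP-2 page table: segment length in the upper 16 bits of
+  * .high, 32-bit segment bus address in .low */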
+ for_each_sg(sg, sg, n, i) {
+ pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+ pt[i].low = cpu_to_be32(sg_dma_address(sg));
}
- orb->misc |= ORB_SET_DATA_SIZE(sg_count);
-
- sbp2util_cpu_to_be32_buffer(sg_element,
- (sizeof(struct sbp2_unrestricted_page_table)) *
- sg_count);
+ orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1) |
+ ORB_SET_DATA_SIZE(n);
+ orb->data_descriptor_lo = cmd->sge_dma;
dma_sync_single_for_device(dmadev, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
sdev->start_stop_pwr_cond = 1;
if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+
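+ /* keep block layer segments within the SBP-2 page table entry limit */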
+ blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
return 0;
}
u32 status_fifo_lo;
} __attribute__((packed));
-#define PAGE_TABLE_SET_SEGMENT_BASE_HI(v) ((v) & 0xffff)
-#define PAGE_TABLE_SET_SEGMENT_LENGTH(v) (((v) & 0xffff) << 16)
-
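+/* entry of an SBP-2 unrestricted page table: the upper 16 bits of 'high'
+ * hold the segment length, 'low' holds the 32-bit segment base address */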
struct sbp2_unrestricted_page_table {
- u32 length_segment_base_hi;
- u32 segment_base_lo;
-} __attribute__((packed));
+ __be32 high;
+ __be32 low;
+};
#define RESP_STATUS_REQUEST_COMPLETE 0x0
#define RESP_STATUS_TRANSPORT_FAILURE 0x1
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
-
/*
- * SCSI specific definitions
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
*/
-#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
+#define SBP2_MAX_SEG_SIZE 0xfffc
-/* There is no real limitation of the queue depth (i.e. length of the linked
+/*
+ * There is no real limitation of the queue depth (i.e. length of the linked
* list of command ORBs) at the target. The chosen depth is merely an
- * implementation detail of the sbp2 driver. */
+ * implementation detail of the sbp2 driver.
+ */
#define SBP2_MAX_CMDS 8
#define SBP2_SCSI_STATUS_GOOD 0x0
* Representations of commands and devices
*/
-enum sbp2_dma_types {
- CMD_DMA_NONE,
- CMD_DMA_PAGE,
- CMD_DMA_SINGLE
-};
-
/* Per SCSI command */
struct sbp2_command_info {
struct list_head list;
struct sbp2_unrestricted_page_table
scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
dma_addr_t sge_dma;
- void *sge_buffer;
- dma_addr_t cmd_dma;
- enum sbp2_dma_types dma_type;
- unsigned long dma_size;
- enum dma_data_direction dma_dir;
};
/* Per FireWire host */