libata-sff: ata_sff_[dumb_]qc_prep are BMDMA specific
author	Tejun Heo <tj@kernel.org>
Mon, 10 May 2010 19:41:40 +0000 (21:41 +0200)
committer	Jeff Garzik <jgarzik@redhat.com>
Wed, 19 May 2010 17:36:50 +0000 (13:36 -0400)
Both qc_prep functions deal only with BMDMA PRD setup, and PIO-only SFF
drivers don't need them.  Rename them to ata_bmdma_[dumb_]qc_prep() and
relocate them alongside the other BMDMA helpers.

All usages are renamed except for pdc_adma and sata_qstor.  Those two
drivers are not BMDMA drivers and don't need to call BMDMA qc_prep
functions.  Calls to ata_sff_qc_prep() in the two drivers are removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
drivers/ata/libata-sff.c
drivers/ata/pata_atiixp.c
drivers/ata/pata_cs5520.c
drivers/ata/pata_cs5530.c
drivers/ata/pata_sc1200.c
drivers/ata/pdc_adma.c
drivers/ata/sata_nv.c
drivers/ata/sata_qstor.c
include/linux/libata.h

index aa378c04ed873a2cd4a82c77e66efd0b871a24e1..a58693bdde9de4cf8ebdda5947032ec564d68677 100644 (file)
@@ -45,7 +45,7 @@ static struct workqueue_struct *ata_sff_wq;
 const struct ata_port_operations ata_sff_port_ops = {
        .inherits               = &ata_base_port_ops,
 
-       .qc_prep                = ata_sff_qc_prep,
+       .qc_prep                = ata_noop_qc_prep,
        .qc_issue               = ata_sff_qc_issue,
        .qc_fill_rtf            = ata_sff_qc_fill_rtf,
 
@@ -70,149 +70,6 @@ const struct ata_port_operations ata_sff_port_ops = {
 };
 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
-/**
- *     ata_fill_sg - Fill PCI IDE PRD table
- *     @qc: Metadata associated with taskfile to be transferred
- *
- *     Fill PCI IDE PRD (scatter-gather) table with segments
- *     associated with the current disk command.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       struct scatterlist *sg;
-       unsigned int si, pi;
-
-       pi = 0;
-       for_each_sg(qc->sg, sg, qc->n_elem, si) {
-               u32 addr, offset;
-               u32 sg_len, len;
-
-               /* determine if physical DMA addr spans 64K boundary.
-                * Note h/w doesn't support 64-bit, so we unconditionally
-                * truncate dma_addr_t to u32.
-                */
-               addr = (u32) sg_dma_address(sg);
-               sg_len = sg_dma_len(sg);
-
-               while (sg_len) {
-                       offset = addr & 0xffff;
-                       len = sg_len;
-                       if ((offset + sg_len) > 0x10000)
-                               len = 0x10000 - offset;
-
-                       ap->prd[pi].addr = cpu_to_le32(addr);
-                       ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
-                       VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-                       pi++;
-                       sg_len -= len;
-                       addr += len;
-               }
-       }
-
-       ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- *     ata_fill_sg_dumb - Fill PCI IDE PRD table
- *     @qc: Metadata associated with taskfile to be transferred
- *
- *     Fill PCI IDE PRD (scatter-gather) table with segments
- *     associated with the current disk command. Perform the fill
- *     so that we avoid writing any length 64K records for
- *     controllers that don't follow the spec.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       struct scatterlist *sg;
-       unsigned int si, pi;
-
-       pi = 0;
-       for_each_sg(qc->sg, sg, qc->n_elem, si) {
-               u32 addr, offset;
-               u32 sg_len, len, blen;
-
-               /* determine if physical DMA addr spans 64K boundary.
-                * Note h/w doesn't support 64-bit, so we unconditionally
-                * truncate dma_addr_t to u32.
-                */
-               addr = (u32) sg_dma_address(sg);
-               sg_len = sg_dma_len(sg);
-
-               while (sg_len) {
-                       offset = addr & 0xffff;
-                       len = sg_len;
-                       if ((offset + sg_len) > 0x10000)
-                               len = 0x10000 - offset;
-
-                       blen = len & 0xffff;
-                       ap->prd[pi].addr = cpu_to_le32(addr);
-                       if (blen == 0) {
-                               /* Some PATA chipsets like the CS5530 can't
-                                  cope with 0x0000 meaning 64K as the spec
-                                  says */
-                               ap->prd[pi].flags_len = cpu_to_le32(0x8000);
-                               blen = 0x8000;
-                               ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
-                       }
-                       ap->prd[pi].flags_len = cpu_to_le32(blen);
-                       VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-                       pi++;
-                       sg_len -= len;
-                       addr += len;
-               }
-       }
-
-       ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- *     ata_sff_qc_prep - Prepare taskfile for submission
- *     @qc: Metadata associated with taskfile to be prepared
- *
- *     Prepare ATA taskfile for submission.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
-       if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-               return;
-
-       ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- *     ata_sff_dumb_qc_prep - Prepare taskfile for submission
- *     @qc: Metadata associated with taskfile to be prepared
- *
- *     Prepare ATA taskfile for submission.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
-       if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-               return;
-
-       ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
 /**
  *     ata_sff_check_status - Read device status reg & clear interrupt
  *     @ap: port where the device is
@@ -2760,6 +2617,8 @@ const struct ata_port_operations ata_bmdma_port_ops = {
        .error_handler          = ata_bmdma_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
 
+       .qc_prep                = ata_bmdma_qc_prep,
+
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
@@ -2777,6 +2636,149 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
 };
 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
+/**
+ *     ata_bmdma_fill_sg - Fill PCI IDE PRD table
+ *     @qc: Metadata associated with taskfile to be transferred
+ *
+ *     Fill PCI IDE PRD (scatter-gather) table with segments
+ *     associated with the current disk command.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct scatterlist *sg;
+       unsigned int si, pi;
+
+       pi = 0;
+       for_each_sg(qc->sg, sg, qc->n_elem, si) {
+               u32 addr, offset;
+               u32 sg_len, len;
+
+               /* determine if physical DMA addr spans 64K boundary.
+                * Note h/w doesn't support 64-bit, so we unconditionally
+                * truncate dma_addr_t to u32.
+                */
+               addr = (u32) sg_dma_address(sg);
+               sg_len = sg_dma_len(sg);
+
+               while (sg_len) {
+                       offset = addr & 0xffff;
+                       len = sg_len;
+                       if ((offset + sg_len) > 0x10000)
+                               len = 0x10000 - offset;
+
+                       ap->prd[pi].addr = cpu_to_le32(addr);
+                       ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+                       VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+                       pi++;
+                       sg_len -= len;
+                       addr += len;
+               }
+       }
+
+       ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ *     ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
+ *     @qc: Metadata associated with taskfile to be transferred
+ *
+ *     Fill PCI IDE PRD (scatter-gather) table with segments
+ *     associated with the current disk command. Perform the fill
+ *     so that we avoid writing any length 64K records for
+ *     controllers that don't follow the spec.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct scatterlist *sg;
+       unsigned int si, pi;
+
+       pi = 0;
+       for_each_sg(qc->sg, sg, qc->n_elem, si) {
+               u32 addr, offset;
+               u32 sg_len, len, blen;
+
+               /* determine if physical DMA addr spans 64K boundary.
+                * Note h/w doesn't support 64-bit, so we unconditionally
+                * truncate dma_addr_t to u32.
+                */
+               addr = (u32) sg_dma_address(sg);
+               sg_len = sg_dma_len(sg);
+
+               while (sg_len) {
+                       offset = addr & 0xffff;
+                       len = sg_len;
+                       if ((offset + sg_len) > 0x10000)
+                               len = 0x10000 - offset;
+
+                       blen = len & 0xffff;
+                       ap->prd[pi].addr = cpu_to_le32(addr);
+                       if (blen == 0) {
+                               /* Some PATA chipsets like the CS5530 can't
+                                  cope with 0x0000 meaning 64K as the spec
+                                  says */
+                               ap->prd[pi].flags_len = cpu_to_le32(0x8000);
+                               blen = 0x8000;
+                               ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+                       }
+                       ap->prd[pi].flags_len = cpu_to_le32(blen);
+                       VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+                       pi++;
+                       sg_len -= len;
+                       addr += len;
+               }
+       }
+
+       ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ *     ata_bmdma_qc_prep - Prepare taskfile for submission
+ *     @qc: Metadata associated with taskfile to be prepared
+ *
+ *     Prepare ATA taskfile for submission.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+{
+       if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+               return;
+
+       ata_bmdma_fill_sg(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+/**
+ *     ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
+ *     @qc: Metadata associated with taskfile to be prepared
+ *
+ *     Prepare ATA taskfile for submission.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+       if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+               return;
+
+       ata_bmdma_fill_sg_dumb(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
 /**
  *     ata_bmdma_error_handler - Stock error handler for BMDMA controller
  *     @ap: port to handle error for
index cbaf2eddac6b0a1b62fe983384d14e548aff2e7d..44d88b380ddda7257a604a04334824ff533a0d7a 100644 (file)
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
 static struct ata_port_operations atiixp_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
 
-       .qc_prep        = ata_sff_dumb_qc_prep,
+       .qc_prep        = ata_bmdma_dumb_qc_prep,
        .bmdma_start    = atiixp_bmdma_start,
        .bmdma_stop     = atiixp_bmdma_stop,
 
index 95ebdac517f2ff0d05d01adecafddfead4a4de65..17c5f346ff01510a32a1fe0081e56e1667b081f3 100644 (file)
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
 
 static struct ata_port_operations cs5520_port_ops = {
        .inherits               = &ata_bmdma_port_ops,
-       .qc_prep                = ata_sff_dumb_qc_prep,
+       .qc_prep                = ata_bmdma_dumb_qc_prep,
        .cable_detect           = ata_cable_40wire,
        .set_piomode            = cs5520_set_piomode,
 };
index 738ad2e14a97b245dba9f74ea423888c53c9cc3a..4b9a66f18de654d42a2874fa2fc4a9acd6af4e25 100644 (file)
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
 static struct ata_port_operations cs5530_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
 
-       .qc_prep        = ata_sff_dumb_qc_prep,
+       .qc_prep        = ata_bmdma_dumb_qc_prep,
        .qc_issue       = cs5530_qc_issue,
 
        .cable_detect   = ata_cable_40wire,
index dfecc6f964b04eea992d410a856340d839805f81..599e648a722f71aea167f7a54a82fbe5316e6eab 100644 (file)
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
 
 static struct ata_port_operations sc1200_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
-       .qc_prep        = ata_sff_dumb_qc_prep,
+       .qc_prep        = ata_bmdma_dumb_qc_prep,
        .qc_issue       = sc1200_qc_issue,
        .qc_defer       = sc1200_qc_defer,
        .cable_detect   = ata_cable_40wire,
index bb4f838655b6144f36ea88b59b312aa1552b46da..adbe0426c8f08a13a3ec53ff7fb51f58d65b9597 100644 (file)
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
        VPRINTK("ENTER\n");
 
        adma_enter_reg_mode(qc->ap);
-       if (qc->tf.protocol != ATA_PROT_DMA) {
-               ata_sff_qc_prep(qc);
+       if (qc->tf.protocol != ATA_PROT_DMA)
                return;
-       }
 
        buf[i++] = 0;   /* Response flags */
        buf[i++] = 0;   /* reserved */
index 64e99824d8c1d1b66a7bd258bf23a5cd78dbed52..7a283d5d68f356f625e845a845fbbdbb82df795c 100644 (file)
@@ -1409,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
                        (qc->flags & ATA_QCFLAG_DMAMAP));
                nv_adma_register_mode(qc->ap);
-               ata_sff_qc_prep(qc);
+               ata_bmdma_qc_prep(qc);
                return;
        }
 
@@ -2012,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
        if (qc->tf.protocol != ATA_PROT_NCQ) {
-               ata_sff_qc_prep(qc);
+               ata_bmdma_qc_prep(qc);
                return;
        }
 
index d3a22f2ae7b64c2ea329085ad39aea025bad2acf..d533b3d20ca152f9bbbf12f270e4d7101816acf6 100644 (file)
@@ -303,10 +303,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
        VPRINTK("ENTER\n");
 
        qs_enter_reg_mode(qc->ap);
-       if (qc->tf.protocol != ATA_PROT_DMA) {
-               ata_sff_qc_prep(qc);
+       if (qc->tf.protocol != ATA_PROT_DMA)
                return;
-       }
 
        nelem = qs_fill_sg(qc);
 
index 1d3859016aec57406f5cc4edb818cb5034c3dad7..3675fd29b2e5a0c2deb952f9d466638b43702e70 100644 (file)
@@ -1570,8 +1570,6 @@ extern const struct ata_port_operations ata_bmdma32_port_ops;
        .sg_tablesize           = LIBATA_MAX_PRD,               \
        .dma_boundary           = ATA_DMA_BOUNDARY
 
-extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
-extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_sff_check_status(struct ata_port *ap);
 extern void ata_sff_pause(struct ata_port *ap);
@@ -1628,6 +1626,8 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
                struct scsi_host_template *sht, void *host_priv, int hflags);
 #endif /* CONFIG_PCI */
 
+extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_bmdma_error_handler(struct ata_port *ap);
 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
 extern void ata_bmdma_setup(struct ata_queued_cmd *qc);