IA64: sg chaining support
authorJens Axboe <jens.axboe@oracle.com>
Tue, 16 Oct 2007 09:27:26 +0000 (11:27 +0200)
committerJens Axboe <jens.axboe@oracle.com>
Tue, 16 Oct 2007 09:27:26 +0000 (11:27 +0200)
This updates the ia64 iommu/pci dma mappers to support sg chaining.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
arch/ia64/hp/common/sba_iommu.c
arch/ia64/sn/pci/pci_dma.c
include/asm-ia64/dma-mapping.h
include/asm-ia64/scatterlist.h

index e980e7aa2306eb52ddc51632b48d97d892157704..4338f4123f31688ad44697c17f7605efeaf4e4b3 100644 (file)
@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
                printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
                       startsg->dma_address, startsg->dma_length,
                       sba_sg_address(startsg));
-               startsg++;
+               startsg = sg_next(startsg);
        }
 }
 
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
        while (the_nents-- > 0) {
                if (sba_sg_address(the_sg) == 0x0UL)
                        sba_dump_sg(NULL, startsg, nents);
-               the_sg++;
+               the_sg = sg_next(the_sg);
        }
 }
 
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
                        u32 pide = startsg->dma_address & ~PIDE_FLAG;
                        dma_offset = (unsigned long) pide & ~iovp_mask;
                        startsg->dma_address = 0;
-                       dma_sg++;
+                       dma_sg = sg_next(dma_sg);
                        dma_sg->dma_address = pide | ioc->ibase;
                        pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
                        n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
                                pdirp++;
                        } while (cnt > 0);
                }
-               startsg++;
+               startsg = sg_next(startsg);
        }
        /* force pdir update */
        wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
                while (--nents > 0) {
                        unsigned long vaddr;    /* tmp */
 
-                       startsg++;
+                       startsg = sg_next(startsg);
 
                        /* PARANOID */
                        startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #ifdef ALLOW_IOV_BYPASS_SG
        ASSERT(to_pci_dev(dev)->dma_mask);
        if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
-               for (sg = sglist ; filled < nents ; filled++, sg++){
+               for_each_sg(sglist, sg, nents, filled) {
                        sg->dma_length = sg->length;
                        sg->dma_address = virt_to_phys(sba_sg_address(sg));
                }
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
        while (nents && sglist->dma_length) {
 
                sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
-               sglist++;
+               sglist = sg_next(sglist);
                nents--;
        }
 
index d79ddacfba2d282aa5c7075e1433b92c2f7deae2..ecd8a52b9b9e23a2e9987d2703976983b30af4c0 100644 (file)
@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                     int nhwentries, int direction)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+       struct scatterlist *sg;
 
        BUG_ON(dev->bus != &pci_bus_type);
 
-       for (i = 0; i < nhwentries; i++, sg++) {
+       for_each_sg(sgl, sg, nhwentries, i) {
                provider->dma_unmap(pdev, sg->dma_address, direction);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
                  int direction)
 {
        unsigned long phys_addr;
-       struct scatterlist *saved_sg = sg;
+       struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        /*
         * Setup a DMA address for each entry in the scatterlist.
         */
-       for (i = 0; i < nhwentries; i++, sg++) {
+       for_each_sg(sgl, sg, nhwentries, i) {
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                sg->dma_address = provider->dma_map(pdev,
                                                    phys_addr, sg->length,
index 3ca6d5c14b2e729ca772db529e70c7e479449ab2..f1735a22d0ea28cd08ce5be1413c78cd1e397061 100644 (file)
@@ -6,7 +6,7 @@
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent     platform_dma_alloc_coherent
 /* coherent mem. is cheap */
index a452ea24205a70b1fd271d47158706f7809d6957..7d5234d50312d4cbf784c6d3b51a6eda93923194 100644 (file)
@@ -30,4 +30,6 @@ struct scatterlist {
 #define sg_dma_len(sg)         ((sg)->dma_length)
 #define sg_dma_address(sg)     ((sg)->dma_address)
 
+#define        ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */