[RAMEN9610-12171] dma-buf: add dma_buf_[un]map_attachment_area
author hyesoo.yu <hyesoo.yu@samsung.com>
Fri, 31 Aug 2018 01:15:20 +0000 (10:15 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:23:17 +0000 (20:23 +0300)
dma-buf now supports dma_buf_[un]map_attachment_area, which lets an
importer inform the exporter of the payload area so that the exporter
can optimize buffer synchronization and DMA mapping for that area.
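
For example, an importer that knows only the first 'payload' bytes of
the buffer are accessed by the device could use the new calls as in the
sketch below ('attach' and 'payload' are illustrative placeholders, not
part of this patch):

    struct sg_table *sgt;

    sgt = dma_buf_map_attachment_area(attach, DMA_TO_DEVICE, payload);
    if (IS_ERR(sgt))
            return PTR_ERR(sgt);

    /* DMA only touches the first 'payload' bytes of the buffer */

    dma_buf_unmap_attachment_area(attach, sgt, DMA_TO_DEVICE, payload);

If the exporter does not implement map_dma_buf_area, these calls simply
fall back to dma_buf_[un]map_attachment and the size hint is ignored.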

Change-Id: I690a6f9695df77a650eadceb38a8280b1a569408
Signed-off-by: hyesoo.yu <hyesoo.yu@samsung.com>
drivers/dma-buf/dma-buf.c
include/linux/dma-buf.h

index 4e60350ca7e80eb86fd833ffb36d0ca7cacf8e2a..08c24996b84d3839dedcc9ec48d75b67ef47669b 100644
@@ -640,6 +640,76 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 }
 EXPORT_SYMBOL_GPL(dma_buf_detach);
 
+/**
+ * dma_buf_map_attachment_area - Returns the scatterlist table of
+ * the attachment mapped into _device_ address space.
+ * Is a wrapper for map_dma_buf_area() of the dma_buf_ops.
+ *
+ * @attach:    [in]    attachment whose scatterlist is to be returned
+ * @direction: [in]    direction of DMA transfer
+ * @size:      [in]    size of the payload area (hint for the exporter)
+ *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * The size is passed as a hint. The exporter may use it to limit the mapping
+ * or cache maintenance for DMA to the payload area. However, the exporter must
+ * manage the scatterlist and device virtual address space correctly even if
+ * the pairs of [un]map_dma_buf and [un]map_dma_buf_area do not match, that is,
+ * the sg_table from map_dma_buf_area could be released by unmap_dma_buf, or
+ * the sg_table from map_dma_buf could be released by unmap_dma_buf_area.
+ */
+struct sg_table *dma_buf_map_attachment_area(struct dma_buf_attachment *attach,
+                                            enum dma_data_direction direction,
+                                            size_t size)
+{
+       struct sg_table *sg_table;
+
+       if (WARN_ON(!attach || !attach->dmabuf))
+               return ERR_PTR(-EINVAL);
+
+       if (!attach->dmabuf->ops->map_dma_buf_area)
+               return dma_buf_map_attachment(attach, direction);
+
+       might_sleep();
+
+       sg_table = attach->dmabuf->ops->map_dma_buf_area(attach, direction,
+                                                        size);
+       if (!sg_table)
+               sg_table = ERR_PTR(-ENOMEM);
+
+       return sg_table;
+}
+EXPORT_SYMBOL_GPL(dma_buf_map_attachment_area);
+
+/**
+ * dma_buf_unmap_attachment_area - unmaps and decreases usecount of the buffer;
+ * might deallocate the scatterlist associated with the requested size.
+ * Is a wrapper for unmap_dma_buf_area() of dma_buf_ops.
+ *
+ * @attach:    [in]    attachment to unmap buffer from
+ * @sg_table:  [in]    scatterlist info of the buffer to unmap
+ * @direction:  [in]    direction of DMA transfer
+ * @size:      [in]    size of the payload area (hint for the exporter)
+ */
+void dma_buf_unmap_attachment_area(struct dma_buf_attachment *attach,
+                                  struct sg_table *sg_table,
+                                  enum dma_data_direction direction,
+                                  size_t size)
+{
+       if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+               return;
+
+       if (!attach->dmabuf->ops->unmap_dma_buf_area)
+               return dma_buf_unmap_attachment(attach, sg_table, direction);
+
+       might_sleep();
+
+       attach->dmabuf->ops->unmap_dma_buf_area(attach, sg_table,
+                                               direction, size);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment_area);
+
 /**
  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
index 59d3d00b42af55639b08fb42b14f86892bd72266..84dd3d681302a653990d0f17e149adc8c3e71e0b 100644
@@ -143,7 +143,23 @@ struct dma_buf_ops {
        void (*unmap_dma_buf)(struct dma_buf_attachment *,
                              struct sg_table *,
                              enum dma_data_direction);
-
+       /**
+        * @[un]map_dma_buf_area:
+        *
+        * This is called by dma_buf_[un]map_attachment_area().
+        * This is the same as [un]map_dma_buf, but it additionally passes the
+        * size to the exporter. The size is the area actually accessed by DMA,
+        * so the exporter might try to optimize mapping or cache maintenance.
+        *
+        * This callback is optional.
+        */
+       struct sg_table * (*map_dma_buf_area)(struct dma_buf_attachment *,
+                                             enum dma_data_direction,
+                                             size_t size);
+       void (*unmap_dma_buf_area)(struct dma_buf_attachment *,
+                                  struct sg_table *,
+                                  enum dma_data_direction,
+                                  size_t size);
        /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
         * if the call would block.
         */
@@ -387,6 +403,13 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
 void dma_buf_put(struct dma_buf *dmabuf);
 
+struct sg_table *dma_buf_map_attachment_area(struct dma_buf_attachment *attach,
+                                            enum dma_data_direction direction,
+                                            size_t size);
+void dma_buf_unmap_attachment_area(struct dma_buf_attachment *attach,
+                                  struct sg_table *sg_table,
+                                  enum dma_data_direction direction,
+                                  size_t size);
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                                        enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
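
For reference, a minimal exporter-side sketch of the new optional
callbacks is shown below. The my_exporter_* names are hypothetical and
not part of this patch; a real exporter would reuse its existing
[un]map_dma_buf paths and treat 'size' purely as an optimization hint,
e.g. to restrict cache maintenance to the payload area:

    static struct sg_table *
    my_exporter_map_dma_buf_area(struct dma_buf_attachment *attach,
                                 enum dma_data_direction dir, size_t size)
    {
            /* 'size' is only a hint; map the whole buffer as usual. */
            return my_exporter_map_dma_buf(attach, dir);
    }

    static void
    my_exporter_unmap_dma_buf_area(struct dma_buf_attachment *attach,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir, size_t size)
    {
            my_exporter_unmap_dma_buf(attach, sgt, dir);
    }

    static const struct dma_buf_ops my_exporter_dma_buf_ops = {
            .map_dma_buf        = my_exporter_map_dma_buf,
            .unmap_dma_buf      = my_exporter_unmap_dma_buf,
            .map_dma_buf_area   = my_exporter_map_dma_buf_area,
            .unmap_dma_buf_area = my_exporter_unmap_dma_buf_area,
            /* .mmap, .release and the other required ops are omitted */
    };

Exporters that do not implement the new callbacks keep working
unchanged: dma_buf_[un]map_attachment_area falls back to
dma_buf_[un]map_attachment.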