Diffstat (limited to 'target/linux/bcm27xx/patches-5.4/950-0720-udmabuf-implement-begin_cpu_access-end_cpu_access-ho.patch')

 -rw-r--r--  target/linux/bcm27xx/patches-5.4/950-0720-udmabuf-implement-begin_cpu_access-end_cpu_access-ho.patch | 90 +++++++++++
 1 file changed, 90 insertions(+), 0 deletions(-)
diff --git a/target/linux/bcm27xx/patches-5.4/950-0720-udmabuf-implement-begin_cpu_access-end_cpu_access-ho.patch b/target/linux/bcm27xx/patches-5.4/950-0720-udmabuf-implement-begin_cpu_access-end_cpu_access-ho.patch
new file mode 100644
index 0000000000..79f6a67387
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.4/950-0720-udmabuf-implement-begin_cpu_access-end_cpu_access-ho.patch
@@ -0,0 +1,90 @@
+From 9dc454ebc4380cd90c24a3c224bb0ac7b3d9cc29 Mon Sep 17 00:00:00 2001
+From: Gurchetan Singh <gurchetansingh@chromium.org>
+Date: Mon, 2 Dec 2019 17:36:27 -0800
+Subject: [PATCH] udmabuf: implement begin_cpu_access/end_cpu_access
+ hooks
+
+Commit 284562e1f34874e267d4f499362c3816f8f6bc3f upstream.
+
+With the misc device, we should end up using the result of
+get_arch_dma_ops(..) or dma-direct ops.
+
+This can allow us to have WC mappings in the guest after
+synchronization.
+
+Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
+Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+---
+ drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 39 insertions(+)
+
+--- a/drivers/dma-buf/udmabuf.c
++++ b/drivers/dma-buf/udmabuf.c
+@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64;
+ struct udmabuf {
+ pgoff_t pagecount;
+ struct page **pages;
++ struct sg_table *sg;
+ struct miscdevice *device;
+ };
+
+@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf
+ static void release_udmabuf(struct dma_buf *buf)
+ {
+ struct udmabuf *ubuf = buf->priv;
++ struct device *dev = ubuf->device->this_device;
+ pgoff_t pg;
+
++ if (ubuf->sg)
++ put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
++
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ put_page(ubuf->pages[pg]);
+ kfree(ubuf->pages);
+ kfree(ubuf);
+ }
+
++static int begin_cpu_udmabuf(struct dma_buf *buf,
++ enum dma_data_direction direction)
++{
++ struct udmabuf *ubuf = buf->priv;
++ struct device *dev = ubuf->device->this_device;
++
++ if (!ubuf->sg) {
++ ubuf->sg = get_sg_table(dev, buf, direction);
++ if (IS_ERR(ubuf->sg))
++ return PTR_ERR(ubuf->sg);
++ } else {
++ dma_sync_sg_for_device(dev, ubuf->sg->sgl,
++ ubuf->sg->nents,
++ direction);
++ }
++
++ return 0;
++}
++
++static int end_cpu_udmabuf(struct dma_buf *buf,
++ enum dma_data_direction direction)
++{
++ struct udmabuf *ubuf = buf->priv;
++ struct device *dev = ubuf->device->this_device;
++
++ if (!ubuf->sg)
++ return -EINVAL;
++
++ dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
++ return 0;
++}
++
+ static const struct dma_buf_ops udmabuf_ops = {
+ .cache_sgt_mapping = true,
+ .map_dma_buf = map_udmabuf,
+ .unmap_dma_buf = unmap_udmabuf,
+ .release = release_udmabuf,
+ .mmap = mmap_udmabuf,
++ .begin_cpu_access = begin_cpu_udmabuf,
++ .end_cpu_access = end_cpu_udmabuf,
+ };
+
+ #define SEALS_WANTED (F_SEAL_SHRINK)
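
For context (an editorial note, not part of the patch): the two hooks added above are driven from userspace through the DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd. DMA_BUF_SYNC_START is routed to begin_cpu_udmabuf(), which maps and caches the sg_table on first use, and DMA_BUF_SYNC_END is routed to end_cpu_udmabuf(), which syncs the pages for the CPU. The sketch below is a minimal illustration of that path, assuming the standard udmabuf and dma-buf uapi headers; the buffer size is hypothetical and error handling is omitted for brevity.

/* Illustrative sketch only: create a udmabuf from a sealed memfd and
 * bracket CPU writes with DMA_BUF_IOCTL_SYNC so the new hooks run.
 * All error handling is omitted; BUF_SZ is a hypothetical size. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>

#define BUF_SZ 4096

int main(void)
{
	/* udmabuf requires a memfd sealed against shrinking
	 * (SEALS_WANTED above is F_SEAL_SHRINK). */
	int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	ftruncate(memfd, BUF_SZ);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	/* Turn the memfd pages into a dma-buf via the misc device. */
	struct udmabuf_create create = {
		.memfd = memfd, .offset = 0, .size = BUF_SZ,
	};
	int devfd = open("/dev/udmabuf", O_RDWR);
	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	void *map = mmap(NULL, BUF_SZ, PROT_READ | PROT_WRITE,
			 MAP_SHARED, buffd, 0);

	/* SYNC_START -> dma_buf_begin_cpu_access() -> begin_cpu_udmabuf() */
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
	};
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(map, 0xab, BUF_SZ);	/* CPU access window */

	/* SYNC_END -> dma_buf_end_cpu_access() -> end_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(map, BUF_SZ);
	close(buffd);
	close(devfd);
	close(memfd);
	return 0;
}

The F_SEAL_SHRINK seal matches the SEALS_WANTED check visible in the trailing hunk context: udmabuf only accepts memfds whose backing pages cannot be truncated away while the dma-buf is alive.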