Diffstat (limited to 'target/linux/bcm27xx/patches-5.4/950-0723-dma-buf-heaps-Add-heap-helpers.patch')
-rw-r--r-- | target/linux/bcm27xx/patches-5.4/950-0723-dma-buf-heaps-Add-heap-helpers.patch | 394 |
1 file changed, 0 insertions, 394 deletions
diff --git a/target/linux/bcm27xx/patches-5.4/950-0723-dma-buf-heaps-Add-heap-helpers.patch b/target/linux/bcm27xx/patches-5.4/950-0723-dma-buf-heaps-Add-heap-helpers.patch
deleted file mode 100644
index 10eb46ca7b..0000000000
--- a/target/linux/bcm27xx/patches-5.4/950-0723-dma-buf-heaps-Add-heap-helpers.patch
+++ /dev/null
@@ -1,394 +0,0 @@
-From adde2d6532428cdcaeb60081abb299ce6e5aa76b Mon Sep 17 00:00:00 2001
-From: John Stultz <john.stultz@linaro.org>
-Date: Tue, 3 Dec 2019 17:26:38 +0000
-Subject: [PATCH] dma-buf: heaps: Add heap helpers
-
-Commit 5248eb12fea890a03b4cdc3ef546d6319d4d9b73 upstream.
-
-Add generic helper dmabuf ops for dma heaps, so we can reduce
-the amount of duplicative code for the exported dmabufs.
-
-This code is an evolution of the Android ION implementation, so
-thanks to its original authors and maintainters:
-  Rebecca Schultz Zavin, Colin Cross, Laura Abbott, and others!
-
-Cc: Laura Abbott <labbott@redhat.com>
-Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-Cc: Sumit Semwal <sumit.semwal@linaro.org>
-Cc: Liam Mark <lmark@codeaurora.org>
-Cc: Pratik Patel <pratikp@codeaurora.org>
-Cc: Brian Starkey <Brian.Starkey@arm.com>
-Cc: Vincent Donnefort <Vincent.Donnefort@arm.com>
-Cc: Sudipto Paul <Sudipto.Paul@arm.com>
-Cc: Andrew F. Davis <afd@ti.com>
-Cc: Christoph Hellwig <hch@infradead.org>
-Cc: Chenbo Feng <fengc@google.com>
-Cc: Alistair Strachan <astrachan@google.com>
-Cc: Hridya Valsaraju <hridya@google.com>
-Cc: Sandeep Patil <sspatil@google.com>
-Cc: Hillf Danton <hdanton@sina.com>
-Cc: Dave Airlie <airlied@gmail.com>
-Cc: dri-devel@lists.freedesktop.org
-Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-Reviewed-by: Brian Starkey <brian.starkey@arm.com>
-Acked-by: Sandeep Patil <sspatil@android.com>
-Acked-by: Laura Abbott <labbott@redhat.com>
-Tested-by: Ayan Kumar Halder <ayan.halder@arm.com>
-Signed-off-by: John Stultz <john.stultz@linaro.org>
-Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
-Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-3-john.stultz@linaro.org
----
- drivers/dma-buf/Makefile             |   1 +
- drivers/dma-buf/heaps/Makefile       |   2 +
- drivers/dma-buf/heaps/heap-helpers.c | 271 +++++++++++++++++++++++++++
- drivers/dma-buf/heaps/heap-helpers.h |  53 ++++++
- 4 files changed, 327 insertions(+)
- create mode 100644 drivers/dma-buf/heaps/Makefile
- create mode 100644 drivers/dma-buf/heaps/heap-helpers.c
- create mode 100644 drivers/dma-buf/heaps/heap-helpers.h
-
---- a/drivers/dma-buf/Makefile
-+++ b/drivers/dma-buf/Makefile
-@@ -4,6 +4,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) := dma-s
- dma-buf-objs-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-		   dma-resv.o seqno-fence.o
- obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
-+obj-$(CONFIG_DMABUF_HEAPS) += heaps/
- dma-buf-objs-$(CONFIG_SYNC_FILE) += sync_file.o
- dma-buf-objs-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
- dma-buf-objs-$(CONFIG_UDMABUF) += udmabuf.o
---- /dev/null
-+++ b/drivers/dma-buf/heaps/Makefile
-@@ -0,0 +1,2 @@
-+# SPDX-License-Identifier: GPL-2.0
-+obj-y += heap-helpers.o
---- /dev/null
-+++ b/drivers/dma-buf/heaps/heap-helpers.c
-@@ -0,0 +1,271 @@
-+// SPDX-License-Identifier: GPL-2.0
-+#include <linux/device.h>
-+#include <linux/dma-buf.h>
-+#include <linux/err.h>
-+#include <linux/highmem.h>
-+#include <linux/idr.h>
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/vmalloc.h>
-+#include <uapi/linux/dma-heap.h>
-+
-+#include "heap-helpers.h"
-+
-+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
-+			     void (*free)(struct heap_helper_buffer *))
-+{
-+	buffer->priv_virt = NULL;
-+	mutex_init(&buffer->lock);
-+	buffer->vmap_cnt = 0;
-+	buffer->vaddr = NULL;
-+	buffer->pagecount = 0;
-+	buffer->pages = NULL;
-+	INIT_LIST_HEAD(&buffer->attachments);
-+	buffer->free = free;
-+}
-+
-+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
-+					  int fd_flags)
-+{
-+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-+
-+	exp_info.ops = &heap_helper_ops;
-+	exp_info.size = buffer->size;
-+	exp_info.flags = fd_flags;
-+	exp_info.priv = buffer;
-+
-+	return dma_buf_export(&exp_info);
-+}
-+
-+static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
-+{
-+	void *vaddr;
-+
-+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
-+	if (!vaddr)
-+		return ERR_PTR(-ENOMEM);
-+
-+	return vaddr;
-+}
-+
-+static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
-+{
-+	if (buffer->vmap_cnt > 0) {
-+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
-+		vunmap(buffer->vaddr);
-+	}
-+
-+	buffer->free(buffer);
-+}
-+
-+static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
-+{
-+	void *vaddr;
-+
-+	if (buffer->vmap_cnt) {
-+		buffer->vmap_cnt++;
-+		return buffer->vaddr;
-+	}
-+	vaddr = dma_heap_map_kernel(buffer);
-+	if (IS_ERR(vaddr))
-+		return vaddr;
-+	buffer->vaddr = vaddr;
-+	buffer->vmap_cnt++;
-+	return vaddr;
-+}
-+
-+static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
-+{
-+	if (!--buffer->vmap_cnt) {
-+		vunmap(buffer->vaddr);
-+		buffer->vaddr = NULL;
-+	}
-+}
-+
-+struct dma_heaps_attachment {
-+	struct device *dev;
-+	struct sg_table table;
-+	struct list_head list;
-+};
-+
-+static int dma_heap_attach(struct dma_buf *dmabuf,
-+			   struct dma_buf_attachment *attachment)
-+{
-+	struct dma_heaps_attachment *a;
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+	int ret;
-+
-+	a = kzalloc(sizeof(*a), GFP_KERNEL);
-+	if (!a)
-+		return -ENOMEM;
-+
-+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
-+					buffer->pagecount, 0,
-+					buffer->pagecount << PAGE_SHIFT,
-+					GFP_KERNEL);
-+	if (ret) {
-+		kfree(a);
-+		return ret;
-+	}
-+
-+	a->dev = attachment->dev;
-+	INIT_LIST_HEAD(&a->list);
-+
-+	attachment->priv = a;
-+
-+	mutex_lock(&buffer->lock);
-+	list_add(&a->list, &buffer->attachments);
-+	mutex_unlock(&buffer->lock);
-+
-+	return 0;
-+}
-+
-+static void dma_heap_detach(struct dma_buf *dmabuf,
-+			    struct dma_buf_attachment *attachment)
-+{
-+	struct dma_heaps_attachment *a = attachment->priv;
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+
-+	mutex_lock(&buffer->lock);
-+	list_del(&a->list);
-+	mutex_unlock(&buffer->lock);
-+
-+	sg_free_table(&a->table);
-+	kfree(a);
-+}
-+
-+static
-+struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
-+				      enum dma_data_direction direction)
-+{
-+	struct dma_heaps_attachment *a = attachment->priv;
-+	struct sg_table *table;
-+
-+	table = &a->table;
-+
-+	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-+			direction))
-+		table = ERR_PTR(-ENOMEM);
-+	return table;
-+}
-+
-+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
-+				   struct sg_table *table,
-+				   enum dma_data_direction direction)
-+{
-+	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
-+}
-+
-+static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
-+{
-+	struct vm_area_struct *vma = vmf->vma;
-+	struct heap_helper_buffer *buffer = vma->vm_private_data;
-+
-+	if (vmf->pgoff > buffer->pagecount)
-+		return VM_FAULT_SIGBUS;
-+
-+	vmf->page = buffer->pages[vmf->pgoff];
-+	get_page(vmf->page);
-+
-+	return 0;
-+}
-+
-+static const struct vm_operations_struct dma_heap_vm_ops = {
-+	.fault = dma_heap_vm_fault,
-+};
-+
-+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-+{
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+
-+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
-+		return -EINVAL;
-+
-+	vma->vm_ops = &dma_heap_vm_ops;
-+	vma->vm_private_data = buffer;
-+
-+	return 0;
-+}
-+
-+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
-+{
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+
-+	dma_heap_buffer_destroy(buffer);
-+}
-+
-+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-+					     enum dma_data_direction direction)
-+{
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+	struct dma_heaps_attachment *a;
-+	int ret = 0;
-+
-+	mutex_lock(&buffer->lock);
-+
-+	if (buffer->vmap_cnt)
-+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
-+
-+	list_for_each_entry(a, &buffer->attachments, list) {
-+		dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
-+				    direction);
-+	}
-+	mutex_unlock(&buffer->lock);
-+
-+	return ret;
-+}
-+
-+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-+					   enum dma_data_direction direction)
-+{
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+	struct dma_heaps_attachment *a;
-+
-+	mutex_lock(&buffer->lock);
-+
-+	if (buffer->vmap_cnt)
-+		flush_kernel_vmap_range(buffer->vaddr, buffer->size);
-+
-+	list_for_each_entry(a, &buffer->attachments, list) {
-+		dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
-+				       direction);
-+	}
-+	mutex_unlock(&buffer->lock);
-+
-+	return 0;
-+}
-+
-+static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
-+{
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+	void *vaddr;
-+
-+	mutex_lock(&buffer->lock);
-+	vaddr = dma_heap_buffer_vmap_get(buffer);
-+	mutex_unlock(&buffer->lock);
-+
-+	return vaddr;
-+}
-+
-+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
-+{
-+	struct heap_helper_buffer *buffer = dmabuf->priv;
-+
-+	mutex_lock(&buffer->lock);
-+	dma_heap_buffer_vmap_put(buffer);
-+	mutex_unlock(&buffer->lock);
-+}
-+
-+const struct dma_buf_ops heap_helper_ops = {
-+	.map_dma_buf = dma_heap_map_dma_buf,
-+	.unmap_dma_buf = dma_heap_unmap_dma_buf,
-+	.mmap = dma_heap_mmap,
-+	.release = dma_heap_dma_buf_release,
-+	.attach = dma_heap_attach,
-+	.detach = dma_heap_detach,
-+	.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
-+	.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
-+	.vmap = dma_heap_dma_buf_vmap,
-+	.vunmap = dma_heap_dma_buf_vunmap,
-+};
---- /dev/null
-+++ b/drivers/dma-buf/heaps/heap-helpers.h
-@@ -0,0 +1,53 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * DMABUF Heaps helper code
-+ *
-+ * Copyright (C) 2011 Google, Inc.
-+ * Copyright (C) 2019 Linaro Ltd.
-+ */
-+
-+#ifndef _HEAP_HELPERS_H
-+#define _HEAP_HELPERS_H
-+
-+#include <linux/dma-heap.h>
-+#include <linux/list.h>
-+
-+/**
-+ * struct heap_helper_buffer - helper buffer metadata
-+ * @heap:		back pointer to the heap the buffer came from
-+ * @dmabuf:		backing dma-buf for this buffer
-+ * @size:		size of the buffer
-+ * @priv_virt		pointer to heap specific private value
-+ * @lock		mutext to protect the data in this structure
-+ * @vmap_cnt		count of vmap references on the buffer
-+ * @vaddr		vmap'ed virtual address
-+ * @pagecount		number of pages in the buffer
-+ * @pages		list of page pointers
-+ * @attachments		list of device attachments
-+ *
-+ * @free		heap callback to free the buffer
-+ */
-+struct heap_helper_buffer {
-+	struct dma_heap *heap;
-+	struct dma_buf *dmabuf;
-+	size_t size;
-+
-+	void *priv_virt;
-+	struct mutex lock;
-+	int vmap_cnt;
-+	void *vaddr;
-+	pgoff_t pagecount;
-+	struct page **pages;
-+	struct list_head attachments;
-+
-+	void (*free)(struct heap_helper_buffer *buffer);
-+};
-+
-+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
-+			     void (*free)(struct heap_helper_buffer *));
-+
-+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
-+					  int fd_flags);
-+
-+extern const struct dma_buf_ops heap_helper_ops;
-+#endif /* _HEAP_HELPERS_H */
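
The helpers removed by this patch were meant to be shared by concrete heap drivers: a driver allocates its backing pages, fills in a struct heap_helper_buffer via init_heap_helper_buffer(), and exports it with heap_helper_export_dmabuf(), letting heap_helper_ops supply all of the dma_buf_ops callbacks. The following is a minimal, hypothetical sketch of such an allocation path (not part of the patch); example_heap_allocate() and example_heap_free() are illustrative names, it assumes a page-aligned length and the dma_heap_ops allocate() signature used by this patch series.

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "heap-helpers.h"

/* Free hook invoked by dma_heap_buffer_destroy() on the final dma-buf release. */
static void example_heap_free(struct heap_helper_buffer *buffer)
{
	pgoff_t pg;

	for (pg = 0; pg < buffer->pagecount; pg++)
		__free_page(buffer->pages[pg]);
	kfree(buffer->pages);
	kfree(buffer);
}

/* Hypothetical allocate callback of a heap built on the helpers above. */
static int example_heap_allocate(struct dma_heap *heap, unsigned long len,
				 unsigned long fd_flags,
				 unsigned long heap_flags)
{
	struct heap_helper_buffer *buffer;
	struct dma_buf *dmabuf;
	pgoff_t pg;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Shared bookkeeping plus the heap-specific free hook. */
	init_heap_helper_buffer(buffer, example_heap_free);
	buffer->heap = heap;
	buffer->size = len;
	buffer->pagecount = len >> PAGE_SHIFT;	/* assumes page-aligned len */
	buffer->pages = kmalloc_array(buffer->pagecount,
				      sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	for (pg = 0; pg < buffer->pagecount; pg++) {
		buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!buffer->pages[pg]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
	}

	/* heap_helper_ops now backs every dma_buf_ops callback. */
	dmabuf = heap_helper_export_dmabuf(buffer, fd_flags);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto err_free_pages;
	}
	buffer->dmabuf = dmabuf;

	ret = dma_buf_fd(dmabuf, fd_flags);
	if (ret < 0)
		dma_buf_put(dmabuf);	/* release() frees the buffer */
	return ret;

err_free_pages:
	while (pg-- > 0)
		__free_page(buffer->pages[pg]);
	kfree(buffer->pages);
err_free_buffer:
	kfree(buffer);
	return ret;
}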