bcm27xx-userland: update to latest version
[openwrt/openwrt.git] / target / linux / bcm27xx / patches-5.4 / 950-0735-dma-buf-heaps-Add-CMA-heap-to-dmabuf-heaps.patch
1 From d5e996267c71a9517b2c831d072e76bacb8f0e56 Mon Sep 17 00:00:00 2001
2 From: John Stultz <john.stultz@linaro.org>
3 Date: Tue, 3 Dec 2019 17:26:40 +0000
4 Subject: [PATCH] dma-buf: heaps: Add CMA heap to dmabuf heaps
5
6 Commit b61614ec318aae0c77ecd2816878d851dd61d9a6 upstream.
7
8 This adds a CMA heap, which allows userspace to allocate
9 a dma-buf of contiguous memory out of a CMA region.
10
11 This code is an evolution of the Android ION implementation, so
12 thanks to its original author and maintainers:
13 Benjamin Gaignard, Laura Abbott, and others!
14
15 NOTE: This patch only adds the default CMA heap. We will enable
16 selectively adding other CMA memory regions to the dmabuf heaps
17 interface with a later patch (which requires a dt binding)
18
19 Cc: Laura Abbott <labbott@redhat.com>
20 Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
21 Cc: Sumit Semwal <sumit.semwal@linaro.org>
22 Cc: Liam Mark <lmark@codeaurora.org>
23 Cc: Pratik Patel <pratikp@codeaurora.org>
24 Cc: Brian Starkey <Brian.Starkey@arm.com>
25 Cc: Vincent Donnefort <Vincent.Donnefort@arm.com>
26 Cc: Sudipto Paul <Sudipto.Paul@arm.com>
27 Cc: Andrew F. Davis <afd@ti.com>
28 Cc: Christoph Hellwig <hch@infradead.org>
29 Cc: Chenbo Feng <fengc@google.com>
30 Cc: Alistair Strachan <astrachan@google.com>
31 Cc: Hridya Valsaraju <hridya@google.com>
32 Cc: Sandeep Patil <sspatil@google.com>
33 Cc: Hillf Danton <hdanton@sina.com>
34 Cc: Dave Airlie <airlied@gmail.com>
35 Cc: dri-devel@lists.freedesktop.org
36 Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
37 Reviewed-by: Brian Starkey <brian.starkey@arm.com>
38 Acked-by: Sandeep Patil <sspatil@android.com>
39 Acked-by: Laura Abbott <labbott@redhat.com>
40 Tested-by: Ayan Kumar Halder <ayan.halder@arm.com>
41 Signed-off-by: John Stultz <john.stultz@linaro.org>
42 Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
43 Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-5-john.stultz@linaro.org
44 ---
45 drivers/dma-buf/heaps/Kconfig | 8 ++
46 drivers/dma-buf/heaps/Makefile | 1 +
47 drivers/dma-buf/heaps/cma_heap.c | 177 +++++++++++++++++++++++++++++++
48 3 files changed, 186 insertions(+)
49 create mode 100644 drivers/dma-buf/heaps/cma_heap.c
50
51 --- a/drivers/dma-buf/heaps/Kconfig
52 +++ b/drivers/dma-buf/heaps/Kconfig
53 @@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM
54 help
55 Choose this option to enable the system dmabuf heap. The system heap
56 is backed by pages from the buddy allocator. If in doubt, say Y.
57 +
58 +config DMABUF_HEAPS_CMA
59 + bool "DMA-BUF CMA Heap"
60 + depends on DMABUF_HEAPS && DMA_CMA
61 + help
62 + Choose this option to enable dma-buf CMA heap. This heap is backed
63 + by the Contiguous Memory Allocator (CMA). If your system has these
64 + regions, you should say Y here.
65 --- a/drivers/dma-buf/heaps/Makefile
66 +++ b/drivers/dma-buf/heaps/Makefile
67 @@ -1,3 +1,4 @@
68 # SPDX-License-Identifier: GPL-2.0
69 obj-y += heap-helpers.o
70 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
71 +obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
72 --- /dev/null
73 +++ b/drivers/dma-buf/heaps/cma_heap.c
74 @@ -0,0 +1,177 @@
75 +// SPDX-License-Identifier: GPL-2.0
76 +/*
77 + * DMABUF CMA heap exporter
78 + *
79 + * Copyright (C) 2012, 2019 Linaro Ltd.
80 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
81 + */
82 +
83 +#include <linux/cma.h>
84 +#include <linux/device.h>
85 +#include <linux/dma-buf.h>
86 +#include <linux/dma-heap.h>
87 +#include <linux/dma-contiguous.h>
88 +#include <linux/err.h>
89 +#include <linux/errno.h>
90 +#include <linux/highmem.h>
91 +#include <linux/module.h>
92 +#include <linux/slab.h>
93 +#include <linux/scatterlist.h>
94 +#include <linux/sched/signal.h>
95 +
96 +#include "heap-helpers.h"
97 +
98 +struct cma_heap {
99 + struct dma_heap *heap;
100 + struct cma *cma;
101 +};
102 +
103 +static void cma_heap_free(struct heap_helper_buffer *buffer)
104 +{
105 + struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
106 + unsigned long nr_pages = buffer->pagecount;
107 + struct page *cma_pages = buffer->priv_virt;
108 +
109 + /* free page list */
110 + kfree(buffer->pages);
111 + /* release memory */
112 + cma_release(cma_heap->cma, cma_pages, nr_pages);
113 + kfree(buffer);
114 +}
115 +
116 +/* dmabuf heap CMA operations functions */
117 +static int cma_heap_allocate(struct dma_heap *heap,
118 + unsigned long len,
119 + unsigned long fd_flags,
120 + unsigned long heap_flags)
121 +{
122 + struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
123 + struct heap_helper_buffer *helper_buffer;
124 + struct page *cma_pages;
125 + size_t size = PAGE_ALIGN(len);
126 + unsigned long nr_pages = size >> PAGE_SHIFT;
127 + unsigned long align = get_order(size);
128 + struct dma_buf *dmabuf;
129 + int ret = -ENOMEM;
130 + pgoff_t pg;
131 +
132 + if (align > CONFIG_CMA_ALIGNMENT)
133 + align = CONFIG_CMA_ALIGNMENT;
134 +
135 + helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
136 + if (!helper_buffer)
137 + return -ENOMEM;
138 +
139 + init_heap_helper_buffer(helper_buffer, cma_heap_free);
140 + helper_buffer->heap = heap;
141 + helper_buffer->size = len;
142 +
143 + cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
144 + if (!cma_pages)
145 + goto free_buf;
146 +
147 + if (PageHighMem(cma_pages)) {
148 + unsigned long nr_clear_pages = nr_pages;
149 + struct page *page = cma_pages;
150 +
151 + while (nr_clear_pages > 0) {
152 + void *vaddr = kmap_atomic(page);
153 +
154 + memset(vaddr, 0, PAGE_SIZE);
155 + kunmap_atomic(vaddr);
156 + /*
157 + * Avoid wasting time zeroing memory if the process
158 +		 * has been killed by SIGKILL
159 + */
160 + if (fatal_signal_pending(current))
161 + goto free_cma;
162 +
163 + page++;
164 + nr_clear_pages--;
165 + }
166 + } else {
167 + memset(page_address(cma_pages), 0, size);
168 + }
169 +
170 + helper_buffer->pagecount = nr_pages;
171 + helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
172 + sizeof(*helper_buffer->pages),
173 + GFP_KERNEL);
174 + if (!helper_buffer->pages) {
175 + ret = -ENOMEM;
176 + goto free_cma;
177 + }
178 +
179 + for (pg = 0; pg < helper_buffer->pagecount; pg++)
180 + helper_buffer->pages[pg] = &cma_pages[pg];
181 +
182 + /* create the dmabuf */
183 + dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
184 + if (IS_ERR(dmabuf)) {
185 + ret = PTR_ERR(dmabuf);
186 + goto free_pages;
187 + }
188 +
189 + helper_buffer->dmabuf = dmabuf;
190 + helper_buffer->priv_virt = cma_pages;
191 +
192 + ret = dma_buf_fd(dmabuf, fd_flags);
193 + if (ret < 0) {
194 + dma_buf_put(dmabuf);
195 + /* just return, as put will call release and that will free */
196 + return ret;
197 + }
198 +
199 + return ret;
200 +
201 +free_pages:
202 + kfree(helper_buffer->pages);
203 +free_cma:
204 + cma_release(cma_heap->cma, cma_pages, nr_pages);
205 +free_buf:
206 + kfree(helper_buffer);
207 + return ret;
208 +}
209 +
210 +static const struct dma_heap_ops cma_heap_ops = {
211 + .allocate = cma_heap_allocate,
212 +};
213 +
214 +static int __add_cma_heap(struct cma *cma, void *data)
215 +{
216 + struct cma_heap *cma_heap;
217 + struct dma_heap_export_info exp_info;
218 +
219 + cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
220 + if (!cma_heap)
221 + return -ENOMEM;
222 + cma_heap->cma = cma;
223 +
224 + exp_info.name = cma_get_name(cma);
225 + exp_info.ops = &cma_heap_ops;
226 + exp_info.priv = cma_heap;
227 +
228 + cma_heap->heap = dma_heap_add(&exp_info);
229 + if (IS_ERR(cma_heap->heap)) {
230 + int ret = PTR_ERR(cma_heap->heap);
231 +
232 + kfree(cma_heap);
233 + return ret;
234 + }
235 +
236 + return 0;
237 +}
238 +
239 +static int add_default_cma_heap(void)
240 +{
241 + struct cma *default_cma = dev_get_cma_area(NULL);
242 + int ret = 0;
243 +
244 + if (default_cma)
245 + ret = __add_cma_heap(default_cma, NULL);
246 +
247 + return ret;
248 +}
249 +module_init(add_default_cma_heap);
250 +MODULE_DESCRIPTION("DMA-BUF CMA Heap");
251 +MODULE_LICENSE("GPL v2");