apm821xx: dw_dmac: backport fixes and cleanups from 4.7
[openwrt/openwrt.git] / target / linux / apm821xx / patches-4.4 / 011-dmaengine-core-Introduce-new-universal-API-to-reques.patch
1 From a8135d0d79e9d0ad3a4ff494fceeaae838becf38 Mon Sep 17 00:00:00 2001
2 From: Peter Ujfalusi <peter.ujfalusi@ti.com>
3 Date: Mon, 14 Dec 2015 22:47:40 +0200
4 Subject: [PATCH 2/3] dmaengine: core: Introduce new, universal API to request
5 a channel
6
7 The two API functions can cover most, if not all, current APIs used to
8 request a channel. With minimal effort dmaengine drivers, platforms and
9 dmaengine user drivers can be converted to use the two functions.
10
11 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
12
13 To request any channel matching with the requested capabilities, can be
14 used to request channel for memcpy, memset, xor, etc where no hardware
15 synchronization is needed.
16
17 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
18 To request a slave channel. The dma_request_chan() will try to find the
19 channel via DT or ACPI; in case the kernel booted in non-DT/ACPI mode,
20 it will use a filter lookup table and retrieve the needed information from
21 the dma_slave_map provided by the DMA drivers.
22 This legacy mode needs changes in platform code, in dmaengine drivers and
23 finally the dmaengine user drivers can be converted:
24
25 For each dmaengine driver an array of DMA device, slave and the parameter
26 for the filter function needs to be added:
27
28 static const struct dma_slave_map da830_edma_map[] = {
29 { "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
30 { "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
31 { "davinci-mcasp.1", "rx", EDMA_FILTER_PARAM(0, 2) },
32 { "davinci-mcasp.1", "tx", EDMA_FILTER_PARAM(0, 3) },
33 { "davinci-mcasp.2", "rx", EDMA_FILTER_PARAM(0, 4) },
34 { "davinci-mcasp.2", "tx", EDMA_FILTER_PARAM(0, 5) },
35 { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 14) },
36 { "spi_davinci.0", "tx", EDMA_FILTER_PARAM(0, 15) },
37 { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 16) },
38 { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 17) },
39 { "spi_davinci.1", "rx", EDMA_FILTER_PARAM(0, 18) },
40 { "spi_davinci.1", "tx", EDMA_FILTER_PARAM(0, 19) },
41 };
42
43 This information is going to be needed by the dmaengine driver, so
44 modification to the platform_data is needed, and the driver map should be
45 added to the pdata of the DMA driver:
46
47 da8xx_edma0_pdata.slave_map = da830_edma_map;
48 da8xx_edma0_pdata.slavecnt = ARRAY_SIZE(da830_edma_map);
49
50 The DMA driver then needs to configure the needed device -> filter_fn
51 mapping before it registers with dma_async_device_register() :
52
53 ecc->dma_slave.filter_map.map = info->slave_map;
54 ecc->dma_slave.filter_map.mapcnt = info->slavecnt;
55 ecc->dma_slave.filter_map.fn = edma_filter_fn;
56
57 When neither DT nor ACPI lookup is available, the dma_request_chan() will
58 try to match the requester's device name with the filter_map's list of
59 device names; when a match is found, it will use the information from the
60 dma_slave_map to get the channel with the dma_get_channel() internal
61 function.
62
63 Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
64 Reviewed-by: Arnd Bergmann <arnd@arndb.de>
65 Signed-off-by: Vinod Koul <vinod.koul@intel.com>
66 ---
67 Documentation/dmaengine/client.txt | 23 +++-------
68 drivers/dma/dmaengine.c | 89 +++++++++++++++++++++++++++++++++-----
69 include/linux/dmaengine.h | 51 +++++++++++++++++++---
70 3 files changed, 127 insertions(+), 36 deletions(-)
71
72 diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
73 index 11fb87f..4b04d89 100644
74 --- a/Documentation/dmaengine/client.txt
75 +++ b/Documentation/dmaengine/client.txt
76 @@ -22,25 +22,14 @@ The slave DMA usage consists of following steps:
77 Channel allocation is slightly different in the slave DMA context,
78 client drivers typically need a channel from a particular DMA
79 controller only and even in some cases a specific channel is desired.
80 - To request a channel dma_request_channel() API is used.
81 + To request a channel dma_request_chan() API is used.
82
83 Interface:
84 - struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
85 - dma_filter_fn filter_fn,
86 - void *filter_param);
87 - where dma_filter_fn is defined as:
88 - typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
89 -
90 - The 'filter_fn' parameter is optional, but highly recommended for
91 - slave and cyclic channels as they typically need to obtain a specific
92 - DMA channel.
93 -
94 - When the optional 'filter_fn' parameter is NULL, dma_request_channel()
95 - simply returns the first channel that satisfies the capability mask.
96 -
97 - Otherwise, the 'filter_fn' routine will be called once for each free
98 - channel which has a capability in 'mask'. 'filter_fn' is expected to
99 - return 'true' when the desired DMA channel is found.
100 + struct dma_chan *dma_request_chan(struct device *dev, const char *name);
101 +
102 + Which will find and return the 'name' DMA channel associated with the 'dev'
103 + device. The association is done via DT, ACPI or board file based
104 + dma_slave_map matching table.
105
106 A channel allocated via this interface is exclusive to the caller,
107 until dma_release_channel() is called.
108 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
109 index 81a36fc..a094dbb 100644
110 --- a/drivers/dma/dmaengine.c
111 +++ b/drivers/dma/dmaengine.c
112 @@ -43,6 +43,7 @@
113
114 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
116 +#include <linux/platform_device.h>
117 #include <linux/dma-mapping.h>
118 #include <linux/init.h>
119 #include <linux/module.h>
120 @@ -665,27 +666,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
121 }
122 EXPORT_SYMBOL_GPL(__dma_request_channel);
123
124 +static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
125 + const char *name,
126 + struct device *dev)
127 +{
128 + int i;
129 +
130 + if (!device->filter.mapcnt)
131 + return NULL;
132 +
133 + for (i = 0; i < device->filter.mapcnt; i++) {
134 + const struct dma_slave_map *map = &device->filter.map[i];
135 +
136 + if (!strcmp(map->devname, dev_name(dev)) &&
137 + !strcmp(map->slave, name))
138 + return map;
139 + }
140 +
141 + return NULL;
142 +}
143 +
144 /**
145 - * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
146 + * dma_request_chan - try to allocate an exclusive slave channel
147 * @dev: pointer to client device structure
148 * @name: slave channel name
149 *
150 * Returns pointer to appropriate DMA channel on success or an error pointer.
151 */
152 -struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
153 - const char *name)
154 +struct dma_chan *dma_request_chan(struct device *dev, const char *name)
155 {
156 + struct dma_device *d, *_d;
157 + struct dma_chan *chan = NULL;
158 +
159 /* If device-tree is present get slave info from here */
160 if (dev->of_node)
161 - return of_dma_request_slave_channel(dev->of_node, name);
162 + chan = of_dma_request_slave_channel(dev->of_node, name);
163
164 /* If device was enumerated by ACPI get slave info from here */
165 - if (ACPI_HANDLE(dev))
166 - return acpi_dma_request_slave_chan_by_name(dev, name);
167 + if (has_acpi_companion(dev) && !chan)
168 + chan = acpi_dma_request_slave_chan_by_name(dev, name);
169 +
170 + if (chan) {
171 + /* Valid channel found or requester need to be deferred */
172 + if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
173 + return chan;
174 + }
175 +
176 + /* Try to find the channel via the DMA filter map(s) */
177 + mutex_lock(&dma_list_mutex);
178 + list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
179 + dma_cap_mask_t mask;
180 + const struct dma_slave_map *map = dma_filter_match(d, name, dev);
181 +
182 + if (!map)
183 + continue;
184 +
185 + dma_cap_zero(mask);
186 + dma_cap_set(DMA_SLAVE, mask);
187
188 - return ERR_PTR(-ENODEV);
189 + chan = find_candidate(d, &mask, d->filter.fn, map->param);
190 + if (!IS_ERR(chan))
191 + break;
192 + }
193 + mutex_unlock(&dma_list_mutex);
194 +
195 + return chan ? chan : ERR_PTR(-EPROBE_DEFER);
196 }
197 -EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
198 +EXPORT_SYMBOL_GPL(dma_request_chan);
199
200 /**
201 * dma_request_slave_channel - try to allocate an exclusive slave channel
202 @@ -697,17 +744,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
203 struct dma_chan *dma_request_slave_channel(struct device *dev,
204 const char *name)
205 {
206 - struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
207 + struct dma_chan *ch = dma_request_chan(dev, name);
208 if (IS_ERR(ch))
209 return NULL;
210
211 - dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
212 - ch->device->privatecnt++;
213 -
214 return ch;
215 }
216 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
217
218 +/**
219 + * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
220 + * @mask: capabilities that the channel must satisfy
221 + *
222 + * Returns pointer to appropriate DMA channel on success or an error pointer.
223 + */
224 +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
225 +{
226 + struct dma_chan *chan;
227 +
228 + if (!mask)
229 + return ERR_PTR(-ENODEV);
230 +
231 + chan = __dma_request_channel(mask, NULL, NULL);
232 + if (!chan)
233 + chan = ERR_PTR(-ENODEV);
234 +
235 + return chan;
236 +}
237 +EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
238 +
239 void dma_release_channel(struct dma_chan *chan)
240 {
241 mutex_lock(&dma_list_mutex);
242 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
243 index c47c68e..d50a6b51 100644
244 --- a/include/linux/dmaengine.h
245 +++ b/include/linux/dmaengine.h
246 @@ -607,11 +607,38 @@ enum dmaengine_alignment {
247 };
248
249 /**
250 + * struct dma_slave_map - associates slave device and it's slave channel with
251 + * parameter to be used by a filter function
252 + * @devname: name of the device
253 + * @slave: slave channel name
254 + * @param: opaque parameter to pass to struct dma_filter.fn
255 + */
256 +struct dma_slave_map {
257 + const char *devname;
258 + const char *slave;
259 + void *param;
260 +};
261 +
262 +/**
263 + * struct dma_filter - information for slave device/channel to filter_fn/param
264 + * mapping
265 + * @fn: filter function callback
266 + * @mapcnt: number of slave device/channel in the map
267 + * @map: array of channel to filter mapping data
268 + */
269 +struct dma_filter {
270 + dma_filter_fn fn;
271 + int mapcnt;
272 + const struct dma_slave_map *map;
273 +};
274 +
275 +/**
276 * struct dma_device - info on the entity supplying DMA services
277 * @chancnt: how many DMA channels are supported
278 * @privatecnt: how many DMA channels are requested by dma_request_channel
279 * @channels: the list of struct dma_chan
280 * @global_node: list_head for global dma_device_list
281 + * @filter: information for device/slave to filter function/param mapping
282 * @cap_mask: one or more dma_capability flags
283 * @max_xor: maximum number of xor sources, 0 if no capability
284 * @max_pq: maximum number of PQ sources and PQ-continue capability
285 @@ -666,6 +693,7 @@ struct dma_device {
286 unsigned int privatecnt;
287 struct list_head channels;
288 struct list_head global_node;
289 + struct dma_filter filter;
290 dma_cap_mask_t cap_mask;
291 unsigned short max_xor;
292 unsigned short max_pq;
293 @@ -1140,9 +1168,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
294 void dma_issue_pending_all(void);
295 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
296 dma_filter_fn fn, void *fn_param);
297 -struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
298 - const char *name);
299 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
300 +
301 +struct dma_chan *dma_request_chan(struct device *dev, const char *name);
302 +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
303 +
304 void dma_release_channel(struct dma_chan *chan);
305 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
306 #else
307 @@ -1166,16 +1196,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
308 {
309 return NULL;
310 }
311 -static inline struct dma_chan *dma_request_slave_channel_reason(
312 - struct device *dev, const char *name)
313 -{
314 - return ERR_PTR(-ENODEV);
315 -}
316 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
317 const char *name)
318 {
319 return NULL;
320 }
321 +static inline struct dma_chan *dma_request_chan(struct device *dev,
322 + const char *name)
323 +{
324 + return ERR_PTR(-ENODEV);
325 +}
326 +static inline struct dma_chan *dma_request_chan_by_mask(
327 + const dma_cap_mask_t *mask)
328 +{
329 + return ERR_PTR(-ENODEV);
330 +}
331 static inline void dma_release_channel(struct dma_chan *chan)
332 {
333 }
334 @@ -1186,6 +1221,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
335 }
336 #endif
337
338 +#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
339 +
340 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
341 {
342 struct dma_slave_caps caps;
343 --
344 2.8.1
345