openwrt/openwrt.git: target/linux/lantiq/patches-4.4/0100-spi-add-support-for-Lantiq-SPI-controller.patch
1 From 0175fc559debc22fe8d17e9b8ffd1452e0a4667d Mon Sep 17 00:00:00 2001
2 From: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
3 Date: Tue, 16 Dec 2014 15:40:32 +0100
4 Subject: [PATCH 1/2] spi: add support for Lantiq SPI controller
5
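An illustrative device tree node for this controller (the compatible string
and the interrupt names match what the driver handles; the register address,
interrupt numbers and interrupt parent are placeholder example values and
have to be taken from the respective SoC dtsi):

	spi@e100800 {
		compatible = "lantiq,xrx200-spi";
		reg = <0xe100800 0x100>;
		interrupt-parent = <&icu0>;
		interrupts = <22 23 24>;
		interrupt-names = "spi_rx", "spi_tx", "spi_err";
		#address-cells = <1>;
		#size-cells = <0>;
	};
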
6 Signed-off-by: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
7 ---
8 drivers/spi/Kconfig | 7 +
9 drivers/spi/Makefile | 1 +
10 drivers/spi/spi-lantiq.c | 1091 ++++++++++++++++++++++++++++++++++++++++++++++
11 3 files changed, 1099 insertions(+)
12 create mode 100644 drivers/spi/spi-lantiq.c
13
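A quick sanity check of the baud rate divider computed by hw_setup_speed_hz(),
assuming a 100 MHz FPI clock purely as an example value:

	spi_clk = 100 MHz / 2             = 50 MHz
	BR      = 50 MHz / 10 MHz - 1     = 4      (for max_speed_hz = 10 MHz)
	rate    = 100 MHz / (2 * (4 + 1)) = 10 MHz
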
14 --- a/drivers/spi/Kconfig
15 +++ b/drivers/spi/Kconfig
16 @@ -355,6 +355,13 @@ config SPI_MT65XX
17 say Y or M here.If you are not sure, say N.
18 SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
19
20 +config SPI_LANTIQ
21 + tristate "Lantiq SPI controller"
22 + depends on LANTIQ && (SOC_TYPE_XWAY || SOC_FALCON)
23 + help
24 + This driver supports the Lantiq SPI controller in master
25 + mode.
26 +
27 config SPI_OC_TINY
28 tristate "OpenCores tiny SPI"
29 depends on GPIOLIB || COMPILE_TEST
30 --- a/drivers/spi/Makefile
31 +++ b/drivers/spi/Makefile
32 @@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
33 obj-$(CONFIG_SPI_GPIO_OLD) += spi_gpio_old.o
34 obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
35 obj-$(CONFIG_SPI_IMX) += spi-imx.o
36 +obj-$(CONFIG_SPI_LANTIQ) += spi-lantiq.o
37 obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
38 obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
39 obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
40 --- /dev/null
41 +++ b/drivers/spi/spi-lantiq.c
42 @@ -0,0 +1,1091 @@
43 +/*
44 + * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
45 + *
46 + * This program is free software; you can redistribute it and/or modify it
47 + * under the terms of the GNU General Public License (Version 2) as
48 + * published by the Free Software Foundation.
49 + */
50 +
51 +#include <linux/kernel.h>
52 +#include <linux/module.h>
53 +#include <linux/of_device.h>
54 +#include <linux/io.h>
55 +#include <linux/delay.h>
56 +#include <linux/interrupt.h>
57 +#include <linux/sched.h>
58 +#include <linux/completion.h>
59 +#include <linux/spinlock.h>
60 +#include <linux/err.h>
61 +#include <linux/gpio.h>
62 +#include <linux/pm_runtime.h>
63 +#include <linux/spi/spi.h>
64 +
65 +#include <lantiq_soc.h>
66 +
67 +#define SPI_RX_IRQ_NAME "spi_rx"
68 +#define SPI_TX_IRQ_NAME "spi_tx"
69 +#define SPI_ERR_IRQ_NAME "spi_err"
70 +#define SPI_FRM_IRQ_NAME "spi_frm"
71 +
72 +#define SPI_CLC 0x00
73 +#define SPI_PISEL 0x04
74 +#define SPI_ID 0x08
75 +#define SPI_CON 0x10
76 +#define SPI_STAT 0x14
77 +#define SPI_WHBSTATE 0x18
78 +#define SPI_TB 0x20
79 +#define SPI_RB 0x24
80 +#define SPI_RXFCON 0x30
81 +#define SPI_TXFCON 0x34
82 +#define SPI_FSTAT 0x38
83 +#define SPI_BRT 0x40
84 +#define SPI_BRSTAT 0x44
85 +#define SPI_SFCON 0x60
86 +#define SPI_SFSTAT 0x64
87 +#define SPI_GPOCON 0x70
88 +#define SPI_GPOSTAT 0x74
89 +#define SPI_FPGO 0x78
90 +#define SPI_RXREQ 0x80
91 +#define SPI_RXCNT 0x84
92 +#define SPI_DMACON 0xec
93 +#define SPI_IRNEN 0xf4
94 +#define SPI_IRNICR 0xf8
95 +#define SPI_IRNCR 0xfc
96 +
97 +#define SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
98 +#define SPI_CLC_SMC_M (0xFF << SPI_CLC_SMC_S)
99 +#define SPI_CLC_RMC_S 8 /* Clock divider for normal run mode */
100 +#define SPI_CLC_RMC_M (0xFF << SPI_CLC_RMC_S)
101 +#define SPI_CLC_DISS BIT(1) /* Disable status bit */
102 +#define SPI_CLC_DISR BIT(0) /* Disable request bit */
103 +
104 +#define SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
105 +#define SPI_ID_TXFS_M (0x3F << SPI_ID_TXFS_S)
106 +#define SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
107 +#define SPI_ID_RXFS_M (0x3F << SPI_ID_RXFS_S)
108 +#define SPI_ID_REV_M 0x1F /* Hardware revision number */
109 +#define SPI_ID_CFG BIT(5) /* DMA interface support */
110 +
111 +#define SPI_CON_BM_S 16 /* Data width selection */
112 +#define SPI_CON_BM_M (0x1F << SPI_CON_BM_S)
113 +#define SPI_CON_EM BIT(24) /* Echo mode */
114 +#define SPI_CON_IDLE BIT(23) /* Idle bit value */
115 +#define SPI_CON_ENBV BIT(22) /* Enable byte valid control */
116 +#define SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
117 +#define SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
118 +#define SPI_CON_AEN BIT(10) /* Abort error enable */
119 +#define SPI_CON_REN BIT(9) /* Receive overflow error enable */
120 +#define SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
121 +#define SPI_CON_LB BIT(7) /* Loopback control */
122 +#define SPI_CON_PO BIT(6) /* Clock polarity control */
123 +#define SPI_CON_PH BIT(5) /* Clock phase control */
124 +#define SPI_CON_HB BIT(4) /* Heading control */
125 +#define SPI_CON_RXOFF BIT(1) /* Switch receiver off */
126 +#define SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
127 +
128 +#define SPI_STAT_RXBV_S 28
129 +#define SPI_STAT_RXBV_M (0x7 << SPI_STAT_RXBV_S)
130 +#define SPI_STAT_BSY BIT(13) /* Busy flag */
131 +#define SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
132 +#define SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
133 +#define SPI_STAT_AE BIT(10) /* Abort error flag */
134 +#define SPI_STAT_RE BIT(9) /* Receive error flag */
135 +#define SPI_STAT_TE BIT(8) /* Transmit error flag */
136 +#define SPI_STAT_ME BIT(7) /* Mode error flag */
137 +#define SPI_STAT_MS BIT(1) /* Master/slave select bit */
138 +#define SPI_STAT_EN BIT(0) /* Enable bit */
139 +
140 +#define SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
141 +#define SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
142 +#define SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
143 +#define SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
144 +#define SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
145 +#define SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
146 +#define SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
147 +#define SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
148 +#define SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
149 +#define SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
150 +#define SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
151 +#define SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
152 +#define SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
153 +#define SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
154 +#define SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
155 +#define SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
156 +#define SPI_WHBSTATE_CLR_ERRORS 0x0F50
157 +
158 +#define SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
159 +#define SPI_RXFCON_RXFITL_M (0x3F << SPI_RXFCON_RXFITL_S)
160 +#define SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
161 +#define SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
162 +
163 +#define SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
164 +#define SPI_TXFCON_TXFITL_M (0x3F << SPI_TXFCON_TXFITL_S)
165 +#define SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
166 +#define SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
167 +
168 +#define SPI_FSTAT_RXFFL_S 0
169 +#define SPI_FSTAT_RXFFL_M (0x3f << SPI_FSTAT_RXFFL_S)
170 +#define SPI_FSTAT_TXFFL_S 8
171 +#define SPI_FSTAT_TXFFL_M (0x3f << SPI_FSTAT_TXFFL_S)
172 +
173 +#define SPI_GPOCON_ISCSBN_S 8
174 +#define SPI_GPOCON_INVOUTN_S 0
175 +
176 +#define SPI_FGPO_SETOUTN_S 8
177 +#define SPI_FGPO_CLROUTN_S 0
178 +
179 +#define SPI_RXREQ_RXCNT_M 0xFFFF /* Receive count value */
180 +#define SPI_RXCNT_TODO_M 0xFFFF /* Receive to-do value */
181 +
182 +#define SPI_IRNEN_TFI BIT(4) /* TX finished interrupt */
183 +#define SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
184 +#define SPI_IRNEN_E BIT(2) /* Error end interrupt request */
185 +#define SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
186 +#define SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
187 +#define SPI_IRNEN_R_XRX BIT(1) /* Receive end interrupt request */
188 +#define SPI_IRNEN_T_XRX BIT(0) /* Transmit end interrupt request */
189 +#define SPI_IRNEN_ALL 0x1F
190 +
191 +struct lantiq_spi_hwcfg {
192 + unsigned int num_chipselect;
193 + unsigned int irnen_r;
194 + unsigned int irnen_t;
195 +};
196 +
197 +struct lantiq_spi {
198 + struct spi_master *master;
199 + struct device *dev;
200 + void __iomem *regbase;
201 + struct clk *spi_clk;
202 + struct clk *fpi_clk;
203 + const struct lantiq_spi_hwcfg *hwcfg;
204 +
205 + spinlock_t lock;
206 + struct completion xfer_complete;
207 +
208 + const u8 *tx;
209 + u8 *rx;
210 + unsigned int tx_todo;
211 + unsigned int rx_todo;
212 + unsigned int bits_per_word;
213 + unsigned int speed_hz;
214 + int status;
215 + unsigned long timeout;
216 + unsigned int cs_delay;
217 +};
218 +
219 +struct lantiq_spi_cstate {
220 + int cs_gpio;
221 +};
222 +
223 +static u32 lantiq_spi_readl(const struct lantiq_spi *spi, u32 reg)
224 +{
225 + return readl_be(spi->regbase + reg);
226 +}
227 +
228 +static void lantiq_spi_writel(const struct lantiq_spi *spi, u32 val, u32 reg)
229 +{
230 + writel_be(val, spi->regbase + reg);
231 +}
232 +
233 +static void lantiq_spi_maskl(const struct lantiq_spi *spi, u32 clr, u32 set,
234 + u32 reg)
235 +{
236 + u32 val = readl_be(spi->regbase + reg);
237 + val &= ~clr;
238 + val |= set;
239 + writel_be(val, spi->regbase + reg);
240 +}
241 +
242 +static int supports_dma(const struct lantiq_spi *spi)
243 +{
244 + u32 id = lantiq_spi_readl(spi, SPI_ID);
245 + return id & SPI_ID_CFG;
246 +}
247 +
248 +static unsigned int tx_fifo_size(const struct lantiq_spi *spi)
249 +{
250 + u32 id = lantiq_spi_readl(spi, SPI_ID);
251 + return (id & SPI_ID_TXFS_M) >> SPI_ID_TXFS_S;
252 +}
253 +
254 +static unsigned int rx_fifo_size(const struct lantiq_spi *spi)
255 +{
256 + u32 id = lantiq_spi_readl(spi, SPI_ID);
257 + return (id & SPI_ID_RXFS_M) >> SPI_ID_RXFS_S;
258 +}
259 +
260 +static unsigned int tx_fifo_level(const struct lantiq_spi *spi)
261 +{
262 + u32 fstat = lantiq_spi_readl(spi, SPI_FSTAT);
263 + return (fstat & SPI_FSTAT_TXFFL_M) >> SPI_FSTAT_TXFFL_S;
264 +}
265 +
266 +static unsigned int rx_fifo_level(const struct lantiq_spi *spi)
267 +{
268 + u32 fstat = lantiq_spi_readl(spi, SPI_FSTAT);
269 + return fstat & SPI_FSTAT_RXFFL_M;
270 +}
271 +
272 +static unsigned int tx_fifo_free(const struct lantiq_spi *spi)
273 +{
274 + return tx_fifo_size(spi) - tx_fifo_level(spi);
275 +}
276 +
277 +static void rx_fifo_reset(const struct lantiq_spi *spi)
278 +{
279 + u32 val = rx_fifo_size(spi) << SPI_RXFCON_RXFITL_S;
280 + val |= SPI_RXFCON_RXFEN | SPI_RXFCON_RXFLU;
281 + lantiq_spi_writel(spi, val, SPI_RXFCON);
282 +}
283 +
284 +static void tx_fifo_reset(const struct lantiq_spi *spi)
285 +{
286 + u32 val = 1 << SPI_TXFCON_TXFITL_S;
287 + val |= SPI_TXFCON_TXFEN | SPI_TXFCON_TXFLU;
288 + lantiq_spi_writel(spi, val, SPI_TXFCON);
289 +}
290 +
291 +static void rx_fifo_flush(const struct lantiq_spi *spi)
292 +{
293 + lantiq_spi_maskl(spi, 0, SPI_RXFCON_RXFLU, SPI_RXFCON);
294 +}
295 +
296 +static void tx_fifo_flush(const struct lantiq_spi *spi)
297 +{
298 + lantiq_spi_maskl(spi, 0, SPI_TXFCON_TXFLU, SPI_TXFCON);
299 +}
300 +
301 +static int hw_is_busy(const struct lantiq_spi *spi)
302 +{
303 + u32 stat = lantiq_spi_readl(spi, SPI_STAT);
304 + return stat & SPI_STAT_BSY;
305 +}
306 +
307 +static void hw_enter_config_mode(const struct lantiq_spi *spi)
308 +{
309 + lantiq_spi_writel(spi, SPI_WHBSTATE_CLREN, SPI_WHBSTATE);
310 +}
311 +
312 +static void hw_enter_active_mode(const struct lantiq_spi *spi)
313 +{
314 + lantiq_spi_writel(spi, SPI_WHBSTATE_SETEN, SPI_WHBSTATE);
315 +}
316 +
317 +static void hw_setup_speed_hz(const struct lantiq_spi *spi,
318 + unsigned int max_speed_hz)
319 +{
320 + u32 spi_clk, brt;
321 +
322 + /*
323 + * SPI module clock is derived from FPI bus clock dependent on
324 + * divider value in CLC.RMC which is always set to 1.
325 + *
326 + * f_SPI
327 + * baudrate = --------------
328 + * 2 * (BR + 1)
329 + */
330 + spi_clk = clk_get_rate(spi->fpi_clk) / 2;
331 +
332 + if (max_speed_hz > spi_clk)
333 + brt = 0;
334 + else
335 + brt = spi_clk / max_speed_hz - 1;
336 +
337 + if (brt > 0xFFFF)
338 + brt = 0xFFFF;
339 +
340 + dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
341 + spi_clk, max_speed_hz, brt);
342 +
343 + lantiq_spi_writel(spi, brt, SPI_BRT);
344 +}
345 +
346 +static void hw_setup_bits_per_word(const struct lantiq_spi *spi,
347 + unsigned int bits_per_word)
348 +{
349 + u32 bm;
350 +
351 + /* CON.BM value = bits_per_word - 1 */
352 + bm = (bits_per_word - 1) << SPI_CON_BM_S;
353 +
354 + lantiq_spi_maskl(spi, SPI_CON_BM_M, bm, SPI_CON);
355 +}
356 +
357 +static void hw_setup_clock_mode(const struct lantiq_spi *spi,
358 + unsigned int mode)
359 +{
360 + u32 con_set = 0, con_clr = 0;
361 +
362 + /*
363 + * SPI mode mapping in CON register:
364 + * Mode CPOL CPHA CON.PO CON.PH
365 + * 0 0 0 0 1
366 + * 1 0 1 0 0
367 + * 2 1 0 1 1
368 + * 3 1 1 1 0
369 + */
370 + if (mode & SPI_CPHA)
371 + con_clr |= SPI_CON_PH;
372 + else
373 + con_set |= SPI_CON_PH;
374 +
375 + if (mode & SPI_CPOL)
376 + con_set |= SPI_CON_PO | SPI_CON_IDLE;
377 + else
378 + con_clr |= SPI_CON_PO | SPI_CON_IDLE;
379 +
380 + /* Set heading control */
381 + if (mode & SPI_LSB_FIRST)
382 + con_clr |= SPI_CON_HB;
383 + else
384 + con_set |= SPI_CON_HB;
385 +
386 + /* Set loopback mode */
387 + if (mode & SPI_LOOP)
388 + con_set |= SPI_CON_LB;
389 + else
390 + con_clr |= SPI_CON_LB;
391 +
392 + lantiq_spi_maskl(spi, con_clr, con_set, SPI_CON);
393 +}
394 +
395 +static void lantiq_spi_hw_init(const struct lantiq_spi *spi)
396 +{
397 + /*
398 + * Set clock divider for run mode to 1 to
399 + * run at same frequency as FPI bus
400 + */
401 + lantiq_spi_writel(spi, 1 << SPI_CLC_RMC_S, SPI_CLC);
402 +
403 + /* Put controller into config mode */
404 + hw_enter_config_mode(spi);
405 +
406 + /* Disable all interrupts */
407 + lantiq_spi_writel(spi, 0, SPI_IRNEN);
408 +
409 + /* Clear error flags */
410 + lantiq_spi_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
411 +
412 + /* Enable error checking, disable TX/RX */
413 + lantiq_spi_writel(spi, SPI_CON_RUEN | SPI_CON_AEN | SPI_CON_TEN |
414 + SPI_CON_REN | SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);
415 +
416 + /* Setup default SPI mode */
417 + hw_setup_bits_per_word(spi, spi->bits_per_word);
418 + hw_setup_clock_mode(spi, SPI_MODE_0);
419 +
420 + /* Enable master mode and clear error flags */
421 + lantiq_spi_writel(spi, SPI_WHBSTATE_SETMS | SPI_WHBSTATE_CLR_ERRORS,
422 + SPI_WHBSTATE);
423 +
424 + /* Reset GPIO/CS registers */
425 + lantiq_spi_writel(spi, 0, SPI_GPOCON);
426 + lantiq_spi_writel(spi, 0xFF00, SPI_FPGO);
427 +
428 + /* Enable and flush FIFOs */
429 + rx_fifo_reset(spi);
430 + tx_fifo_reset(spi);
431 +}
432 +
433 +static void hw_chipselect_set(struct lantiq_spi *spi, unsigned int cs)
434 +{
435 + u32 fgpo = (1 << (cs - 1 + SPI_FGPO_SETOUTN_S));
436 + lantiq_spi_writel(spi, fgpo, SPI_FPGO);
437 +}
438 +
439 +static void hw_chipselect_clear(struct lantiq_spi *spi, unsigned int cs)
440 +{
441 + u32 fgpo = (1 << (cs - 1));
442 + lantiq_spi_writel(spi, fgpo, SPI_FPGO);
443 +}
444 +
445 +static void hw_chipselect_init(struct lantiq_spi *spi, unsigned int cs,
446 + unsigned int cs_high)
447 +{
448 + u32 gpocon;
449 +
450 + /* set GPO pin to CS mode */
451 + gpocon = 1 << ((cs - 1) + SPI_GPOCON_ISCSBN_S);
452 +
453 + /* invert GPO pin */
454 + if (cs_high)
455 + gpocon |= 1 << (cs - 1);
456 +
457 + lantiq_spi_maskl(spi, 0, gpocon, SPI_GPOCON);
458 +}
459 +
460 +static void chipselect_enable(struct spi_device *spidev)
461 +{
462 + struct lantiq_spi *spi = spi_master_get_devdata(spidev->master);
463 + struct lantiq_spi_cstate *cstate = spi_get_ctldata(spidev);
464 +
465 + if (cstate->cs_gpio >= 0)
466 + gpio_set_value(cstate->cs_gpio, spidev->mode & SPI_CS_HIGH);
467 + else
468 + hw_chipselect_clear(spi, spidev->chip_select);
469 +
470 + /* CS setup/recovery time */
471 + if (spi->cs_delay)
472 + ndelay(spi->cs_delay);
473 +}
474 +
475 +static void chipselect_disable(struct spi_device *spidev)
476 +{
477 + struct lantiq_spi *spi = spi_master_get_devdata(spidev->master);
478 + struct lantiq_spi_cstate *cstate = spi_get_ctldata(spidev);
479 +
480 + /* CS hold time */
481 + if (spi->cs_delay)
482 + ndelay(spi->cs_delay);
483 +
484 + if (cstate->cs_gpio >= 0)
485 + gpio_set_value(cstate->cs_gpio, !(spidev->mode & SPI_CS_HIGH));
486 + else
487 + hw_chipselect_set(spi, spidev->chip_select);
488 +
489 + /* CS setup/recovery time */
490 + if (spi->cs_delay)
491 + ndelay(spi->cs_delay);
492 +}
493 +
494 +static int lantiq_spi_setup(struct spi_device *spidev)
495 +{
496 + struct lantiq_spi *spi = spi_master_get_devdata(spidev->master);
497 + struct lantiq_spi_cstate *cstate = spi_get_ctldata(spidev);
498 + int err;
499 +
500 + if (!cstate) {
501 + cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
502 + if (!cstate)
503 + return -ENOMEM;
504 +
505 + spi_set_ctldata(spidev, cstate);
506 + cstate->cs_gpio = -ENOENT;
507 +
508 + if (spidev->cs_gpio >= 0) {
509 + dev_dbg(spi->dev, "using chipselect %u on GPIO %d\n",
510 + spidev->chip_select, spidev->cs_gpio);
511 +
512 + err = gpio_request(spidev->cs_gpio, dev_name(spi->dev));
513 + if (err)
514 + return err;
515 +
516 + gpio_direction_output(spidev->cs_gpio,
517 + !(spidev->mode & SPI_CS_HIGH));
518 +
519 + cstate->cs_gpio = spidev->cs_gpio;
520 + } else {
521 + dev_dbg(spi->dev, "using internal chipselect %u\n",
522 + spidev->chip_select);
523 +
524 + hw_chipselect_init(spi, spidev->chip_select,
525 + spidev->mode & SPI_CS_HIGH);
526 + hw_chipselect_set(spi, spidev->chip_select);
527 + }
528 + }
529 +
530 + return 0;
531 +}
532 +
533 +static void lantiq_spi_cleanup(struct spi_device *spidev)
534 +{
535 + struct lantiq_spi_cstate *cstate = spi_get_ctldata(spidev);
536 +
537 + if (cstate->cs_gpio >= 0)
538 + gpio_free(cstate->cs_gpio);
539 +
540 + kfree(cstate);
541 + spi_set_ctldata(spidev, NULL);
542 +}
543 +
544 +static void hw_setup_message(const struct lantiq_spi *spi,
545 + struct spi_device *spidev)
546 +{
547 + const struct lantiq_spi_hwcfg *hwcfg = spi->hwcfg;
548 +
549 + hw_enter_config_mode(spi);
550 + hw_setup_clock_mode(spi, spidev->mode);
551 + hw_enter_active_mode(spi);
552 +
553 + /* Enable interrupts */
554 + lantiq_spi_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r | SPI_IRNEN_E,
555 + SPI_IRNEN);
556 +}
557 +
558 +static void hw_setup_transfer(struct lantiq_spi *spi, struct spi_device *spidev,
559 + struct spi_transfer *t)
560 +{
561 + unsigned int speed_hz, bits_per_word;
562 + u32 con;
563 +
564 + if (t->speed_hz)
565 + speed_hz = t->speed_hz;
566 + else
567 + speed_hz = spidev->max_speed_hz;
568 +
569 + if (t->bits_per_word)
570 + bits_per_word = t->bits_per_word;
571 + else
572 + bits_per_word = spidev->bits_per_word;
573 +
574 + if (bits_per_word != spi->bits_per_word ||
575 + speed_hz != spi->speed_hz) {
576 + hw_enter_config_mode(spi);
577 + hw_setup_speed_hz(spi, speed_hz);
578 + hw_setup_bits_per_word(spi, bits_per_word);
579 + hw_enter_active_mode(spi);
580 +
581 + spi->speed_hz = speed_hz;
582 + spi->bits_per_word = bits_per_word;
583 + }
584 +
585 + /* Configure transmitter and receiver */
586 + con = lantiq_spi_readl(spi, SPI_CON);
587 + if (t->tx_buf)
588 + con &= ~SPI_CON_TXOFF;
589 + else
590 + con |= SPI_CON_TXOFF;
591 +
592 + if (t->rx_buf)
593 + con &= ~SPI_CON_RXOFF;
594 + else
595 + con |= SPI_CON_RXOFF;
596 +
597 + lantiq_spi_writel(spi, con, SPI_CON);
598 +}
599 +
600 +static void hw_finish_message(const struct lantiq_spi *spi)
601 +{
602 + /* Disable interrupts */
603 + lantiq_spi_writel(spi, 0, SPI_IRNEN);
604 +
605 + /* Disable transmitter and receiver */
606 + lantiq_spi_maskl(spi, 0, SPI_CON_TXOFF | SPI_CON_RXOFF, SPI_CON);
607 +}
608 +
609 +static void tx_fifo_write(struct lantiq_spi *spi)
610 +{
611 + const u8 *tx8;
612 + const u16 *tx16;
613 + const u32 *tx32;
614 + u32 data;
615 + unsigned int tx_free = tx_fifo_free(spi);
616 +
617 + while (spi->tx_todo && tx_free) {
618 + switch (spi->bits_per_word) {
619 + case 8:
620 + tx8 = spi->tx;
621 + data = *tx8;
622 + spi->tx_todo--;
623 + spi->tx++;
624 + break;
625 + case 16:
626 + tx16 = (u16 *) spi->tx;
627 + data = *tx16;
628 + spi->tx_todo -= 2;
629 + spi->tx += 2;
630 + break;
631 + case 32:
632 + tx32 = (u32 *) spi->tx;
633 + data = *tx32;
634 + spi->tx_todo -= 4;
635 + spi->tx += 4;
636 + break;
637 + default:
638 + BUG();
639 + }
640 +
641 + lantiq_spi_writel(spi, data, SPI_TB);
642 + tx_free--;
643 + }
644 +}
645 +
646 +static void rx_fifo_read_full_duplex(struct lantiq_spi *spi)
647 +{
648 + u8 *rx8;
649 + u16 *rx16;
650 + u32 *rx32;
651 + u32 data;
652 + unsigned int rx_fill = rx_fifo_level(spi);
653 +
654 + while (rx_fill) {
655 + data = lantiq_spi_readl(spi, SPI_RB);
656 +
657 + switch (spi->bits_per_word) {
658 + case 8:
659 + rx8 = spi->rx;
660 + *rx8 = data;
661 + spi->rx_todo--;
662 + spi->rx++;
663 + break;
664 + case 16:
665 + rx16 = (u16 *) spi->rx;
666 + *rx16 = data;
667 + spi->rx_todo -= 2;
668 + spi->rx += 2;
669 + break;
670 + case 32:
671 + rx32 = (u32 *) spi->rx;
672 + *rx32 = data;
673 + spi->rx_todo -= 4;
674 + spi->rx += 4;
675 + break;
676 + default:
677 + BUG();
678 + }
679 +
680 + rx_fill--;
681 + }
682 +}
683 +
684 +static void rx_fifo_read_half_duplex(struct lantiq_spi *spi)
685 +{
686 + u32 data, *rx32;
687 + u8 *rx8;
688 + unsigned int rxbv, shift;
689 + unsigned int rx_fill = rx_fifo_level(spi);
690 +
691 + /*
692 + * In RX-only mode the bits per word value is ignored by HW. A value
693 + * of 32 is used instead. Thus all 4 bytes of each FIFO entry must be
694 + * read. If fewer than 4 RX bytes remain, the FIFO must be read
695 + * differently. The number of received and valid bytes is indicated
696 + * by the STAT.RXBV register field.
697 + */
698 + while (rx_fill) {
699 + if (spi->rx_todo < 4) {
700 + rxbv = (lantiq_spi_readl(spi, SPI_STAT) &
701 + SPI_STAT_RXBV_M) >> SPI_STAT_RXBV_S;
702 + data = lantiq_spi_readl(spi, SPI_RB);
703 +
704 + shift = (rxbv - 1) * 8;
705 + rx8 = spi->rx;
706 +
707 + while (rxbv) {
708 + *rx8++ = (data >> shift) & 0xFF;
709 + rxbv--;
710 + shift -= 8;
711 + spi->rx_todo--;
712 + spi->rx++;
713 + }
714 + } else {
715 + data = lantiq_spi_readl(spi, SPI_RB);
716 + rx32 = (u32 *) spi->rx;
717 +
718 + *rx32++ = data;
719 + spi->rx_todo -= 4;
720 + spi->rx += 4;
721 + }
722 + rx_fill--;
723 + }
724 +}
725 +
726 +static void rx_request(struct lantiq_spi *spi)
727 +{
728 + unsigned int rxreq, rxreq_max;
729 +
730 + /*
731 + * To avoid receive overflows at high clock speeds it is better to
732 + * request only as many bytes as fit into the RX FIFO. This value
733 + * depends on the FIFO size implemented in hardware.
734 + */
735 + rxreq = spi->rx_todo;
736 + rxreq_max = rx_fifo_size(spi) * 4;
737 + if (rxreq > rxreq_max)
738 + rxreq = rxreq_max;
739 +
740 + lantiq_spi_writel(spi, rxreq, SPI_RXREQ);
741 +}
742 +
743 +static irqreturn_t lantiq_spi_xmit_interrupt(int irq, void *data)
744 +{
745 + struct lantiq_spi *spi = data;
746 +
747 + /* handle possible interrupts after device initialization */
748 + if (!spi->rx && !spi->tx)
749 + return IRQ_HANDLED;
750 +
751 + if (spi->tx) {
752 + if (spi->rx && spi->rx_todo)
753 + rx_fifo_read_full_duplex(spi);
754 +
755 + if (spi->tx_todo)
756 + tx_fifo_write(spi);
757 + else
758 + goto completed;
759 + } else if (spi->rx) {
760 + if (spi->rx_todo) {
761 + rx_fifo_read_half_duplex(spi);
762 +
763 + if (spi->rx_todo)
764 + rx_request(spi);
765 + else
766 + goto completed;
767 + } else
768 + goto completed;
769 + }
770 +
771 + return IRQ_HANDLED;
772 +
773 +completed:
774 + spi->status = 0;
775 + complete(&spi->xfer_complete);
776 +
777 + return IRQ_HANDLED;
778 +}
779 +
780 +static irqreturn_t lantiq_spi_err_interrupt(int irq, void *data)
781 +{
782 + struct lantiq_spi *spi = data;
783 + u32 stat = lantiq_spi_readl(spi, SPI_STAT);
784 +
785 + if (stat & SPI_STAT_RUE)
786 + dev_err(spi->dev, "receive underflow error\n");
787 + if (stat & SPI_STAT_TUE)
788 + dev_err(spi->dev, "transmit underflow error\n");
789 + if (stat & SPI_STAT_RE)
790 + dev_err(spi->dev, "receive overflow error\n");
791 + if (stat & SPI_STAT_TE)
792 + dev_err(spi->dev, "transmit overflow error\n");
793 + if (stat & SPI_STAT_ME)
794 + dev_err(spi->dev, "mode error\n");
795 +
796 + /* Disable all interrupts */
797 + lantiq_spi_writel(spi, 0, SPI_IRNEN);
798 +
799 + /* Clear error flags */
800 + lantiq_spi_maskl(spi, 0, SPI_WHBSTATE_CLR_ERRORS, SPI_WHBSTATE);
801 +
802 + /* flush FIFOs */
803 + rx_fifo_flush(spi);
804 + tx_fifo_flush(spi);
805 +
806 + /* set bad status so it can be retried */
807 + spi->status = -EIO;
808 + complete(&spi->xfer_complete);
809 +
810 + return IRQ_HANDLED;
811 +}
812 +
813 +static int transfer_start(struct lantiq_spi *spi, struct spi_device *spidev,
814 + struct spi_transfer *t)
815 +{
816 + unsigned long flags;
817 +
818 + spin_lock_irqsave(&spi->lock, flags);
819 +
820 + spi->tx = t->tx_buf;
821 + spi->rx = t->rx_buf;
822 + spi->status = -EINPROGRESS;
823 +
824 + if (t->tx_buf) {
825 + spi->tx_todo = t->len;
826 +
827 + /* initially fill TX FIFO */
828 + tx_fifo_write(spi);
829 + }
830 +
831 + if (spi->rx) {
832 + spi->rx_todo = t->len;
833 +
834 + /* start shift clock in RX-only mode */
835 + if (!spi->tx)
836 + rx_request(spi);
837 + }
838 +
839 + spin_unlock_irqrestore(&spi->lock, flags);
840 +
841 + return 0;
842 +}
843 +
844 +static int transfer_wait_finished(struct lantiq_spi *spi)
845 +{
846 + unsigned long timeout;
847 +
848 + /* wait for completion by interrupt */
849 + timeout = wait_for_completion_timeout(&spi->xfer_complete,
850 + msecs_to_jiffies(spi->timeout));
851 + if (!timeout)
852 + return -EIO;
853 +
854 + /* make sure that HW is idle */
855 + timeout = jiffies + msecs_to_jiffies(spi->timeout);
856 + do {
857 + if (!hw_is_busy(spi))
858 + return 0;
859 +
860 + cond_resched();
861 + } while (!time_after_eq(jiffies, timeout));
862 +
863 + /* flush FIFOs on timeout */
864 + rx_fifo_flush(spi);
865 + tx_fifo_flush(spi);
866 +
867 + return -EIO;
868 +}
869 +
870 +static int lantiq_spi_transfer_one_message(struct spi_master *master,
871 + struct spi_message *msg)
872 +{
873 + struct lantiq_spi *spi = spi_master_get_devdata(master);
874 + struct spi_device *spidev = msg->spi;
875 + struct spi_transfer *t;
876 + int status;
877 + unsigned int cs_change = 1;
878 +
879 + hw_setup_message(spi, spidev);
880 +
881 + list_for_each_entry(t, &msg->transfers, transfer_list) {
882 + reinit_completion(&spi->xfer_complete);
883 +
884 + hw_setup_transfer(spi, spidev, t);
885 +
886 + if (cs_change)
887 + chipselect_enable(spidev);
888 + cs_change = t->cs_change;
889 +
890 + status = transfer_start(spi, spidev, t);
891 + if (status) {
892 + dev_err(spi->dev, "failed to start transfer\n");
893 + goto done;
894 + }
895 +
896 + status = transfer_wait_finished(spi);
897 + if (status) {
898 + dev_err(spi->dev, "transfer timeout\n");
899 + goto done;
900 + }
901 +
902 + status = spi->status;
903 + if (status) {
904 + dev_err(spi->dev, "transfer failed\n");
905 + goto done;
906 + }
907 +
908 + msg->actual_length += t->len;
909 +
910 + if (t->delay_usecs)
911 + udelay(t->delay_usecs);
912 +
913 + if (cs_change)
914 + chipselect_disable(spidev);
915 + }
916 +
917 +done:
918 + msg->status = status;
919 +
920 + if (!(status == 0 && cs_change))
921 + chipselect_disable(spidev);
922 +
923 + spi_finalize_current_message(master);
924 + hw_finish_message(spi);
925 +
926 + return status;
927 +}
928 +
929 +static int lantiq_spi_prepare_transfer(struct spi_master *master)
930 +{
931 + struct lantiq_spi *spi = spi_master_get_devdata(master);
932 +
933 + pm_runtime_get_sync(spi->dev);
934 +
935 + return 0;
936 +}
937 +
938 +static int lantiq_spi_unprepare_transfer(struct spi_master *master)
939 +{
940 + struct lantiq_spi *spi = spi_master_get_devdata(master);
941 +
942 + pm_runtime_put(spi->dev);
943 +
944 + return 0;
945 +}
946 +
947 +static const struct lantiq_spi_hwcfg spi_xway = {
948 + .num_chipselect = 3,
949 + .irnen_r = SPI_IRNEN_R_XWAY,
950 + .irnen_t = SPI_IRNEN_T_XWAY,
951 +};
952 +
953 +static const struct lantiq_spi_hwcfg spi_xrx = {
954 + .num_chipselect = 6,
955 + .irnen_r = SPI_IRNEN_R_XRX,
956 + .irnen_t = SPI_IRNEN_T_XRX,
957 +};
958 +
959 +static const struct of_device_id lantiq_spi_match[] = {
960 + { .compatible = "lantiq,spi-xway", .data = &spi_xway, }, /* DEPRECATED */
961 + { .compatible = "lantiq,ase-spi", .data = &spi_xway, },
962 + { .compatible = "lantiq,xrx100-spi", .data = &spi_xrx, },
963 + { .compatible = "lantiq,xrx200-spi", .data = &spi_xrx, },
964 + { .compatible = "lantiq,xrx330-spi", .data = &spi_xrx, },
965 + {},
966 +};
967 +MODULE_DEVICE_TABLE(of, lantiq_spi_match);
968 +
969 +static int lantiq_spi_probe(struct platform_device *pdev)
970 +{
971 + struct spi_master *master;
972 + struct resource *res;
973 + struct lantiq_spi *spi;
974 + const struct lantiq_spi_hwcfg *hwcfg;
975 + const struct of_device_id *match;
976 + int err, rx_irq, tx_irq, err_irq;
977 +
978 + match = of_match_device(lantiq_spi_match, &pdev->dev);
979 + if (!match) {
980 + dev_err(&pdev->dev, "no device match\n");
981 + return -EINVAL;
982 + }
983 + hwcfg = match->data;
984 +
985 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
986 + if (!res) {
987 + dev_err(&pdev->dev, "failed to get resources\n");
988 + return -ENXIO;
989 + }
990 +
991 + rx_irq = platform_get_irq_byname(pdev, SPI_RX_IRQ_NAME);
992 + if (rx_irq < 0) {
993 + dev_err(&pdev->dev, "failed to get %s\n", SPI_RX_IRQ_NAME);
994 + return -ENXIO;
995 + }
996 +
997 + tx_irq = platform_get_irq_byname(pdev, SPI_TX_IRQ_NAME);
998 + if (tx_irq < 0) {
999 + dev_err(&pdev->dev, "failed to get %s\n", SPI_TX_IRQ_NAME);
1000 + return -ENXIO;
1001 + }
1002 +
1003 + err_irq = platform_get_irq_byname(pdev, SPI_ERR_IRQ_NAME);
1004 + if (err_irq < 0) {
1005 + dev_err(&pdev->dev, "failed to get %s\n", SPI_ERR_IRQ_NAME);
1006 + return -ENXIO;
1007 + }
1008 +
1009 + master = spi_alloc_master(&pdev->dev, sizeof(struct lantiq_spi));
1010 + if (!master)
1011 + return -ENOMEM;
1012 +
1013 + spi = spi_master_get_devdata(master);
1014 + spi->master = master;
1015 + spi->dev = &pdev->dev;
1016 + spi->hwcfg = hwcfg;
1017 + platform_set_drvdata(pdev, spi);
1018 +
1019 + spi->regbase = devm_ioremap_resource(&pdev->dev, res);
1020 + if (IS_ERR(spi->regbase)) {
1021 + err = PTR_ERR(spi->regbase);
1022 + goto err_master_put;
1023 + }
1024 +
1025 + err = devm_request_irq(&pdev->dev, rx_irq, lantiq_spi_xmit_interrupt, 0,
1026 + SPI_RX_IRQ_NAME, spi);
1027 + if (err)
1028 + goto err_master_put;
1029 +
1030 + err = devm_request_irq(&pdev->dev, tx_irq, lantiq_spi_xmit_interrupt, 0,
1031 + SPI_TX_IRQ_NAME, spi);
1032 + if (err)
1033 + goto err_master_put;
1034 +
1035 + err = devm_request_irq(&pdev->dev, err_irq, lantiq_spi_err_interrupt, 0,
1036 + SPI_ERR_IRQ_NAME, spi);
1037 + if (err)
1038 + goto err_master_put;
1039 +
1040 + spi->spi_clk = clk_get(&pdev->dev, NULL);
1041 + if (IS_ERR(spi->spi_clk)) {
1042 + err = PTR_ERR(spi->spi_clk);
1043 + goto err_master_put;
1044 + }
1045 + clk_prepare_enable(spi->spi_clk);
1046 +
1047 + spi->fpi_clk = clk_get_fpi();
1048 + if (IS_ERR(spi->fpi_clk)) {
1049 + err = PTR_ERR(spi->fpi_clk);
1050 + goto err_clk_disable;
1051 + }
1052 +
1053 + init_completion(&spi->xfer_complete);
1054 + spin_lock_init(&spi->lock);
1055 + spi->timeout = 2000;
1056 + spi->cs_delay = 100;
1057 + spi->bits_per_word = 8;
1058 + spi->speed_hz = 0;
1059 +
1060 + master->dev.of_node = pdev->dev.of_node;
1061 + master->bus_num = 0;
1062 + master->num_chipselect = hwcfg->num_chipselect;
1063 + master->setup = lantiq_spi_setup;
1064 + master->cleanup = lantiq_spi_cleanup;
1065 + master->prepare_transfer_hardware = lantiq_spi_prepare_transfer;
1066 + master->transfer_one_message = lantiq_spi_transfer_one_message;
1067 + master->unprepare_transfer_hardware = lantiq_spi_unprepare_transfer;
1068 + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
1069 + SPI_LOOP;
1070 +
1071 + lantiq_spi_hw_init(spi);
1072 +
1073 + err = spi_register_master(master);
1074 + if (err) {
1075 + dev_err(&pdev->dev, "failed to register spi_master\n");
1076 + goto err_clk_put;
1077 + }
1078 +
1079 + dev_info(&pdev->dev,
1080 + "Lantiq SPI controller (TXFS %u, RXFS %u, DMA %u)\n",
1081 + tx_fifo_size(spi), rx_fifo_size(spi), supports_dma(spi));
1082 +
1083 + return 0;
1084 +
1085 +err_clk_put:
1086 + clk_put(spi->fpi_clk);
1087 +err_clk_disable:
1088 + clk_disable_unprepare(spi->spi_clk);
1089 + clk_put(spi->spi_clk);
1090 +err_master_put:
1091 + platform_set_drvdata(pdev, NULL);
1092 + spi_master_put(master);
1093 +
1094 + return err;
1095 +}
1096 +
1097 +static int lantiq_spi_remove(struct platform_device *pdev)
1098 +{
1099 + struct lantiq_spi *spi = platform_get_drvdata(pdev);
1100 + struct spi_master *master = spi->master;
1101 +
1102 + spi_unregister_master(master);
1103 +
1104 + lantiq_spi_writel(spi, 0, SPI_IRNEN);
1105 + rx_fifo_flush(spi);
1106 + tx_fifo_flush(spi);
1107 + hw_enter_config_mode(spi);
1108 +
1109 + clk_disable_unprepare(spi->spi_clk);
1110 + clk_put(spi->spi_clk);
1111 + clk_put(spi->fpi_clk);
1112 +
1113 + platform_set_drvdata(pdev, NULL);
1114 + spi_master_put(master);
1115 +
1116 + return 0;
1117 +}
1118 +
1119 +static struct platform_driver lantiq_spi_driver = {
1120 + .probe = lantiq_spi_probe,
1121 + .remove = lantiq_spi_remove,
1122 + .driver = {
1123 + .name = "spi-lantiq",
1124 + .owner = THIS_MODULE,
1125 + .of_match_table = lantiq_spi_match,
1126 + },
1127 +};
1128 +module_platform_driver(lantiq_spi_driver);
1129 +
1130 +MODULE_DESCRIPTION("Lantiq SPI controller driver");
1131 +MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
1132 +MODULE_LICENSE("GPL");
1133 +MODULE_ALIAS("platform:spi-lantiq");