From 7edaf7ed8fbd5fb50950a4fc8067a9c14557d010 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Mon, 25 Sep 2017 10:03:52 +0800
Subject: [PATCH] arch: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is an integrated patch for layerscape arch support.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Zhao Qiang <B45475@freescale.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Po Liu <po.liu@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Jianhua Xie <jianhua.xie@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 arch/arm/include/asm/delay.h          | 16 +++++++++
 arch/arm/include/asm/io.h             | 31 ++++++++++++++++++
 arch/arm/include/asm/mach/map.h       |  4 +--
 arch/arm/include/asm/pgtable.h        |  7 ++++
 arch/arm/kernel/bios32.c              | 43 ++++++++++++++++++++++++
 arch/arm/mm/dma-mapping.c             |  1 +
 arch/arm/mm/ioremap.c                 |  7 ++++
 arch/arm/mm/mmu.c                     |  9 +++++
 arch/arm64/include/asm/cache.h        |  2 +-
 arch/arm64/include/asm/io.h           |  2 ++
 arch/arm64/include/asm/pci.h          |  4 +++
 arch/arm64/include/asm/pgtable-prot.h |  1 +
 arch/arm64/include/asm/pgtable.h      |  5 +++
 arch/arm64/kernel/pci.c               | 62 +++++++++++++++++++++++++++++++++++
 arch/arm64/mm/dma-mapping.c           |  6 ++++
 15 files changed, 197 insertions(+), 3 deletions(-)

diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b1ce037e..1445b0ca 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -57,6 +57,22 @@ extern void __bad_udelay(void);
 			__const_udelay((n) * UDELAY_MULT)) : \
 	__udelay(n))
 
+#define spin_event_timeout(condition, timeout, delay) \
+({ \
+	typeof(condition) __ret; \
+	int i = 0; \
+	while (!(__ret = (condition)) && (i++ < timeout)) { \
+		if (delay) \
+			udelay(delay); \
+		else \
+			cpu_relax(); \
+		udelay(1); \
+	} \
+	if (!__ret) \
+		__ret = (condition); \
+	__ret; \
+})
+
 /* Loop-based definitions for assembly code. */
 extern void __loop_delay(unsigned long loops);
 extern void __loop_udelay(unsigned long usecs);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 021692c6..172a4f2e 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 #define MT_DEVICE_NONSHARED 1
 #define MT_DEVICE_CACHED 2
 #define MT_DEVICE_WC 3
+#define MT_MEMORY_RW_NS 4
 /*
  * types 4 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
 #endif
 #endif
 
+/* access ports */
+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits_be32(addr, clear, set) \
+	iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le32(addr, clear, set) \
+	iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_be16(addr, clear, set) \
+	iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le16(addr, clear, set) \
+	iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_8(addr, clear, set) \
+	iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
 /*
  * IO port access primitives
  * -------------------------
@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
 
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 9b7c328f..27f3df7d 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -21,9 +21,9 @@ struct map_desc {
 	unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
 enum {
-	MT_UNCACHED = 4,
+	MT_UNCACHED = 5,
 	MT_CACHECLEAN,
 	MT_MINICLEAN,
 	MT_LOW_VECTORS,
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a8d656d9..4ab57b37 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -118,6 +118,13 @@ extern pgprot_t pgprot_s2_device;
 #define pgprot_noncached(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 
+#define pgprot_cached(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
+
+#define pgprot_cached_ns(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
+			L_PTE_MT_DEV_NONSHARED)
+
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
 
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 2f0e0773..d2f4869a 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -11,6 +11,8 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/pcieport_if.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -63,6 +65,47 @@ void pcibios_report_status(u_int status_mask, int warn)
 		pcibios_bus_report_status(bus, status_mask, warn);
 }
 
+/*
+ * Check the device tree for the service interrupts
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+	int ret, count = 0;
+	struct device_node *np = NULL;
+
+	if (dev->bus->dev.of_node)
+		np = dev->bus->dev.of_node;
+
+	if (np == NULL)
+		return 0;
+
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return 0;
+
+	/* If root port doesn't support MSI/MSI-X/INTx in RC mode,
+	 * request irq for aer
+	 */
+	if (mask & PCIE_PORT_SERVICE_AER) {
+		ret = of_irq_get_byname(np, "aer");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	if (mask & PCIE_PORT_SERVICE_PME) {
+		ret = of_irq_get_byname(np, "pme");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	/* TODO: add more service interrupts if they are present in the device tree */
+
+	return count;
+}
+
 /*
  * We don't use this to fix the device, but initialisation of it.
  * It's not the correct use for this, but it works.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab771000..9b5f4465 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2392,6 +2392,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 	set_dma_ops(dev, dma_ops);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
 
 void arch_teardown_dma_ops(struct device *dev)
 {
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ff0eed23..2f2f4269 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space as memory. Needed when the kernel wants to execute
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f7c74135..4a2fb704 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_after_init = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW_NS] = {
+		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			    L_PTE_XN,
+		.prot_l1 = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+		.domain = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain = DOMAIN_KERNEL,
@@ -644,6 +651,7 @@ static void __init build_mem_type_table(void)
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
+	mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
 
 	/*
 	 * Set PXN for user mappings
@@ -672,6 +680,7 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5082b30b..bde44993 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -18,7 +18,7 @@
 
 #include <asm/cachetype.h>
 
-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT 6
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 
 /*
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0bba427b..36c1fbf3 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), \
+					__pgprot(PROT_NORMAL_NS))
 #define iounmap __iounmap
 
 /*
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b9a7ba9c..8a189159 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return -ENODEV;
 }
 
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state,
+			       int write_combine);
 static inline int pci_proc_domain(struct pci_bus *bus)
 {
 	return 1;
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 2142c772..cdf8b25d 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -42,6 +42,7 @@
 #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
 #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 61e21401..b8c876fb 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -356,6 +356,11 @@ static inline int pmd_protnone(pmd_t pmd)
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_cached(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
+	PTE_PXN | PTE_UXN)
+#define pgprot_cached_ns(prot) \
+	__pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 409abc45..0568ec3a 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -17,6 +17,8 @@
 #include <linux/mm.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pcieport_if.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
 #include <linux/pci-ecam.h>
@@ -54,6 +56,66 @@ int pcibios_alloc_irq(struct pci_dev *dev)
 	return 0;
 }
 
+/*
+ * Check the device tree for the service interrupts
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+	int ret, count = 0;
+	struct device_node *np = NULL;
+
+	if (dev->bus->dev.of_node)
+		np = dev->bus->dev.of_node;
+
+	if (np == NULL)
+		return 0;
+
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return 0;
+
+	/* If root port doesn't support MSI/MSI-X/INTx in RC mode,
+	 * request irq for aer
+	 */
+	if (mask & PCIE_PORT_SERVICE_AER) {
+		ret = of_irq_get_byname(np, "aer");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	if (mask & PCIE_PORT_SERVICE_PME) {
+		ret = of_irq_get_byname(np, "pme");
+		if (ret > 0) {
+			irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+			count++;
+		}
+	}
+
+	/* TODO: add more service interrupts if they are present in the device tree */
+
+	return count;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
+{
+	if (mmap_state == pci_mmap_io)
+		return -EINVAL;
+
+	/*
+	 * Mark this as IO
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			    vma->vm_end - vma->vm_start,
+			    vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
 /*
  * raw_pci_read/write - Platform-specific PCI config space access.
  */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b5bf46ce..1ef0d6df 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -30,6 +30,7 @@
 #include <linux/swiotlb.h>
 
 #include <asm/cacheflush.h>
+#include <../../../drivers/staging/fsl-mc/include/mc-bus.h>
 
 static int swiotlb __ro_after_init;
 
@@ -917,6 +918,10 @@ static int __init __iommu_dma_init(void)
 #ifdef CONFIG_PCI
 	if (!ret)
 		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
+#endif
+#ifdef CONFIG_FSL_MC_BUS
+	if (!ret)
+		ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type);
 #endif
 	return ret;
 }
@@ -971,3 +976,4 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
-- 
2.14.1
