bcm63xx: ar-5315u: expose LEDs through controller
[openwrt/staging/luka.git] / target / linux / layerscape / patches-4.14 / 805-qe-support-layerscape.patch
1 From f4e3e2cf6484056225385d717da4e9c4f8613935 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:58 +0800
4 Subject: [PATCH] qe: support layerscape
5
6 This is an integrated patch of qe for layerscape
7
8 Signed-off-by: Biwen Li <biwen.li@nxp.com>
9 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
10 ---
11 .../fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++-------
12 drivers/soc/fsl/qe/Kconfig | 2 +-
13 drivers/soc/fsl/qe/Makefile | 2 +-
14 drivers/soc/fsl/qe/qe.c | 80 ++--
15 drivers/soc/fsl/qe/qe_ic.h | 103 -----
16 drivers/soc/fsl/qe/qe_io.c | 42 +-
17 drivers/soc/fsl/qe/qe_tdm.c | 8 +-
18 drivers/soc/fsl/qe/ucc.c | 10 +-
19 drivers/soc/fsl/qe/ucc_fast.c | 74 ++--
20 drivers/tty/serial/ucc_uart.c | 1 +
21 include/soc/fsl/qe/qe.h | 1 -
22 include/soc/fsl/qe/qe_ic.h | 139 -------
23 12 files changed, 359 insertions(+), 492 deletions(-)
24 rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
25 delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
26 delete mode 100644 include/soc/fsl/qe/qe_ic.h
27
28 --- a/drivers/soc/fsl/qe/qe_ic.c
29 +++ /dev/null
30 @@ -1,512 +0,0 @@
31 -/*
32 - * arch/powerpc/sysdev/qe_lib/qe_ic.c
33 - *
34 - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
35 - *
36 - * Author: Li Yang <leoli@freescale.com>
37 - * Based on code from Shlomi Gridish <gridish@freescale.com>
38 - *
39 - * QUICC ENGINE Interrupt Controller
40 - *
41 - * This program is free software; you can redistribute it and/or modify it
42 - * under the terms of the GNU General Public License as published by the
43 - * Free Software Foundation; either version 2 of the License, or (at your
44 - * option) any later version.
45 - */
46 -
47 -#include <linux/of_irq.h>
48 -#include <linux/of_address.h>
49 -#include <linux/kernel.h>
50 -#include <linux/init.h>
51 -#include <linux/errno.h>
52 -#include <linux/reboot.h>
53 -#include <linux/slab.h>
54 -#include <linux/stddef.h>
55 -#include <linux/sched.h>
56 -#include <linux/signal.h>
57 -#include <linux/device.h>
58 -#include <linux/spinlock.h>
59 -#include <asm/irq.h>
60 -#include <asm/io.h>
61 -#include <soc/fsl/qe/qe_ic.h>
62 -
63 -#include "qe_ic.h"
64 -
65 -static DEFINE_RAW_SPINLOCK(qe_ic_lock);
66 -
67 -static struct qe_ic_info qe_ic_info[] = {
68 - [1] = {
69 - .mask = 0x00008000,
70 - .mask_reg = QEIC_CIMR,
71 - .pri_code = 0,
72 - .pri_reg = QEIC_CIPWCC,
73 - },
74 - [2] = {
75 - .mask = 0x00004000,
76 - .mask_reg = QEIC_CIMR,
77 - .pri_code = 1,
78 - .pri_reg = QEIC_CIPWCC,
79 - },
80 - [3] = {
81 - .mask = 0x00002000,
82 - .mask_reg = QEIC_CIMR,
83 - .pri_code = 2,
84 - .pri_reg = QEIC_CIPWCC,
85 - },
86 - [10] = {
87 - .mask = 0x00000040,
88 - .mask_reg = QEIC_CIMR,
89 - .pri_code = 1,
90 - .pri_reg = QEIC_CIPZCC,
91 - },
92 - [11] = {
93 - .mask = 0x00000020,
94 - .mask_reg = QEIC_CIMR,
95 - .pri_code = 2,
96 - .pri_reg = QEIC_CIPZCC,
97 - },
98 - [12] = {
99 - .mask = 0x00000010,
100 - .mask_reg = QEIC_CIMR,
101 - .pri_code = 3,
102 - .pri_reg = QEIC_CIPZCC,
103 - },
104 - [13] = {
105 - .mask = 0x00000008,
106 - .mask_reg = QEIC_CIMR,
107 - .pri_code = 4,
108 - .pri_reg = QEIC_CIPZCC,
109 - },
110 - [14] = {
111 - .mask = 0x00000004,
112 - .mask_reg = QEIC_CIMR,
113 - .pri_code = 5,
114 - .pri_reg = QEIC_CIPZCC,
115 - },
116 - [15] = {
117 - .mask = 0x00000002,
118 - .mask_reg = QEIC_CIMR,
119 - .pri_code = 6,
120 - .pri_reg = QEIC_CIPZCC,
121 - },
122 - [20] = {
123 - .mask = 0x10000000,
124 - .mask_reg = QEIC_CRIMR,
125 - .pri_code = 3,
126 - .pri_reg = QEIC_CIPRTA,
127 - },
128 - [25] = {
129 - .mask = 0x00800000,
130 - .mask_reg = QEIC_CRIMR,
131 - .pri_code = 0,
132 - .pri_reg = QEIC_CIPRTB,
133 - },
134 - [26] = {
135 - .mask = 0x00400000,
136 - .mask_reg = QEIC_CRIMR,
137 - .pri_code = 1,
138 - .pri_reg = QEIC_CIPRTB,
139 - },
140 - [27] = {
141 - .mask = 0x00200000,
142 - .mask_reg = QEIC_CRIMR,
143 - .pri_code = 2,
144 - .pri_reg = QEIC_CIPRTB,
145 - },
146 - [28] = {
147 - .mask = 0x00100000,
148 - .mask_reg = QEIC_CRIMR,
149 - .pri_code = 3,
150 - .pri_reg = QEIC_CIPRTB,
151 - },
152 - [32] = {
153 - .mask = 0x80000000,
154 - .mask_reg = QEIC_CIMR,
155 - .pri_code = 0,
156 - .pri_reg = QEIC_CIPXCC,
157 - },
158 - [33] = {
159 - .mask = 0x40000000,
160 - .mask_reg = QEIC_CIMR,
161 - .pri_code = 1,
162 - .pri_reg = QEIC_CIPXCC,
163 - },
164 - [34] = {
165 - .mask = 0x20000000,
166 - .mask_reg = QEIC_CIMR,
167 - .pri_code = 2,
168 - .pri_reg = QEIC_CIPXCC,
169 - },
170 - [35] = {
171 - .mask = 0x10000000,
172 - .mask_reg = QEIC_CIMR,
173 - .pri_code = 3,
174 - .pri_reg = QEIC_CIPXCC,
175 - },
176 - [36] = {
177 - .mask = 0x08000000,
178 - .mask_reg = QEIC_CIMR,
179 - .pri_code = 4,
180 - .pri_reg = QEIC_CIPXCC,
181 - },
182 - [40] = {
183 - .mask = 0x00800000,
184 - .mask_reg = QEIC_CIMR,
185 - .pri_code = 0,
186 - .pri_reg = QEIC_CIPYCC,
187 - },
188 - [41] = {
189 - .mask = 0x00400000,
190 - .mask_reg = QEIC_CIMR,
191 - .pri_code = 1,
192 - .pri_reg = QEIC_CIPYCC,
193 - },
194 - [42] = {
195 - .mask = 0x00200000,
196 - .mask_reg = QEIC_CIMR,
197 - .pri_code = 2,
198 - .pri_reg = QEIC_CIPYCC,
199 - },
200 - [43] = {
201 - .mask = 0x00100000,
202 - .mask_reg = QEIC_CIMR,
203 - .pri_code = 3,
204 - .pri_reg = QEIC_CIPYCC,
205 - },
206 -};
207 -
208 -static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
209 -{
210 - return in_be32(base + (reg >> 2));
211 -}
212 -
213 -static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
214 - u32 value)
215 -{
216 - out_be32(base + (reg >> 2), value);
217 -}
218 -
219 -static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
220 -{
221 - return irq_get_chip_data(virq);
222 -}
223 -
224 -static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
225 -{
226 - return irq_data_get_irq_chip_data(d);
227 -}
228 -
229 -static void qe_ic_unmask_irq(struct irq_data *d)
230 -{
231 - struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
232 - unsigned int src = irqd_to_hwirq(d);
233 - unsigned long flags;
234 - u32 temp;
235 -
236 - raw_spin_lock_irqsave(&qe_ic_lock, flags);
237 -
238 - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
239 - qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
240 - temp | qe_ic_info[src].mask);
241 -
242 - raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
243 -}
244 -
245 -static void qe_ic_mask_irq(struct irq_data *d)
246 -{
247 - struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
248 - unsigned int src = irqd_to_hwirq(d);
249 - unsigned long flags;
250 - u32 temp;
251 -
252 - raw_spin_lock_irqsave(&qe_ic_lock, flags);
253 -
254 - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
255 - qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
256 - temp & ~qe_ic_info[src].mask);
257 -
258 - /* Flush the above write before enabling interrupts; otherwise,
259 - * spurious interrupts will sometimes happen. To be 100% sure
260 - * that the write has reached the device before interrupts are
261 - * enabled, the mask register would have to be read back; however,
262 - * this is not required for correctness, only to avoid wasting
263 - * time on a large number of spurious interrupts. In testing,
264 - * a sync reduced the observed spurious interrupts to zero.
265 - */
266 - mb();
267 -
268 - raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
269 -}
270 -
271 -static struct irq_chip qe_ic_irq_chip = {
272 - .name = "QEIC",
273 - .irq_unmask = qe_ic_unmask_irq,
274 - .irq_mask = qe_ic_mask_irq,
275 - .irq_mask_ack = qe_ic_mask_irq,
276 -};
277 -
278 -static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
279 - enum irq_domain_bus_token bus_token)
280 -{
281 - /* Exact match, unless qe_ic node is NULL */
282 - struct device_node *of_node = irq_domain_get_of_node(h);
283 - return of_node == NULL || of_node == node;
284 -}
285 -
286 -static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
287 - irq_hw_number_t hw)
288 -{
289 - struct qe_ic *qe_ic = h->host_data;
290 - struct irq_chip *chip;
291 -
292 - if (hw >= ARRAY_SIZE(qe_ic_info)) {
293 - pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
294 - return -EINVAL;
295 - }
296 -
297 - if (qe_ic_info[hw].mask == 0) {
298 - printk(KERN_ERR "Can't map reserved IRQ\n");
299 - return -EINVAL;
300 - }
301 - /* Default chip */
302 - chip = &qe_ic->hc_irq;
303 -
304 - irq_set_chip_data(virq, qe_ic);
305 - irq_set_status_flags(virq, IRQ_LEVEL);
306 -
307 - irq_set_chip_and_handler(virq, chip, handle_level_irq);
308 -
309 - return 0;
310 -}
311 -
312 -static const struct irq_domain_ops qe_ic_host_ops = {
313 - .match = qe_ic_host_match,
314 - .map = qe_ic_host_map,
315 - .xlate = irq_domain_xlate_onetwocell,
316 -};
317 -
318 -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
319 -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
320 -{
321 - int irq;
322 -
323 - BUG_ON(qe_ic == NULL);
324 -
325 - /* get the interrupt source vector. */
326 - irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
327 -
328 - if (irq == 0)
329 - return NO_IRQ;
330 -
331 - return irq_linear_revmap(qe_ic->irqhost, irq);
332 -}
333 -
334 -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
335 -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
336 -{
337 - int irq;
338 -
339 - BUG_ON(qe_ic == NULL);
340 -
341 - /* get the interrupt source vector. */
342 - irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
343 -
344 - if (irq == 0)
345 - return NO_IRQ;
346 -
347 - return irq_linear_revmap(qe_ic->irqhost, irq);
348 -}
349 -
350 -void __init qe_ic_init(struct device_node *node, unsigned int flags,
351 - void (*low_handler)(struct irq_desc *desc),
352 - void (*high_handler)(struct irq_desc *desc))
353 -{
354 - struct qe_ic *qe_ic;
355 - struct resource res;
356 - u32 temp = 0, ret, high_active = 0;
357 -
358 - ret = of_address_to_resource(node, 0, &res);
359 - if (ret)
360 - return;
361 -
362 - qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
363 - if (qe_ic == NULL)
364 - return;
365 -
366 - qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
367 - &qe_ic_host_ops, qe_ic);
368 - if (qe_ic->irqhost == NULL) {
369 - kfree(qe_ic);
370 - return;
371 - }
372 -
373 - qe_ic->regs = ioremap(res.start, resource_size(&res));
374 -
375 - qe_ic->hc_irq = qe_ic_irq_chip;
376 -
377 - qe_ic->virq_high = irq_of_parse_and_map(node, 0);
378 - qe_ic->virq_low = irq_of_parse_and_map(node, 1);
379 -
380 - if (qe_ic->virq_low == NO_IRQ) {
381 - printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
382 - kfree(qe_ic);
383 - return;
384 - }
385 -
386 - /* default priority scheme is grouped. If spread mode is */
387 - /* required, configure cicr accordingly. */
388 - if (flags & QE_IC_SPREADMODE_GRP_W)
389 - temp |= CICR_GWCC;
390 - if (flags & QE_IC_SPREADMODE_GRP_X)
391 - temp |= CICR_GXCC;
392 - if (flags & QE_IC_SPREADMODE_GRP_Y)
393 - temp |= CICR_GYCC;
394 - if (flags & QE_IC_SPREADMODE_GRP_Z)
395 - temp |= CICR_GZCC;
396 - if (flags & QE_IC_SPREADMODE_GRP_RISCA)
397 - temp |= CICR_GRTA;
398 - if (flags & QE_IC_SPREADMODE_GRP_RISCB)
399 - temp |= CICR_GRTB;
400 -
401 - /* choose destination signal for highest priority interrupt */
402 - if (flags & QE_IC_HIGH_SIGNAL) {
403 - temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
404 - high_active = 1;
405 - }
406 -
407 - qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
408 -
409 - irq_set_handler_data(qe_ic->virq_low, qe_ic);
410 - irq_set_chained_handler(qe_ic->virq_low, low_handler);
411 -
412 - if (qe_ic->virq_high != NO_IRQ &&
413 - qe_ic->virq_high != qe_ic->virq_low) {
414 - irq_set_handler_data(qe_ic->virq_high, qe_ic);
415 - irq_set_chained_handler(qe_ic->virq_high, high_handler);
416 - }
417 -}
418 -
419 -void qe_ic_set_highest_priority(unsigned int virq, int high)
420 -{
421 - struct qe_ic *qe_ic = qe_ic_from_irq(virq);
422 - unsigned int src = virq_to_hw(virq);
423 - u32 temp = 0;
424 -
425 - temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
426 -
427 - temp &= ~CICR_HP_MASK;
428 - temp |= src << CICR_HP_SHIFT;
429 -
430 - temp &= ~CICR_HPIT_MASK;
431 - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
432 -
433 - qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
434 -}
435 -
436 -/* Set Priority level within its group, from 1 to 8 */
437 -int qe_ic_set_priority(unsigned int virq, unsigned int priority)
438 -{
439 - struct qe_ic *qe_ic = qe_ic_from_irq(virq);
440 - unsigned int src = virq_to_hw(virq);
441 - u32 temp;
442 -
443 - if (priority > 8 || priority == 0)
444 - return -EINVAL;
445 - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
446 - "%s: Invalid hw irq number for QEIC\n", __func__))
447 - return -EINVAL;
448 - if (qe_ic_info[src].pri_reg == 0)
449 - return -EINVAL;
450 -
451 - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
452 -
453 - if (priority < 4) {
454 - temp &= ~(0x7 << (32 - priority * 3));
455 - temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
456 - } else {
457 - temp &= ~(0x7 << (24 - priority * 3));
458 - temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
459 - }
460 -
461 - qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
462 -
463 - return 0;
464 -}
465 -
466 -/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
467 -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
468 -{
469 - struct qe_ic *qe_ic = qe_ic_from_irq(virq);
470 - unsigned int src = virq_to_hw(virq);
471 - u32 temp, control_reg = QEIC_CICNR, shift = 0;
472 -
473 - if (priority > 2 || priority == 0)
474 - return -EINVAL;
475 - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
476 - "%s: Invalid hw irq number for QEIC\n", __func__))
477 - return -EINVAL;
478 -
479 - switch (qe_ic_info[src].pri_reg) {
480 - case QEIC_CIPZCC:
481 - shift = CICNR_ZCC1T_SHIFT;
482 - break;
483 - case QEIC_CIPWCC:
484 - shift = CICNR_WCC1T_SHIFT;
485 - break;
486 - case QEIC_CIPYCC:
487 - shift = CICNR_YCC1T_SHIFT;
488 - break;
489 - case QEIC_CIPXCC:
490 - shift = CICNR_XCC1T_SHIFT;
491 - break;
492 - case QEIC_CIPRTA:
493 - shift = CRICR_RTA1T_SHIFT;
494 - control_reg = QEIC_CRICR;
495 - break;
496 - case QEIC_CIPRTB:
497 - shift = CRICR_RTB1T_SHIFT;
498 - control_reg = QEIC_CRICR;
499 - break;
500 - default:
501 - return -EINVAL;
502 - }
503 -
504 - shift += (2 - priority) * 2;
505 - temp = qe_ic_read(qe_ic->regs, control_reg);
506 - temp &= ~(SIGNAL_MASK << shift);
507 - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
508 - qe_ic_write(qe_ic->regs, control_reg, temp);
509 -
510 - return 0;
511 -}
512 -
513 -static struct bus_type qe_ic_subsys = {
514 - .name = "qe_ic",
515 - .dev_name = "qe_ic",
516 -};
517 -
518 -static struct device device_qe_ic = {
519 - .id = 0,
520 - .bus = &qe_ic_subsys,
521 -};
522 -
523 -static int __init init_qe_ic_sysfs(void)
524 -{
525 - int rc;
526 -
527 - printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
528 -
529 - rc = subsys_system_register(&qe_ic_subsys, NULL);
530 - if (rc) {
531 - printk(KERN_ERR "Failed registering qe_ic sys class\n");
532 - return -ENODEV;
533 - }
534 - rc = device_register(&device_qe_ic);
535 - if (rc) {
536 - printk(KERN_ERR "Failed registering qe_ic sys device\n");
537 - return -ENODEV;
538 - }
539 - return 0;
540 -}
541 -
542 -subsys_initcall(init_qe_ic_sysfs);
543 --- /dev/null
544 +++ b/drivers/irqchip/irq-qeic.c
545 @@ -0,0 +1,605 @@
546 +/*
547 + * drivers/irqchip/irq-qeic.c
548 + *
549 + * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
550 + *
551 + * Author: Li Yang <leoli@freescale.com>
552 + * Based on code from Shlomi Gridish <gridish@freescale.com>
553 + *
554 + * QUICC ENGINE Interrupt Controller
555 + *
556 + * This program is free software; you can redistribute it and/or modify it
557 + * under the terms of the GNU General Public License as published by the
558 + * Free Software Foundation; either version 2 of the License, or (at your
559 + * option) any later version.
560 + */
561 +
562 +#include <linux/of_irq.h>
563 +#include <linux/of_address.h>
564 +#include <linux/kernel.h>
565 +#include <linux/init.h>
566 +#include <linux/irqdomain.h>
567 +#include <linux/irqchip.h>
568 +#include <linux/errno.h>
569 +#include <linux/of_address.h>
570 +#include <linux/of_irq.h>
571 +#include <linux/reboot.h>
572 +#include <linux/slab.h>
573 +#include <linux/stddef.h>
574 +#include <linux/sched.h>
575 +#include <linux/signal.h>
576 +#include <linux/device.h>
577 +#include <linux/spinlock.h>
578 +#include <linux/irq.h>
579 +#include <asm/io.h>
580 +
581 +#define NR_QE_IC_INTS 64
582 +
583 +/* QE IC registers offset */
584 +#define QEIC_CICR 0x00
585 +#define QEIC_CIVEC 0x04
586 +#define QEIC_CRIPNR 0x08
587 +#define QEIC_CIPNR 0x0c
588 +#define QEIC_CIPXCC 0x10
589 +#define QEIC_CIPYCC 0x14
590 +#define QEIC_CIPWCC 0x18
591 +#define QEIC_CIPZCC 0x1c
592 +#define QEIC_CIMR 0x20
593 +#define QEIC_CRIMR 0x24
594 +#define QEIC_CICNR 0x28
595 +#define QEIC_CIPRTA 0x30
596 +#define QEIC_CIPRTB 0x34
597 +#define QEIC_CRICR 0x3c
598 +#define QEIC_CHIVEC 0x60
599 +
600 +/* Interrupt priority registers */
601 +#define CIPCC_SHIFT_PRI0 29
602 +#define CIPCC_SHIFT_PRI1 26
603 +#define CIPCC_SHIFT_PRI2 23
604 +#define CIPCC_SHIFT_PRI3 20
605 +#define CIPCC_SHIFT_PRI4 13
606 +#define CIPCC_SHIFT_PRI5 10
607 +#define CIPCC_SHIFT_PRI6 7
608 +#define CIPCC_SHIFT_PRI7 4
609 +
610 +/* CICR priority modes */
611 +#define CICR_GWCC 0x00040000
612 +#define CICR_GXCC 0x00020000
613 +#define CICR_GYCC 0x00010000
614 +#define CICR_GZCC 0x00080000
615 +#define CICR_GRTA 0x00200000
616 +#define CICR_GRTB 0x00400000
617 +#define CICR_HPIT_SHIFT 8
618 +#define CICR_HPIT_MASK 0x00000300
619 +#define CICR_HP_SHIFT 24
620 +#define CICR_HP_MASK 0x3f000000
621 +
622 +/* CICNR */
623 +#define CICNR_WCC1T_SHIFT 20
624 +#define CICNR_ZCC1T_SHIFT 28
625 +#define CICNR_YCC1T_SHIFT 12
626 +#define CICNR_XCC1T_SHIFT 4
627 +
628 +/* CRICR */
629 +#define CRICR_RTA1T_SHIFT 20
630 +#define CRICR_RTB1T_SHIFT 28
631 +
632 +/* Signal indicator */
633 +#define SIGNAL_MASK 3
634 +#define SIGNAL_HIGH 2
635 +#define SIGNAL_LOW 0
636 +
637 +#define NUM_OF_QE_IC_GROUPS 6
638 +
639 +/* Flags when we init the QE IC */
640 +#define QE_IC_SPREADMODE_GRP_W 0x00000001
641 +#define QE_IC_SPREADMODE_GRP_X 0x00000002
642 +#define QE_IC_SPREADMODE_GRP_Y 0x00000004
643 +#define QE_IC_SPREADMODE_GRP_Z 0x00000008
644 +#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
645 +#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
646 +
647 +#define QE_IC_LOW_SIGNAL 0x00000100
648 +#define QE_IC_HIGH_SIGNAL 0x00000200
649 +
650 +#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
651 +#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
652 +#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
653 +#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
654 +#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
655 +#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
656 +#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
657 +#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
658 +#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
659 +#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
660 +#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
661 +#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
662 +#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
663 +
664 +/* QE interrupt sources groups */
665 +enum qe_ic_grp_id {
666 + QE_IC_GRP_W = 0, /* QE interrupt controller group W */
667 + QE_IC_GRP_X, /* QE interrupt controller group X */
668 + QE_IC_GRP_Y, /* QE interrupt controller group Y */
669 + QE_IC_GRP_Z, /* QE interrupt controller group Z */
670 + QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
671 + QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
672 +};
673 +
674 +struct qe_ic {
675 + /* Control registers offset */
676 + u32 __iomem *regs;
677 +
678 + /* The remapper for this QEIC */
679 + struct irq_domain *irqhost;
680 +
681 + /* The "linux" controller struct */
682 + struct irq_chip hc_irq;
683 +
684 + /* VIRQ numbers of QE high/low irqs */
685 + unsigned int virq_high;
686 + unsigned int virq_low;
687 +};
688 +
689 +/*
690 + * QE interrupt controller internal structure
691 + */
692 +struct qe_ic_info {
693 + /* location of this source at the QIMR register. */
694 + u32 mask;
695 +
696 + /* Mask register offset */
697 + u32 mask_reg;
698 +
699 + /*
700 + * for grouped interrupts sources - the interrupt
701 + * code as appears at the group priority register
702 + */
703 + u8 pri_code;
704 +
705 + /* Group priority register offset */
706 + u32 pri_reg;
707 +};
708 +
709 +static DEFINE_RAW_SPINLOCK(qe_ic_lock);
710 +
711 +static struct qe_ic_info qe_ic_info[] = {
712 + [1] = {
713 + .mask = 0x00008000,
714 + .mask_reg = QEIC_CIMR,
715 + .pri_code = 0,
716 + .pri_reg = QEIC_CIPWCC,
717 + },
718 + [2] = {
719 + .mask = 0x00004000,
720 + .mask_reg = QEIC_CIMR,
721 + .pri_code = 1,
722 + .pri_reg = QEIC_CIPWCC,
723 + },
724 + [3] = {
725 + .mask = 0x00002000,
726 + .mask_reg = QEIC_CIMR,
727 + .pri_code = 2,
728 + .pri_reg = QEIC_CIPWCC,
729 + },
730 + [10] = {
731 + .mask = 0x00000040,
732 + .mask_reg = QEIC_CIMR,
733 + .pri_code = 1,
734 + .pri_reg = QEIC_CIPZCC,
735 + },
736 + [11] = {
737 + .mask = 0x00000020,
738 + .mask_reg = QEIC_CIMR,
739 + .pri_code = 2,
740 + .pri_reg = QEIC_CIPZCC,
741 + },
742 + [12] = {
743 + .mask = 0x00000010,
744 + .mask_reg = QEIC_CIMR,
745 + .pri_code = 3,
746 + .pri_reg = QEIC_CIPZCC,
747 + },
748 + [13] = {
749 + .mask = 0x00000008,
750 + .mask_reg = QEIC_CIMR,
751 + .pri_code = 4,
752 + .pri_reg = QEIC_CIPZCC,
753 + },
754 + [14] = {
755 + .mask = 0x00000004,
756 + .mask_reg = QEIC_CIMR,
757 + .pri_code = 5,
758 + .pri_reg = QEIC_CIPZCC,
759 + },
760 + [15] = {
761 + .mask = 0x00000002,
762 + .mask_reg = QEIC_CIMR,
763 + .pri_code = 6,
764 + .pri_reg = QEIC_CIPZCC,
765 + },
766 + [20] = {
767 + .mask = 0x10000000,
768 + .mask_reg = QEIC_CRIMR,
769 + .pri_code = 3,
770 + .pri_reg = QEIC_CIPRTA,
771 + },
772 + [25] = {
773 + .mask = 0x00800000,
774 + .mask_reg = QEIC_CRIMR,
775 + .pri_code = 0,
776 + .pri_reg = QEIC_CIPRTB,
777 + },
778 + [26] = {
779 + .mask = 0x00400000,
780 + .mask_reg = QEIC_CRIMR,
781 + .pri_code = 1,
782 + .pri_reg = QEIC_CIPRTB,
783 + },
784 + [27] = {
785 + .mask = 0x00200000,
786 + .mask_reg = QEIC_CRIMR,
787 + .pri_code = 2,
788 + .pri_reg = QEIC_CIPRTB,
789 + },
790 + [28] = {
791 + .mask = 0x00100000,
792 + .mask_reg = QEIC_CRIMR,
793 + .pri_code = 3,
794 + .pri_reg = QEIC_CIPRTB,
795 + },
796 + [32] = {
797 + .mask = 0x80000000,
798 + .mask_reg = QEIC_CIMR,
799 + .pri_code = 0,
800 + .pri_reg = QEIC_CIPXCC,
801 + },
802 + [33] = {
803 + .mask = 0x40000000,
804 + .mask_reg = QEIC_CIMR,
805 + .pri_code = 1,
806 + .pri_reg = QEIC_CIPXCC,
807 + },
808 + [34] = {
809 + .mask = 0x20000000,
810 + .mask_reg = QEIC_CIMR,
811 + .pri_code = 2,
812 + .pri_reg = QEIC_CIPXCC,
813 + },
814 + [35] = {
815 + .mask = 0x10000000,
816 + .mask_reg = QEIC_CIMR,
817 + .pri_code = 3,
818 + .pri_reg = QEIC_CIPXCC,
819 + },
820 + [36] = {
821 + .mask = 0x08000000,
822 + .mask_reg = QEIC_CIMR,
823 + .pri_code = 4,
824 + .pri_reg = QEIC_CIPXCC,
825 + },
826 + [40] = {
827 + .mask = 0x00800000,
828 + .mask_reg = QEIC_CIMR,
829 + .pri_code = 0,
830 + .pri_reg = QEIC_CIPYCC,
831 + },
832 + [41] = {
833 + .mask = 0x00400000,
834 + .mask_reg = QEIC_CIMR,
835 + .pri_code = 1,
836 + .pri_reg = QEIC_CIPYCC,
837 + },
838 + [42] = {
839 + .mask = 0x00200000,
840 + .mask_reg = QEIC_CIMR,
841 + .pri_code = 2,
842 + .pri_reg = QEIC_CIPYCC,
843 + },
844 + [43] = {
845 + .mask = 0x00100000,
846 + .mask_reg = QEIC_CIMR,
847 + .pri_code = 3,
848 + .pri_reg = QEIC_CIPYCC,
849 + },
850 +};
851 +
852 +static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
853 +{
854 + return ioread32be(base + (reg >> 2));
855 +}
856 +
857 +static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
858 + u32 value)
859 +{
860 + iowrite32be(value, base + (reg >> 2));
861 +}
862 +
863 +static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
864 +{
865 + return irq_get_chip_data(virq);
866 +}
867 +
868 +static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
869 +{
870 + return irq_data_get_irq_chip_data(d);
871 +}
872 +
873 +static void qe_ic_unmask_irq(struct irq_data *d)
874 +{
875 + struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
876 + unsigned int src = irqd_to_hwirq(d);
877 + unsigned long flags;
878 + u32 temp;
879 +
880 + raw_spin_lock_irqsave(&qe_ic_lock, flags);
881 +
882 + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
883 + qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
884 + temp | qe_ic_info[src].mask);
885 +
886 + raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
887 +}
888 +
889 +static void qe_ic_mask_irq(struct irq_data *d)
890 +{
891 + struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
892 + unsigned int src = irqd_to_hwirq(d);
893 + unsigned long flags;
894 + u32 temp;
895 +
896 + raw_spin_lock_irqsave(&qe_ic_lock, flags);
897 +
898 + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
899 + qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
900 + temp & ~qe_ic_info[src].mask);
901 +
902 + /* Flush the above write before enabling interrupts; otherwise,
903 + * spurious interrupts will sometimes happen. To be 100% sure
904 + * that the write has reached the device before interrupts are
905 + * enabled, the mask register would have to be read back; however,
906 + * this is not required for correctness, only to avoid wasting
907 + * time on a large number of spurious interrupts. In testing,
908 + * a sync reduced the observed spurious interrupts to zero.
909 + */
910 + mb();
911 +
912 + raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
913 +}
914 +
915 +static struct irq_chip qe_ic_irq_chip = {
916 + .name = "QEIC",
917 + .irq_unmask = qe_ic_unmask_irq,
918 + .irq_mask = qe_ic_mask_irq,
919 + .irq_mask_ack = qe_ic_mask_irq,
920 +};
921 +
922 +static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
923 + enum irq_domain_bus_token bus_token)
924 +{
925 + /* Exact match, unless qe_ic node is NULL */
926 + struct device_node *of_node = irq_domain_get_of_node(h);
927 + return of_node == NULL || of_node == node;
928 +}
929 +
930 +static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
931 + irq_hw_number_t hw)
932 +{
933 + struct qe_ic *qe_ic = h->host_data;
934 + struct irq_chip *chip;
935 +
936 + if (hw >= ARRAY_SIZE(qe_ic_info)) {
937 + pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
938 + return -EINVAL;
939 + }
940 +
941 + if (qe_ic_info[hw].mask == 0) {
942 + printk(KERN_ERR "Can't map reserved IRQ\n");
943 + return -EINVAL;
944 + }
945 + /* Default chip */
946 + chip = &qe_ic->hc_irq;
947 +
948 + irq_set_chip_data(virq, qe_ic);
949 + irq_set_status_flags(virq, IRQ_LEVEL);
950 +
951 + irq_set_chip_and_handler(virq, chip, handle_level_irq);
952 +
953 + return 0;
954 +}
955 +
956 +static const struct irq_domain_ops qe_ic_host_ops = {
957 + .match = qe_ic_host_match,
958 + .map = qe_ic_host_map,
959 + .xlate = irq_domain_xlate_onetwocell,
960 +};
961 +
962 +/* Return an interrupt vector or 0 if no interrupt is pending. */
963 +static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
964 +{
965 + int irq;
966 +
967 + BUG_ON(qe_ic == NULL);
968 +
969 + /* get the interrupt source vector. */
970 + irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
971 +
972 + if (irq == 0)
973 + return 0;
974 +
975 + return irq_linear_revmap(qe_ic->irqhost, irq);
976 +}
977 +
978 +/* Return an interrupt vector or 0 if no interrupt is pending. */
979 +static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
980 +{
981 + int irq;
982 +
983 + BUG_ON(qe_ic == NULL);
984 +
985 + /* get the interrupt source vector. */
986 + irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
987 +
988 + if (irq == 0)
989 + return 0;
990 +
991 + return irq_linear_revmap(qe_ic->irqhost, irq);
992 +}
993 +
994 +static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
995 +{
996 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
997 + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
998 +
999 + if (cascade_irq != 0)
1000 + generic_handle_irq(cascade_irq);
1001 +}
1002 +
1003 +static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
1004 +{
1005 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1006 + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1007 +
1008 + if (cascade_irq != 0)
1009 + generic_handle_irq(cascade_irq);
1010 +}
1011 +
1012 +static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
1013 +{
1014 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1015 + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
1016 + struct irq_chip *chip = irq_desc_get_chip(desc);
1017 +
1018 + if (cascade_irq != 0)
1019 + generic_handle_irq(cascade_irq);
1020 +
1021 + chip->irq_eoi(&desc->irq_data);
1022 +}
1023 +
1024 +static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
1025 +{
1026 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1027 + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1028 + struct irq_chip *chip = irq_desc_get_chip(desc);
1029 +
1030 + if (cascade_irq != 0)
1031 + generic_handle_irq(cascade_irq);
1032 +
1033 + chip->irq_eoi(&desc->irq_data);
1034 +}
1035 +
1036 +static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
1037 +{
1038 + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1039 + unsigned int cascade_irq;
1040 + struct irq_chip *chip = irq_desc_get_chip(desc);
1041 +
1042 + cascade_irq = qe_ic_get_high_irq(qe_ic);
1043 + if (cascade_irq == 0)
1044 + cascade_irq = qe_ic_get_low_irq(qe_ic);
1045 +
1046 + if (cascade_irq != 0)
1047 + generic_handle_irq(cascade_irq);
1048 +
1049 + chip->irq_eoi(&desc->irq_data);
1050 +}
1051 +
1052 +static int __init qe_ic_init(struct device_node *node, unsigned int flags)
1053 +{
1054 + struct qe_ic *qe_ic;
1055 + struct resource res;
1056 + u32 temp = 0, high_active = 0;
1057 + int ret = 0;
1058 +
1059 + if (!node)
1060 + return -ENODEV;
1061 +
1062 + ret = of_address_to_resource(node, 0, &res);
1063 + if (ret) {
1064 + ret = -ENODEV;
1065 + goto err_put_node;
1066 + }
1067 +
1068 + qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
1069 + if (qe_ic == NULL) {
1070 + ret = -ENOMEM;
1071 + goto err_put_node;
1072 + }
1073 +
1074 + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
1075 + &qe_ic_host_ops, qe_ic);
1076 + if (qe_ic->irqhost == NULL) {
1077 + ret = -ENOMEM;
1078 + goto err_free_qe_ic;
1079 + }
1080 +
1081 + qe_ic->regs = ioremap(res.start, resource_size(&res));
1082 +
1083 + qe_ic->hc_irq = qe_ic_irq_chip;
1084 +
1085 + qe_ic->virq_high = irq_of_parse_and_map(node, 0);
1086 + qe_ic->virq_low = irq_of_parse_and_map(node, 1);
1087 +
1088 + if (qe_ic->virq_low == 0) {
1089 + pr_err("Failed to map QE_IC low IRQ\n");
1090 + ret = -ENOMEM;
1091 + goto err_domain_remove;
1092 + }
1093 +
1094 + /* default priority scheme is grouped. If spread mode is */
1095 + /* required, configure cicr accordingly. */
1096 + if (flags & QE_IC_SPREADMODE_GRP_W)
1097 + temp |= CICR_GWCC;
1098 + if (flags & QE_IC_SPREADMODE_GRP_X)
1099 + temp |= CICR_GXCC;
1100 + if (flags & QE_IC_SPREADMODE_GRP_Y)
1101 + temp |= CICR_GYCC;
1102 + if (flags & QE_IC_SPREADMODE_GRP_Z)
1103 + temp |= CICR_GZCC;
1104 + if (flags & QE_IC_SPREADMODE_GRP_RISCA)
1105 + temp |= CICR_GRTA;
1106 + if (flags & QE_IC_SPREADMODE_GRP_RISCB)
1107 + temp |= CICR_GRTB;
1108 +
1109 + /* choose destination signal for highest priority interrupt */
1110 + if (flags & QE_IC_HIGH_SIGNAL) {
1111 + temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
1112 + high_active = 1;
1113 + }
1114 +
1115 + qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
1116 +
1117 + irq_set_handler_data(qe_ic->virq_low, qe_ic);
1118 + irq_set_chained_handler(qe_ic->virq_low, qe_ic_cascade_low_mpic);
1119 +
1120 + if (qe_ic->virq_high != 0 &&
1121 + qe_ic->virq_high != qe_ic->virq_low) {
1122 + irq_set_handler_data(qe_ic->virq_high, qe_ic);
1123 + irq_set_chained_handler(qe_ic->virq_high,
1124 + qe_ic_cascade_high_mpic);
1125 + }
1126 + of_node_put(node);
1127 + return 0;
1128 +
1129 +err_domain_remove:
1130 + irq_domain_remove(qe_ic->irqhost);
1131 +err_free_qe_ic:
1132 + kfree(qe_ic);
1133 +err_put_node:
1134 + of_node_put(node);
1135 + return ret;
1136 +}
1137 +
1138 +static int __init init_qe_ic(struct device_node *node,
1139 + struct device_node *parent)
1140 +{
1141 + int ret;
1142 +
1143 + ret = qe_ic_init(node, 0);
1144 + if (ret)
1145 + return ret;
1146 +
1147 + return 0;
1148 +}
1149 +
1150 +IRQCHIP_DECLARE(qeic, "fsl,qe-ic", init_qe_ic);
1151 --- a/drivers/soc/fsl/qe/Kconfig
1152 +++ b/drivers/soc/fsl/qe/Kconfig
1153 @@ -4,7 +4,7 @@
1154
1155 config QUICC_ENGINE
1156 bool "Freescale QUICC Engine (QE) Support"
1157 - depends on FSL_SOC && PPC32
1158 + depends on OF && HAS_IOMEM
1159 select GENERIC_ALLOCATOR
1160 select CRC32
1161 help
1162 --- a/drivers/soc/fsl/qe/Makefile
1163 +++ b/drivers/soc/fsl/qe/Makefile
1164 @@ -2,7 +2,7 @@
1165 #
1166 # Makefile for the linux ppc-specific parts of QE
1167 #
1168 -obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
1169 +obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_io.o
1170 obj-$(CONFIG_CPM) += qe_common.o
1171 obj-$(CONFIG_UCC) += ucc.o
1172 obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
1173 --- a/drivers/soc/fsl/qe/qe.c
1174 +++ b/drivers/soc/fsl/qe/qe.c
1175 @@ -33,8 +33,6 @@
1176 #include <asm/pgtable.h>
1177 #include <soc/fsl/qe/immap_qe.h>
1178 #include <soc/fsl/qe/qe.h>
1179 -#include <asm/prom.h>
1180 -#include <asm/rheap.h>
1181
1182 static void qe_snums_init(void);
1183 static int qe_sdma_init(void);
1184 @@ -107,15 +105,27 @@ void qe_reset(void)
1185 panic("sdma init failed!");
1186 }
1187
1188 +/* issue commands to QE, return 0 on success while -EIO on error
1189 + *
1190 + * @cmd: the command code, should be QE_INIT_TX_RX, QE_STOP_TX and so on
1191 + * @device: which sub-block will run the command, QE_CR_SUBBLOCK_UCCFAST1 - 8
1192 + * , QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
1193 + *   QE_CR_SUBBLOCK_IDMA1 - 4 and so on.
1194 + * @mcn_protocol: specifies mode for the command for non-MCC, should be
1195 + * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
1196 + *   and so on.
1197 + * @cmd_input: command related data.
1198 + */
1199 int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
1200 {
1201 unsigned long flags;
1202 u8 mcn_shift = 0, dev_shift = 0;
1203 - u32 ret;
1204 + int ret;
1205 + int i;
1206
1207 spin_lock_irqsave(&qe_lock, flags);
1208 if (cmd == QE_RESET) {
1209 - out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
1210 + iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
1211 } else {
1212 if (cmd == QE_ASSIGN_PAGE) {
1213 /* Here device is the SNUM, not sub-block */
1214 @@ -132,20 +142,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8
1215 mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
1216 }
1217
1218 - out_be32(&qe_immr->cp.cecdr, cmd_input);
1219 - out_be32(&qe_immr->cp.cecr,
1220 - (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
1221 - mcn_protocol << mcn_shift));
1222 + iowrite32be(cmd_input, &qe_immr->cp.cecdr);
1223 + iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
1224 + (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
1225 }
1226
1227 /* wait for the QE_CR_FLG to clear */
1228 - ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
1229 - 100, 0);
1230 + ret = -EIO;
1231 + for (i = 0; i < 100; i++) {
1232 + if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
1233 + ret = 0;
1234 + break;
1235 + }
1236 + udelay(1);
1237 + }
1238 +
1239 /* On timeout (e.g. failure), the expression will be false (ret == 0),
1240 otherwise it will be true (ret == 1). */
1241 spin_unlock_irqrestore(&qe_lock, flags);
1242
1243 - return ret == 1;
1244 + return ret;
1245 }
1246 EXPORT_SYMBOL(qe_issue_cmd);
1247
1248 @@ -170,6 +186,8 @@ unsigned int qe_get_brg_clk(void)
1249 int size;
1250 const u32 *prop;
1251 unsigned int mod;
1252 + u32 val;
1253 + int ret;
1254
1255 if (brg_clk)
1256 return brg_clk;
1257 @@ -181,9 +199,9 @@ unsigned int qe_get_brg_clk(void)
1258 return brg_clk;
1259 }
1260
1261 - prop = of_get_property(qe, "brg-frequency", &size);
1262 - if (prop && size == sizeof(*prop))
1263 - brg_clk = *prop;
1264 + ret = of_property_read_u32(qe, "brg-frequency", &val);
1265 + if (!ret)
1266 + brg_clk = val;
1267
1268 of_node_put(qe);
1269
1270 @@ -229,14 +247,16 @@ int qe_setbrg(enum qe_clock brg, unsigne
1271 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
1272 that the BRG divisor must be even if you're not using divide-by-16
1273 mode. */
1274 +#ifdef CONFIG_PPC
1275 if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
1276 if (!div16 && (divisor & 1) && (divisor > 3))
1277 divisor++;
1278 +#endif
1279
1280 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
1281 QE_BRGC_ENABLE | div16;
1282
1283 - out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
1284 + iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
1285
1286 return 0;
1287 }
1288 @@ -370,9 +390,9 @@ static int qe_sdma_init(void)
1289 return -ENOMEM;
1290 }
1291
1292 - out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
1293 - out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
1294 - (0x1 << QE_SDMR_CEN_SHIFT)));
1295 + iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
1296 + iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
1297 + &sdma->sdmr);
1298
1299 return 0;
1300 }
1301 @@ -410,14 +430,14 @@ static void qe_upload_microcode(const vo
1302 "uploading microcode '%s'\n", ucode->id);
1303
1304 /* Use auto-increment */
1305 - out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
1306 - QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
1307 + iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
1308 + QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
1309
1310 for (i = 0; i < be32_to_cpu(ucode->count); i++)
1311 - out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
1312 + iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
1313
1314 /* Set I-RAM Ready Register */
1315 - out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
1316 + iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
1317 }
1318
1319 /*
1320 @@ -502,7 +522,7 @@ int qe_upload_firmware(const struct qe_f
1321 * If the microcode calls for it, split the I-RAM.
1322 */
1323 if (!firmware->split)
1324 - setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
1325 + qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
1326
1327 if (firmware->soc.model)
1328 printk(KERN_INFO
1329 @@ -536,11 +556,11 @@ int qe_upload_firmware(const struct qe_f
1330 u32 trap = be32_to_cpu(ucode->traps[j]);
1331
1332 if (trap)
1333 - out_be32(&qe_immr->rsp[i].tibcr[j], trap);
1334 + iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
1335 }
1336
1337 /* Enable traps */
1338 - out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
1339 + iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
1340 }
1341
1342 qe_firmware_uploaded = 1;
1343 @@ -659,9 +679,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
1344 unsigned int qe_get_num_of_snums(void)
1345 {
1346 struct device_node *qe;
1347 - int size;
1348 unsigned int num_of_snums;
1349 - const u32 *prop;
1350 + u32 val;
1351 + int ret;
1352
1353 num_of_snums = 28; /* The default number of snum for threads is 28 */
1354 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
1355 @@ -675,9 +695,9 @@ unsigned int qe_get_num_of_snums(void)
1356 return num_of_snums;
1357 }
1358
1359 - prop = of_get_property(qe, "fsl,qe-num-snums", &size);
1360 - if (prop && size == sizeof(*prop)) {
1361 - num_of_snums = *prop;
1362 + ret = of_property_read_u32(qe, "fsl,qe-num-snums", &val);
1363 + if (!ret) {
1364 + num_of_snums = val;
1365 if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
1366 /* No QE ever has fewer than 28 SNUMs */
1367 pr_err("QE: number of snum is invalid\n");
1368 --- a/drivers/soc/fsl/qe/qe_ic.h
1369 +++ /dev/null
1370 @@ -1,103 +0,0 @@
1371 -/*
1372 - * drivers/soc/fsl/qe/qe_ic.h
1373 - *
1374 - * QUICC ENGINE Interrupt Controller Header
1375 - *
1376 - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
1377 - *
1378 - * Author: Li Yang <leoli@freescale.com>
1379 - * Based on code from Shlomi Gridish <gridish@freescale.com>
1380 - *
1381 - * This program is free software; you can redistribute it and/or modify it
1382 - * under the terms of the GNU General Public License as published by the
1383 - * Free Software Foundation; either version 2 of the License, or (at your
1384 - * option) any later version.
1385 - */
1386 -#ifndef _POWERPC_SYSDEV_QE_IC_H
1387 -#define _POWERPC_SYSDEV_QE_IC_H
1388 -
1389 -#include <soc/fsl/qe/qe_ic.h>
1390 -
1391 -#define NR_QE_IC_INTS 64
1392 -
1393 -/* QE IC registers offset */
1394 -#define QEIC_CICR 0x00
1395 -#define QEIC_CIVEC 0x04
1396 -#define QEIC_CRIPNR 0x08
1397 -#define QEIC_CIPNR 0x0c
1398 -#define QEIC_CIPXCC 0x10
1399 -#define QEIC_CIPYCC 0x14
1400 -#define QEIC_CIPWCC 0x18
1401 -#define QEIC_CIPZCC 0x1c
1402 -#define QEIC_CIMR 0x20
1403 -#define QEIC_CRIMR 0x24
1404 -#define QEIC_CICNR 0x28
1405 -#define QEIC_CIPRTA 0x30
1406 -#define QEIC_CIPRTB 0x34
1407 -#define QEIC_CRICR 0x3c
1408 -#define QEIC_CHIVEC 0x60
1409 -
1410 -/* Interrupt priority registers */
1411 -#define CIPCC_SHIFT_PRI0 29
1412 -#define CIPCC_SHIFT_PRI1 26
1413 -#define CIPCC_SHIFT_PRI2 23
1414 -#define CIPCC_SHIFT_PRI3 20
1415 -#define CIPCC_SHIFT_PRI4 13
1416 -#define CIPCC_SHIFT_PRI5 10
1417 -#define CIPCC_SHIFT_PRI6 7
1418 -#define CIPCC_SHIFT_PRI7 4
1419 -
1420 -/* CICR priority modes */
1421 -#define CICR_GWCC 0x00040000
1422 -#define CICR_GXCC 0x00020000
1423 -#define CICR_GYCC 0x00010000
1424 -#define CICR_GZCC 0x00080000
1425 -#define CICR_GRTA 0x00200000
1426 -#define CICR_GRTB 0x00400000
1427 -#define CICR_HPIT_SHIFT 8
1428 -#define CICR_HPIT_MASK 0x00000300
1429 -#define CICR_HP_SHIFT 24
1430 -#define CICR_HP_MASK 0x3f000000
1431 -
1432 -/* CICNR */
1433 -#define CICNR_WCC1T_SHIFT 20
1434 -#define CICNR_ZCC1T_SHIFT 28
1435 -#define CICNR_YCC1T_SHIFT 12
1436 -#define CICNR_XCC1T_SHIFT 4
1437 -
1438 -/* CRICR */
1439 -#define CRICR_RTA1T_SHIFT 20
1440 -#define CRICR_RTB1T_SHIFT 28
1441 -
1442 -/* Signal indicator */
1443 -#define SIGNAL_MASK 3
1444 -#define SIGNAL_HIGH 2
1445 -#define SIGNAL_LOW 0
1446 -
1447 -struct qe_ic {
1448 - /* Control registers offset */
1449 - volatile u32 __iomem *regs;
1450 -
1451 - /* The remapper for this QEIC */
1452 - struct irq_domain *irqhost;
1453 -
1454 - /* The "linux" controller struct */
1455 - struct irq_chip hc_irq;
1456 -
1457 - /* VIRQ numbers of QE high/low irqs */
1458 - unsigned int virq_high;
1459 - unsigned int virq_low;
1460 -};
1461 -
1462 -/*
1463 - * QE interrupt controller internal structure
1464 - */
1465 -struct qe_ic_info {
1466 - u32 mask; /* location of this source at the QIMR register. */
1467 - u32 mask_reg; /* Mask register offset */
1468 - u8 pri_code; /* for grouped interrupts sources - the interrupt
1469 - code as appears at the group priority register */
1470 - u32 pri_reg; /* Group priority register offset */
1471 -};
1472 -
1473 -#endif /* _POWERPC_SYSDEV_QE_IC_H */
1474 --- a/drivers/soc/fsl/qe/qe_io.c
1475 +++ b/drivers/soc/fsl/qe/qe_io.c
1476 @@ -22,8 +22,6 @@
1477
1478 #include <asm/io.h>
1479 #include <soc/fsl/qe/qe.h>
1480 -#include <asm/prom.h>
1481 -#include <sysdev/fsl_soc.h>
1482
1483 #undef DEBUG
1484
1485 @@ -61,16 +59,16 @@ void __par_io_config_pin(struct qe_pio_r
1486 pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
1487
1488 /* Set open drain, if required */
1489 - tmp_val = in_be32(&par_io->cpodr);
1490 + tmp_val = ioread32be(&par_io->cpodr);
1491 if (open_drain)
1492 - out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
1493 + iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
1494 else
1495 - out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
1496 + iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
1497
1498 /* define direction */
1499 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
1500 - in_be32(&par_io->cpdir2) :
1501 - in_be32(&par_io->cpdir1);
1502 + ioread32be(&par_io->cpdir2) :
1503 + ioread32be(&par_io->cpdir1);
1504
1505 /* get all bits mask for 2 bit per port */
1506 pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
1507 @@ -82,34 +80,30 @@ void __par_io_config_pin(struct qe_pio_r
1508
1509 /* clear and set 2 bits mask */
1510 if (pin > (QE_PIO_PINS / 2) - 1) {
1511 - out_be32(&par_io->cpdir2,
1512 - ~pin_mask2bits & tmp_val);
1513 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
1514 tmp_val &= ~pin_mask2bits;
1515 - out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
1516 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
1517 } else {
1518 - out_be32(&par_io->cpdir1,
1519 - ~pin_mask2bits & tmp_val);
1520 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
1521 tmp_val &= ~pin_mask2bits;
1522 - out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
1523 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
1524 }
1525 /* define pin assignment */
1526 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
1527 - in_be32(&par_io->cppar2) :
1528 - in_be32(&par_io->cppar1);
1529 + ioread32be(&par_io->cppar2) :
1530 + ioread32be(&par_io->cppar1);
1531
1532 new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
1533 (pin % (QE_PIO_PINS / 2) + 1) * 2));
1534 /* clear and set 2 bits mask */
1535 if (pin > (QE_PIO_PINS / 2) - 1) {
1536 - out_be32(&par_io->cppar2,
1537 - ~pin_mask2bits & tmp_val);
1538 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
1539 tmp_val &= ~pin_mask2bits;
1540 - out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
1541 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
1542 } else {
1543 - out_be32(&par_io->cppar1,
1544 - ~pin_mask2bits & tmp_val);
1545 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
1546 tmp_val &= ~pin_mask2bits;
1547 - out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
1548 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
1549 }
1550 }
1551 EXPORT_SYMBOL(__par_io_config_pin);
1552 @@ -137,12 +131,12 @@ int par_io_data_set(u8 port, u8 pin, u8
1553 /* calculate pin location */
1554 pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
1555
1556 - tmp_val = in_be32(&par_io[port].cpdata);
1557 + tmp_val = ioread32be(&par_io[port].cpdata);
1558
1559 if (val == 0) /* clear */
1560 - out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
1561 + iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
1562 else /* set */
1563 - out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
1564 + iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
1565
1566 return 0;
1567 }
1568 --- a/drivers/soc/fsl/qe/qe_tdm.c
1569 +++ b/drivers/soc/fsl/qe/qe_tdm.c
1570 @@ -228,10 +228,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm,
1571 &siram[siram_entry_id * 32 + 0x200 + i]);
1572 }
1573
1574 - setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
1575 - SIR_LAST);
1576 - setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
1577 - SIR_LAST);
1578 + qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
1579 + SIR_LAST);
1580 + qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
1581 + (utdm->num_of_ts - 1)], SIR_LAST);
1582
1583 /* Set SIxMR register */
1584 sixmr = SIMR_SAD(siram_entry_id);
1585 --- a/drivers/soc/fsl/qe/ucc.c
1586 +++ b/drivers/soc/fsl/qe/ucc.c
1587 @@ -39,7 +39,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int
1588 return -EINVAL;
1589
1590 spin_lock_irqsave(&cmxgcr_lock, flags);
1591 - clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
1592 + qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
1593 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
1594 spin_unlock_irqrestore(&cmxgcr_lock, flags);
1595
1596 @@ -84,7 +84,7 @@ int ucc_set_type(unsigned int ucc_num, e
1597 return -EINVAL;
1598 }
1599
1600 - clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
1601 + qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
1602 UCC_GUEMR_SET_RESERVED3 | speed);
1603
1604 return 0;
1605 @@ -113,9 +113,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned
1606 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
1607
1608 if (set)
1609 - setbits32(cmxucr, mask << shift);
1610 + qe_setbits32(cmxucr, mask << shift);
1611 else
1612 - clrbits32(cmxucr, mask << shift);
1613 + qe_clrbits32(cmxucr, mask << shift);
1614
1615 return 0;
1616 }
1617 @@ -211,7 +211,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc
1618 if (mode == COMM_DIR_RX)
1619 shift += 4;
1620
1621 - clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
1622 + qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
1623 clock_bits << shift);
1624
1625 return 0;
1626 --- a/drivers/soc/fsl/qe/ucc_fast.c
1627 +++ b/drivers/soc/fsl/qe/ucc_fast.c
1628 @@ -33,41 +33,41 @@ void ucc_fast_dump_regs(struct ucc_fast_
1629 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
1630
1631 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
1632 - &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
1633 + &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
1634 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
1635 - &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
1636 + &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
1637 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
1638 - &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
1639 + &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
1640 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
1641 - &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
1642 + &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
1643 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
1644 - &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
1645 + &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
1646 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
1647 - &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
1648 + &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
1649 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
1650 - &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
1651 + &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
1652 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
1653 - &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
1654 + &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
1655 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
1656 - &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
1657 + &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
1658 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
1659 - &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
1660 + &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
1661 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
1662 - &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
1663 + &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
1664 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
1665 - &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
1666 + &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
1667 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
1668 - &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
1669 + &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
1670 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
1671 - &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
1672 + &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
1673 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
1674 - &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
1675 + &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
1676 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
1677 - &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
1678 + &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
1679 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
1680 - &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
1681 + &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
1682 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
1683 - &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
1684 + &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
1685 }
1686 EXPORT_SYMBOL(ucc_fast_dump_regs);
1687
1688 @@ -89,7 +89,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc
1689
1690 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
1691 {
1692 - out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
1693 + iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
1694 }
1695 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
1696
1697 @@ -101,7 +101,7 @@ void ucc_fast_enable(struct ucc_fast_pri
1698 uf_regs = uccf->uf_regs;
1699
1700 /* Enable reception and/or transmission on this UCC. */
1701 - gumr = in_be32(&uf_regs->gumr);
1702 + gumr = ioread32be(&uf_regs->gumr);
1703 if (mode & COMM_DIR_TX) {
1704 gumr |= UCC_FAST_GUMR_ENT;
1705 uccf->enabled_tx = 1;
1706 @@ -110,7 +110,7 @@ void ucc_fast_enable(struct ucc_fast_pri
1707 gumr |= UCC_FAST_GUMR_ENR;
1708 uccf->enabled_rx = 1;
1709 }
1710 - out_be32(&uf_regs->gumr, gumr);
1711 + iowrite32be(gumr, &uf_regs->gumr);
1712 }
1713 EXPORT_SYMBOL(ucc_fast_enable);
1714
1715 @@ -122,7 +122,7 @@ void ucc_fast_disable(struct ucc_fast_pr
1716 uf_regs = uccf->uf_regs;
1717
1718 /* Disable reception and/or transmission on this UCC. */
1719 - gumr = in_be32(&uf_regs->gumr);
1720 + gumr = ioread32be(&uf_regs->gumr);
1721 if (mode & COMM_DIR_TX) {
1722 gumr &= ~UCC_FAST_GUMR_ENT;
1723 uccf->enabled_tx = 0;
1724 @@ -131,7 +131,7 @@ void ucc_fast_disable(struct ucc_fast_pr
1725 gumr &= ~UCC_FAST_GUMR_ENR;
1726 uccf->enabled_rx = 0;
1727 }
1728 - out_be32(&uf_regs->gumr, gumr);
1729 + iowrite32be(gumr, &uf_regs->gumr);
1730 }
1731 EXPORT_SYMBOL(ucc_fast_disable);
1732
1733 @@ -263,12 +263,13 @@ int ucc_fast_init(struct ucc_fast_info *
1734 gumr |= uf_info->tenc;
1735 gumr |= uf_info->tcrc;
1736 gumr |= uf_info->mode;
1737 - out_be32(&uf_regs->gumr, gumr);
1738 + iowrite32be(gumr, &uf_regs->gumr);
1739
1740 /* Allocate memory for Tx Virtual Fifo */
1741 uccf->ucc_fast_tx_virtual_fifo_base_offset =
1742 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
1743 - if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
1744 + if (IS_ERR_VALUE((unsigned long)uccf->
1745 + ucc_fast_tx_virtual_fifo_base_offset)) {
1746 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
1747 __func__);
1748 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
1749 @@ -281,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info *
1750 qe_muram_alloc(uf_info->urfs +
1751 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
1752 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
1753 - if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
1754 + if (IS_ERR_VALUE((unsigned long)uccf->
1755 + ucc_fast_rx_virtual_fifo_base_offset)) {
1756 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
1757 __func__);
1758 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
1759 @@ -290,15 +292,15 @@ int ucc_fast_init(struct ucc_fast_info *
1760 }
1761
1762 /* Set Virtual Fifo registers */
1763 - out_be16(&uf_regs->urfs, uf_info->urfs);
1764 - out_be16(&uf_regs->urfet, uf_info->urfet);
1765 - out_be16(&uf_regs->urfset, uf_info->urfset);
1766 - out_be16(&uf_regs->utfs, uf_info->utfs);
1767 - out_be16(&uf_regs->utfet, uf_info->utfet);
1768 - out_be16(&uf_regs->utftt, uf_info->utftt);
1769 + iowrite16be(uf_info->urfs, &uf_regs->urfs);
1770 + iowrite16be(uf_info->urfet, &uf_regs->urfet);
1771 + iowrite16be(uf_info->urfset, &uf_regs->urfset);
1772 + iowrite16be(uf_info->utfs, &uf_regs->utfs);
1773 + iowrite16be(uf_info->utfet, &uf_regs->utfet);
1774 + iowrite16be(uf_info->utftt, &uf_regs->utftt);
1775 /* utfb, urfb are offsets from MURAM base */
1776 - out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
1777 - out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
1778 + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
1779 + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
1780
1781 /* Mux clocking */
1782 /* Grant Support */
1783 @@ -366,14 +368,14 @@ int ucc_fast_init(struct ucc_fast_info *
1784 }
1785
1786 /* Set interrupt mask register at UCC level. */
1787 - out_be32(&uf_regs->uccm, uf_info->uccm_mask);
1788 + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
1789
1790 /* First, clear anything pending at UCC level,
1791 * otherwise, old garbage may come through
1792 * as soon as the dam is opened. */
1793
1794 /* Writing '1' clears */
1795 - out_be32(&uf_regs->ucce, 0xffffffff);
1796 + iowrite32be(0xffffffff, &uf_regs->ucce);
1797
1798 *uccf_ret = uccf;
1799 return 0;
1800 --- a/drivers/tty/serial/ucc_uart.c
1801 +++ b/drivers/tty/serial/ucc_uart.c
1802 @@ -34,6 +34,7 @@
1803 #include <soc/fsl/qe/ucc_slow.h>
1804
1805 #include <linux/firmware.h>
1806 +#include <asm/cpm.h>
1807 #include <asm/reg.h>
1808
1809 /*
1810 --- a/include/soc/fsl/qe/qe.h
1811 +++ b/include/soc/fsl/qe/qe.h
1812 @@ -21,7 +21,6 @@
1813 #include <linux/spinlock.h>
1814 #include <linux/errno.h>
1815 #include <linux/err.h>
1816 -#include <asm/cpm.h>
1817 #include <soc/fsl/qe/immap_qe.h>
1818 #include <linux/of.h>
1819 #include <linux/of_address.h>
1820 --- a/include/soc/fsl/qe/qe_ic.h
1821 +++ /dev/null
1822 @@ -1,139 +0,0 @@
1823 -/*
1824 - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
1825 - *
1826 - * Authors: Shlomi Gridish <gridish@freescale.com>
1827 - * Li Yang <leoli@freescale.com>
1828 - *
1829 - * Description:
1830 - * QE IC external definitions and structure.
1831 - *
1832 - * This program is free software; you can redistribute it and/or modify it
1833 - * under the terms of the GNU General Public License as published by the
1834 - * Free Software Foundation; either version 2 of the License, or (at your
1835 - * option) any later version.
1836 - */
1837 -#ifndef _ASM_POWERPC_QE_IC_H
1838 -#define _ASM_POWERPC_QE_IC_H
1839 -
1840 -#include <linux/irq.h>
1841 -
1842 -struct device_node;
1843 -struct qe_ic;
1844 -
1845 -#define NUM_OF_QE_IC_GROUPS 6
1846 -
1847 -/* Flags when we init the QE IC */
1848 -#define QE_IC_SPREADMODE_GRP_W 0x00000001
1849 -#define QE_IC_SPREADMODE_GRP_X 0x00000002
1850 -#define QE_IC_SPREADMODE_GRP_Y 0x00000004
1851 -#define QE_IC_SPREADMODE_GRP_Z 0x00000008
1852 -#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
1853 -#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
1854 -
1855 -#define QE_IC_LOW_SIGNAL 0x00000100
1856 -#define QE_IC_HIGH_SIGNAL 0x00000200
1857 -
1858 -#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
1859 -#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
1860 -#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
1861 -#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
1862 -#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
1863 -#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
1864 -#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
1865 -#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
1866 -#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
1867 -#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
1868 -#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
1869 -#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
1870 -#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
1871 -
1872 -/* QE interrupt sources groups */
1873 -enum qe_ic_grp_id {
1874 - QE_IC_GRP_W = 0, /* QE interrupt controller group W */
1875 - QE_IC_GRP_X, /* QE interrupt controller group X */
1876 - QE_IC_GRP_Y, /* QE interrupt controller group Y */
1877 - QE_IC_GRP_Z, /* QE interrupt controller group Z */
1878 - QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
1879 - QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
1880 -};
1881 -
1882 -#ifdef CONFIG_QUICC_ENGINE
1883 -void qe_ic_init(struct device_node *node, unsigned int flags,
1884 - void (*low_handler)(struct irq_desc *desc),
1885 - void (*high_handler)(struct irq_desc *desc));
1886 -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
1887 -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
1888 -#else
1889 -static inline void qe_ic_init(struct device_node *node, unsigned int flags,
1890 - void (*low_handler)(struct irq_desc *desc),
1891 - void (*high_handler)(struct irq_desc *desc))
1892 -{}
1893 -static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
1894 -{ return 0; }
1895 -static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
1896 -{ return 0; }
1897 -#endif /* CONFIG_QUICC_ENGINE */
1898 -
1899 -void qe_ic_set_highest_priority(unsigned int virq, int high);
1900 -int qe_ic_set_priority(unsigned int virq, unsigned int priority);
1901 -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
1902 -
1903 -static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
1904 -{
1905 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1906 - unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
1907 -
1908 - if (cascade_irq != NO_IRQ)
1909 - generic_handle_irq(cascade_irq);
1910 -}
1911 -
1912 -static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
1913 -{
1914 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1915 - unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1916 -
1917 - if (cascade_irq != NO_IRQ)
1918 - generic_handle_irq(cascade_irq);
1919 -}
1920 -
1921 -static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
1922 -{
1923 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1924 - unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
1925 - struct irq_chip *chip = irq_desc_get_chip(desc);
1926 -
1927 - if (cascade_irq != NO_IRQ)
1928 - generic_handle_irq(cascade_irq);
1929 -
1930 - chip->irq_eoi(&desc->irq_data);
1931 -}
1932 -
1933 -static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
1934 -{
1935 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1936 - unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
1937 - struct irq_chip *chip = irq_desc_get_chip(desc);
1938 -
1939 - if (cascade_irq != NO_IRQ)
1940 - generic_handle_irq(cascade_irq);
1941 -
1942 - chip->irq_eoi(&desc->irq_data);
1943 -}
1944 -
1945 -static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
1946 -{
1947 - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
1948 - unsigned int cascade_irq;
1949 - struct irq_chip *chip = irq_desc_get_chip(desc);
1950 -
1951 - cascade_irq = qe_ic_get_high_irq(qe_ic);
1952 - if (cascade_irq == NO_IRQ)
1953 - cascade_irq = qe_ic_get_low_irq(qe_ic);
1954 -
1955 - if (cascade_irq != NO_IRQ)
1956 - generic_handle_irq(cascade_irq);
1957 -
1958 - chip->irq_eoi(&desc->irq_data);
1959 -}
1960 -
1961 -#endif /* _ASM_POWERPC_QE_IC_H */