1 /*
2 * linux/arch/arm/mach-pxa/pxa3xx.c
3 *
4 * code specific to pxa3xx aka Monahans
5 *
6 * Copyright (C) 2006 Marvell International Ltd.
7 *
8 * 2007-09-02: eric miao <eric.miao@marvell.com>
9 * initial version
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15 #include <linux/dmaengine.h>
16 #include <linux/dma/pxa-dma.h>
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/gpio-pxa.h>
21 #include <linux/pm.h>
22 #include <linux/platform_device.h>
23 #include <linux/irq.h>
24 #include <linux/irqchip.h>
25 #include <linux/io.h>
26 #include <linux/of.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/platform_data/i2c-pxa.h>
29 #include <linux/platform_data/mmp_dma.h>
30
31 #include <asm/mach/map.h>
32 #include <asm/suspend.h>
33 #include <mach/hardware.h>
34 #include <mach/pxa3xx-regs.h>
35 #include <mach/reset.h>
36 #include <linux/platform_data/usb-ohci-pxa27x.h>
37 #include "pm.h"
38 #include <mach/dma.h>
39 #include <mach/smemc.h>
40 #include <mach/irqs.h>
41
42 #include "generic.h"
43 #include "devices.h"
44
/*
 * PECR (Power Edge/level Control Register) helpers: each external wakeup
 * pin n owns a pair of bits in the top byte — IE(n) is its interrupt
 * enable, IS(n) its latched (write-1-to-clear) status.
 */
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

/* Provided by the shared PXA IRQ code; wires up DT interrupt handling
 * with our set_wake callback. */
extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)
56
#ifdef CONFIG_PM

/* Internal SRAM: holds the standby entry code while SDRAM is down. */
#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

static void __iomem *sram;		/* ioremap'd view of ISRAM */
static unsigned long wakeup_src;	/* accumulated AD*ER wakeup mask */
64
/*
 * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	/* Copy the standby entry/exit stub into ISRAM so it can run while
	 * SDRAM is inaccessible. */
	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

	/* Clear stale D2 wakeup status, then arm only the configured
	 * wakeup sources on the D2D0 path. */
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	/* Reading and writing back clears the write-1-to-clear status
	 * bits in ASCR/ARSR. */
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);	/* executes from ISRAM; returns after wakeup */
	local_fiq_enable();

	/* Disarm the wakeup sources again now that we are back. */
	AD2D0ER = 0;
	AD2D1ER = 0;
}
94
95 /*
96 * NOTE: currently, the OBM (OEM Boot Module) binary comes along with
97 * PXA3xx development kits assumes that the resuming process continues
98 * with the address stored within the first 4 bytes of SDRAM. The PSPR
99 * register is used privately by BootROM and OBM, and _must_ be set to
100 * 0x5c014000 for the moment.
101 */
pxa3xx_cpu_pm_suspend(void)102 static void pxa3xx_cpu_pm_suspend(void)
103 {
104 volatile unsigned long *p = (volatile void *)0xc0000000;
105 unsigned long saved_data = *p;
106 #ifndef CONFIG_IWMMXT
107 u64 acc0;
108
109 asm volatile(".arch_extension xscale\n\t"
110 "mra %Q0, %R0, acc0" : "=r" (acc0));
111 #endif
112
113 /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
114 CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
115 CKENB |= 1 << (CKEN_HSIO2 & 0x1f);
116
117 /* clear and setup wakeup source */
118 AD3SR = ~0;
119 AD3ER = wakeup_src;
120 ASCR = ASCR;
121 ARSR = ARSR;
122
123 PCFR |= (1u << 13); /* L1_DIS */
124 PCFR &= ~((1u << 12) | (1u << 1)); /* L0_EN | SL_ROD */
125
126 PSPR = 0x5c014000;
127
128 /* overwrite with the resume address */
129 *p = __pa_symbol(cpu_resume);
130
131 cpu_suspend(0, pxa3xx_finish_suspend);
132
133 *p = saved_data;
134
135 AD3ER = 0;
136
137 #ifndef CONFIG_IWMMXT
138 asm volatile(".arch_extension xscale\n\t"
139 "mar acc0, %Q0, %R0" : "=r" (acc0));
140 #endif
141 }
142
/*
 * Enter the requested low-power state. Refuses to sleep when no wakeup
 * sources have been armed, since the SoC could never resume.
 */
static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	if (!wakeup_src) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	if (state == PM_SUSPEND_STANDBY)
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
	else if (state == PM_SUSPEND_MEM)
		pxa3xx_cpu_pm_suspend();
}
163
/* Only suspend-to-RAM and standby are supported on PXA3xx. */
static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}
168
/* PM callbacks consumed by the shared PXA PM core (pm.c). */
static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid = pxa3xx_cpu_pm_valid,
	.enter = pxa3xx_cpu_pm_enter,
};
173
/*
 * One-time PM setup: map the internal SRAM used by the standby code,
 * configure SRAM retention across low-power modes, clear stale wakeup
 * enables, and register our PM callbacks. If the SRAM mapping fails,
 * PM callbacks are simply not registered (standby/suspend disabled).
 */
static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes. Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}
201
pxa3xx_set_wake(struct irq_data * d,unsigned int on)202 static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
203 {
204 unsigned long flags, mask = 0;
205
206 switch (d->irq) {
207 case IRQ_SSP3:
208 mask = ADXER_MFP_WSSP3;
209 break;
210 case IRQ_MSL:
211 mask = ADXER_WMSL0;
212 break;
213 case IRQ_USBH2:
214 case IRQ_USBH1:
215 mask = ADXER_WUSBH;
216 break;
217 case IRQ_KEYPAD:
218 mask = ADXER_WKP;
219 break;
220 case IRQ_AC97:
221 mask = ADXER_MFP_WAC97;
222 break;
223 case IRQ_USIM:
224 mask = ADXER_WUSIM0;
225 break;
226 case IRQ_SSP2:
227 mask = ADXER_MFP_WSSP2;
228 break;
229 case IRQ_I2C:
230 mask = ADXER_MFP_WI2C;
231 break;
232 case IRQ_STUART:
233 mask = ADXER_MFP_WUART3;
234 break;
235 case IRQ_BTUART:
236 mask = ADXER_MFP_WUART2;
237 break;
238 case IRQ_FFUART:
239 mask = ADXER_MFP_WUART1;
240 break;
241 case IRQ_MMC:
242 mask = ADXER_MFP_WMMC1;
243 break;
244 case IRQ_SSP:
245 mask = ADXER_MFP_WSSP1;
246 break;
247 case IRQ_RTCAlrm:
248 mask = ADXER_WRTC;
249 break;
250 case IRQ_SSP4:
251 mask = ADXER_MFP_WSSP4;
252 break;
253 case IRQ_TSI:
254 mask = ADXER_WTSI;
255 break;
256 case IRQ_USIM2:
257 mask = ADXER_WUSIM1;
258 break;
259 case IRQ_MMC2:
260 mask = ADXER_MFP_WMMC2;
261 break;
262 case IRQ_NAND:
263 mask = ADXER_MFP_WFLASH;
264 break;
265 case IRQ_USB2:
266 mask = ADXER_WUSB2;
267 break;
268 case IRQ_WAKEUP0:
269 mask = ADXER_WEXTWAKE0;
270 break;
271 case IRQ_WAKEUP1:
272 mask = ADXER_WEXTWAKE1;
273 break;
274 case IRQ_MMC3:
275 mask = ADXER_MFP_GEN12;
276 break;
277 default:
278 return -EINVAL;
279 }
280
281 local_irq_save(flags);
282 if (on)
283 wakeup_src |= mask;
284 else
285 wakeup_src &= ~mask;
286 local_irq_restore(flags);
287
288 return 0;
289 }
#else
/* !CONFIG_PM: no PM setup and no IRQ wakeup support. */
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake NULL
#endif
294
pxa_ack_ext_wakeup(struct irq_data * d)295 static void pxa_ack_ext_wakeup(struct irq_data *d)
296 {
297 PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
298 }
299
pxa_mask_ext_wakeup(struct irq_data * d)300 static void pxa_mask_ext_wakeup(struct irq_data *d)
301 {
302 pxa_mask_irq(d);
303 PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
304 }
305
pxa_unmask_ext_wakeup(struct irq_data * d)306 static void pxa_unmask_ext_wakeup(struct irq_data *d)
307 {
308 pxa_unmask_irq(d);
309 PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
310 }
311
pxa_set_ext_wakeup_type(struct irq_data * d,unsigned int flow_type)312 static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
313 {
314 if (flow_type & IRQ_TYPE_EDGE_RISING)
315 PWER |= 1 << (d->irq - IRQ_WAKEUP0);
316
317 if (flow_type & IRQ_TYPE_EDGE_FALLING)
318 PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);
319
320 return 0;
321 }
322
/* irq_chip for the two dedicated external wakeup pins (WAKEUP0/1). */
static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};
330
pxa_init_ext_wakeup_irq(int (* fn)(struct irq_data *,unsigned int))331 static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
332 unsigned int))
333 {
334 int irq;
335
336 for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
337 irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
338 handle_edge_irq);
339 irq_clear_status_flags(irq, IRQ_NOREQUEST);
340 }
341
342 pxa_ext_wakeup_chip.irq_set_wake = fn;
343 }
344
/*
 * Common IRQ init shared by the legacy and DT entry points: grant
 * coprocessor 6 access via the CP15 coprocessor access register (bit 6),
 * then register the external wakeup IRQs.
 */
static void __init __pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}
355
/* Legacy (board-file) IRQ init entry point: 56 interrupt sources. */
void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}
361
#ifdef CONFIG_OF
/*
 * Devicetree probe for the "marvell,pxa-intc" interrupt controller:
 * performs the common CP6/wakeup setup, initialises the DT irqchip with
 * our set_wake callback, and installs the top-level IRQ handler.
 * (The original declaration carried a duplicated "__init" annotation;
 * one is sufficient.)
 */
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
	set_handle_irq(ichp_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif /* CONFIG_OF */
374
/* Static I/O mappings added on top of the common PXA ones: the memory
 * controller and the NAND controller register windows. */
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{ /* Mem Ctl */
		.virtual = (unsigned long)SMEMC_VIRT,
		.pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length = SMEMC_SIZE,
		.type = MT_DEVICE
	}, { /* NAND controller (backs the NDCR macro above) */
		.virtual = (unsigned long)NAND_VIRT,
		.pfn = __phys_to_pfn(NAND_PHYS),
		.length = NAND_SIZE,
		.type = MT_DEVICE
	},
};
388
/*
 * Early machine mapping hook: install the common PXA mappings plus the
 * PXA3xx-specific ones, then read back the clock frequencies (the
 * argument 1 requests an informational printout).
 */
void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}
395
396 /*
397 * device registration specific to PXA3xx.
398 */
399
/* Register the power-I2C controller with board-supplied platform data
 * (may be NULL for defaults). */
void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}
404
/* Platform data for the GPIO controller: anchor its IRQ numbering at
 * the IRQ mapped to GPIO 0. */
static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};
408
/* On-chip devices registered in the non-DT boot path (see pxa3xx_init). */
static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&pxa_device_rtc,
	&pxa3xx_device_ssp1,
	&pxa3xx_device_ssp2,
	&pxa3xx_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};
426
/* DMA slave map: (device name, channel name) -> DMA requestor line,
 * consumed by the pxa-dma driver through pxa3xx_dma_pdata below. */
static const struct dma_slave_map pxa3xx_slave_map[] = {
	/* PXA25x, PXA27x and PXA3xx common entries */
	{ "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
	  PDMA_FILTER_PARAM(LOWEST, 10) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
	{ "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
	{ "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
	{ "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
	{ "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
	{ "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
	{ "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
	{ "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
	{ "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
	{ "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
	{ "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },

	/* PXA3xx specific map */
	{ "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
	{ "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
	{ "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
	{ "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
	{ "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
	{ "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
	{ "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
};
455
/* DMA controller description: 32 channels, 100 requestor lines, plus
 * the slave map above. */
static struct mmp_dma_platdata pxa3xx_dma_pdata = {
	.dma_channels	= 32,
	.nb_requestors	= 100,
	.slave_map	= pxa3xx_slave_map,
	.slave_map_cnt	= ARRAY_SIZE(pxa3xx_slave_map),
};
462
/*
 * Core PXA3xx initialisation, run at postcore time: latch the reset
 * reason, neutralise the NAND DFI bus arbiter, set up PM and wakeup
 * IRQs, and — when not booting from devicetree — register the on-chip
 * platform devices.
 */
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		/* Record why we rebooted for later consumers. */
		reset_status = ARSR;

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they will be referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		pxa3xx_init_pm();

		/* The dedicated wakeup pins are always wake-capable. */
		enable_irq_wake(IRQ_WAKEUP0);
		if (cpu_is_pxa320())
			enable_irq_wake(IRQ_WAKEUP1);

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		/* DT boots register devices from the devicetree instead. */
		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);
514