]> git.openfabrics.org - ~shefty/rdma-dev.git/blob - arch/arm/mach-exynos/common.c
ARM: EXYNOS: Remove unused static uart resource information
[~shefty/rdma-dev.git] / arch / arm / mach-exynos / common.c
1 /*
2  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3  *              http://www.samsung.com
4  *
5  * Common Codes for EXYNOS
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/io.h>
16 #include <linux/device.h>
17 #include <linux/gpio.h>
18 #include <linux/sched.h>
19 #include <linux/serial_core.h>
20 #include <linux/of.h>
21 #include <linux/of_irq.h>
22 #include <linux/export.h>
23 #include <linux/irqdomain.h>
24 #include <linux/of_address.h>
25
26 #include <asm/proc-fns.h>
27 #include <asm/exception.h>
28 #include <asm/hardware/cache-l2x0.h>
29 #include <asm/hardware/gic.h>
30 #include <asm/mach/map.h>
31 #include <asm/mach/irq.h>
32 #include <asm/cacheflush.h>
33
34 #include <mach/regs-irq.h>
35 #include <mach/regs-pmu.h>
36 #include <mach/regs-gpio.h>
37 #include <mach/pmu.h>
38
39 #include <plat/cpu.h>
40 #include <plat/clock.h>
41 #include <plat/devs.h>
42 #include <plat/pm.h>
43 #include <plat/sdhci.h>
44 #include <plat/gpio-cfg.h>
45 #include <plat/adc-core.h>
46 #include <plat/fb-core.h>
47 #include <plat/fimc-core.h>
48 #include <plat/iic-core.h>
49 #include <plat/tv-core.h>
50 #include <plat/regs-serial.h>
51
52 #include "common.h"
/* L2C-310 auxiliary control value/mask applied by exynos4_l2x0_cache_init() */
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

/* SoC names reported through the cpu_ids table below */
static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";

/* Forward declarations for the per-SoC callbacks registered in cpu_ids */
static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);
67
/*
 * CPU ID match table consumed by s3c_init_cpu(): the entry whose
 * idcode/idmask matches the chip ID selects the map_io/clock/uart
 * init callbacks for this SoC.  Note EXYNOS5250 has no .init_uarts.
 */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	}, {
		.idcode		= EXYNOS5250_SOC_ID,
		.idmask		= EXYNOS5_SOC_MASK,
		.map_io		= exynos5_map_io,
		.init_clocks	= exynos5_init_clocks,
		.init		= exynos_init,
		.name		= name_exynos5250,
	},
};
102
103 /* Initial IO mappings */
104
/* Minimal mapping needed before the SoC is identified: the CHIPID block */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
113
/* Static device mappings common to all EXYNOS4 revisions */
static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC1),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
197
/* SYSRAM mapping for EXYNOS4210 rev 0 (see exynos4_map_io() selection) */
static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
206
/* SYSRAM mapping for all other EXYNOS4 chips/revisions */
static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
215
/* Static device mappings for EXYNOS5 */
static struct map_desc exynos5_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSRAM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_CMU),
		/* CMU block is 144KiB — not a power-of-two SZ_* constant */
		.length		= 144 * SZ_1K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	},
};
264
/* Machine restart hook for EXYNOS4: trigger the PMU software reset. */
void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}
269
/* Machine restart hook for EXYNOS5: trigger the PMU software reset. */
void exynos5_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, EXYNOS_SWRESET);
}
274
/* Late init hook: defer PM initcall work until late in boot. */
void __init exynos_init_late(void)
{
	exynos_pm_late_initcall();
}
279
280 /*
281  * exynos_map_io
282  *
283  * register the standard cpu IO areas
284  */
285
/*
 * Map the common and machine-provided IO regions, read the chip ID to
 * detect the CPU/revision, then dispatch the per-SoC setup via cpu_ids.
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
298
/*
 * EXYNOS4 map_io callback: install the static IO mappings (SYSRAM
 * location differs on 4210 rev 0) and rename platform devices to the
 * driver names this SoC uses.
 */
static void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}
334
/*
 * EXYNOS5 map_io callback: install static IO mappings, point the i2c0
 * platform device at the EXYNOS5 register/IRQ resources, and rename
 * platform devices to the driver names this SoC uses.
 */
static void __init exynos5_map_io(void)
{
	iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));

	/* Retarget the statically-declared i2c0 device at EXYNOS5 addresses */
	s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
	s3c_device_i2c0.resource[0].end   = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
	s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
	s3c_device_i2c0.resource[1].end   = EXYNOS5_IRQ_IIC;

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");
}
354
/*
 * EXYNOS4 clock init: register base and S5P clocks for the given xtal
 * rate, then the 4210- or 4212/4412-specific clocks, then the common
 * EXYNOS4 clocks.
 */
static void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}
370
/* EXYNOS5 clock init: base + S5P clocks for xtal, then EXYNOS5 clocks. */
static void __init exynos5_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	exynos5_register_clocks();
	exynos5_setup_clocks();
}
381
/* Register offsets within an interrupt-combiner group's register block */
#define COMBINER_ENABLE_SET     0x0
#define COMBINER_ENABLE_CLEAR   0x4
#define COMBINER_INT_STATUS     0xC

static DEFINE_SPINLOCK(irq_controller_lock);

/* Per-combiner state, one entry per combiner in combiner_data[] below */
struct combiner_chip_data {
	unsigned int irq_offset;	/* linux irq of this combiner's first source */
	unsigned int irq_mask;		/* this combiner's byte of the shared INT_STATUS */
	void __iomem *base;		/* register base of the 4-combiner group */
};

static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
396
397 static inline void __iomem *combiner_base(struct irq_data *data)
398 {
399         struct combiner_chip_data *combiner_data =
400                 irq_data_get_irq_chip_data(data);
401
402         return combiner_data->base;
403 }
404
405 static void combiner_mask_irq(struct irq_data *data)
406 {
407         u32 mask = 1 << (data->hwirq % 32);
408
409         __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
410 }
411
412 static void combiner_unmask_irq(struct irq_data *data)
413 {
414         u32 mask = 1 << (data->hwirq % 32);
415
416         __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
417 }
418
/*
 * Chained handler for a combiner's parent GIC interrupt: read the
 * group's status register, mask it down to this combiner's byte, and
 * dispatch the lowest pending source as a linux irq.
 */
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	/* keep only the bits belonging to this combiner */
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	/* lowest pending bit within this combiner */
	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
447
/* irq_chip for sources behind the interrupt combiner (mask/unmask only) */
static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};
453
454 static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
455 {
456         unsigned int max_nr;
457
458         if (soc_is_exynos5250())
459                 max_nr = EXYNOS5_MAX_COMBINER_NR;
460         else
461                 max_nr = EXYNOS4_MAX_COMBINER_NR;
462
463         if (combiner_nr >= max_nr)
464                 BUG();
465         if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
466                 BUG();
467         irq_set_chained_handler(irq, combiner_handle_cascade_irq);
468 }
469
/*
 * Initialise one combiner: record its register base, its first linux
 * irq (from the domain mapping), and which byte of the group's shared
 * status register it owns, then mask all of its sources.
 */
static void __init combiner_init_one(unsigned int combiner_nr,
				     void __iomem *base)
{
	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
	/* four combiners share a register group; each owns one byte */
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */
	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);
}
482
#ifdef CONFIG_OF
/*
 * Translate a 2-cell DT interrupt specifier <combiner-nr source-nr>
 * into a linear hwirq number; trigger type is not encoded (always 0).
 */
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}
#else
/* Without CONFIG_OF there is nothing to translate; always fail. */
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	return -EINVAL;
}
#endif
511
/*
 * Domain map callback: wire a freshly-mapped irq to the combiner chip
 * with level handling, and attach the owning combiner's data (8 sources
 * per combiner, hence hw >> 3).
 */
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
521
522 static struct irq_domain_ops combiner_irq_domain_ops = {
523         .xlate  = combiner_irq_domain_xlate,
524         .map    = combiner_irq_domain_map,
525 };
526
/*
 * Set up the interrupt combiner block: determine the combiner count
 * (from DT when @np is given, otherwise from the SoC), allocate irq
 * descriptors, create the legacy irq domain, then initialise and
 * cascade each combiner onto its parent GIC SPI (or DT-mapped irq).
 */
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int max_nr, nr_irq;

	if (np) {
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, "
				"setting default as %d.\n",
				__func__, EXYNOS4_MAX_COMBINER_NR);
			max_nr = EXYNOS4_MAX_COMBINER_NR;
		}
	} else {
		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
						EXYNOS4_MAX_COMBINER_NR;
	}
	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		/* fall back to the static base if desc allocation failed */
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		/* four combiners per 0x10-sized register group */
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = IRQ_SPI(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_cascade_irq(i, irq);
	}
}
569
570 #ifdef CONFIG_OF
/*
 * DT entry point for the combiner (see exynos4_dt_irq_match): map its
 * registers from the first "reg" entry and run the common init.
 * Returns 0 on success, -ENXIO if the registers cannot be mapped.
 */
int __init combiner_of_init(struct device_node *np, struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	combiner_init(combiner_base, np);

	return 0;
}
585
/* Interrupt controllers probed by of_irq_init(): GIC then the combiner */
static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ .compatible = "samsung,exynos4210-combiner",
			.data = combiner_of_init, },
	{},
};
592 #endif
593
/*
 * EXYNOS4 irq init: bring up the GIC and combiner either statically
 * (non-DT) or via of_irq_init() (DT), then run the common S5P irq init.
 */
void __init exynos4_init_irq(void)
{
	unsigned int gic_bank_offset;

	/* per-CPU GIC bank stride differs on 4412 */
	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos4_dt_irq_match);
#endif

	if (!of_have_populated_dt())
		combiner_init(S5P_VA_COMBINER_BASE, NULL);

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
617
/* EXYNOS5 irq init: DT-only — probe controllers, then common S5P init. */
void __init exynos5_init_irq(void)
{
#ifdef CONFIG_OF
	of_irq_init(exynos4_dt_irq_match);
#endif
	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS5,
	 * like EXYNOS4, uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
630
/* "exynos-core" subsystem bus; exynos4_dev (registered by exynos_init()
 * via the cpu_ids .init hook) hangs off it. */
struct bus_type exynos_subsys = {
	.name		= "exynos-core",
	.dev_name	= "exynos-core",
};

static struct device exynos4_dev = {
	.bus	= &exynos_subsys,
};

/* Register the subsystem early so devices can attach during arch init. */
static int __init exynos_core_init(void)
{
	return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);
645
646 #ifdef CONFIG_CACHE_L2X0
/*
 * Bring up the L2C-310 outer cache on EXYNOS4 (no-op on EXYNOS5250).
 * Prefers DT init via l2x0_of_init(); otherwise, if the controller is
 * not already enabled, programs latency/prefetch/power registers and
 * saves them (with their physical address) for restore after suspend,
 * cleaning the saved data out of the D-cache so resume code can read it.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* only touch the controller if it is not yet enabled */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
698 #endif
699
700 static int __init exynos_init(void)
701 {
702         printk(KERN_INFO "EXYNOS: Initializing architecture\n");
703
704         return device_register(&exynos4_dev);
705 }
706
707 /* uart registration process */
708
/*
 * EXYNOS4 uart init: mark every supplied uart config as using the
 * fractional baud divider, then register the uart platform devices.
 */
static void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
		tcfg->has_fracval = 1;

	s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
}
719
/* Base of the external-interrupt controller registers (set by eint init) */
static void __iomem *exynos_eint_base;

/* Protects read-modify-write of the shared EINT mask/control registers */
static DEFINE_SPINLOCK(eint_lock);

/* Handler data for the EINT0..15 chained handlers */
static unsigned int eint0_15_data[16];
725
726 static inline int exynos4_irq_to_gpio(unsigned int irq)
727 {
728         if (irq < IRQ_EINT(0))
729                 return -EINVAL;
730
731         irq -= IRQ_EINT(0);
732         if (irq < 8)
733                 return EXYNOS4_GPX0(irq);
734
735         irq -= 8;
736         if (irq < 8)
737                 return EXYNOS4_GPX1(irq);
738
739         irq -= 8;
740         if (irq < 8)
741                 return EXYNOS4_GPX2(irq);
742
743         irq -= 8;
744         if (irq < 8)
745                 return EXYNOS4_GPX3(irq);
746
747         return -EINVAL;
748 }
749
750 static inline int exynos5_irq_to_gpio(unsigned int irq)
751 {
752         if (irq < IRQ_EINT(0))
753                 return -EINVAL;
754
755         irq -= IRQ_EINT(0);
756         if (irq < 8)
757                 return EXYNOS5_GPX0(irq);
758
759         irq -= 8;
760         if (irq < 8)
761                 return EXYNOS5_GPX1(irq);
762
763         irq -= 8;
764         if (irq < 8)
765                 return EXYNOS5_GPX2(irq);
766
767         irq -= 8;
768         if (irq < 8)
769                 return EXYNOS5_GPX3(irq);
770
771         return -EINVAL;
772 }
773
/* Parent irqs for EINT0..15 on EXYNOS4, indexed by EINT number */
static unsigned int exynos4_eint0_15_src_int[16] = {
	EXYNOS4_IRQ_EINT0,
	EXYNOS4_IRQ_EINT1,
	EXYNOS4_IRQ_EINT2,
	EXYNOS4_IRQ_EINT3,
	EXYNOS4_IRQ_EINT4,
	EXYNOS4_IRQ_EINT5,
	EXYNOS4_IRQ_EINT6,
	EXYNOS4_IRQ_EINT7,
	EXYNOS4_IRQ_EINT8,
	EXYNOS4_IRQ_EINT9,
	EXYNOS4_IRQ_EINT10,
	EXYNOS4_IRQ_EINT11,
	EXYNOS4_IRQ_EINT12,
	EXYNOS4_IRQ_EINT13,
	EXYNOS4_IRQ_EINT14,
	EXYNOS4_IRQ_EINT15,
};
792
/* Parent irqs for EINT0..15 on EXYNOS5, indexed by EINT number */
static unsigned int exynos5_eint0_15_src_int[16] = {
	EXYNOS5_IRQ_EINT0,
	EXYNOS5_IRQ_EINT1,
	EXYNOS5_IRQ_EINT2,
	EXYNOS5_IRQ_EINT3,
	EXYNOS5_IRQ_EINT4,
	EXYNOS5_IRQ_EINT5,
	EXYNOS5_IRQ_EINT6,
	EXYNOS5_IRQ_EINT7,
	EXYNOS5_IRQ_EINT8,
	EXYNOS5_IRQ_EINT9,
	EXYNOS5_IRQ_EINT10,
	EXYNOS5_IRQ_EINT11,
	EXYNOS5_IRQ_EINT12,
	EXYNOS5_IRQ_EINT13,
	EXYNOS5_IRQ_EINT14,
	EXYNOS5_IRQ_EINT15,
};
811 static inline void exynos_irq_eint_mask(struct irq_data *data)
812 {
813         u32 mask;
814
815         spin_lock(&eint_lock);
816         mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
817         mask |= EINT_OFFSET_BIT(data->irq);
818         __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
819         spin_unlock(&eint_lock);
820 }
821
822 static void exynos_irq_eint_unmask(struct irq_data *data)
823 {
824         u32 mask;
825
826         spin_lock(&eint_lock);
827         mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
828         mask &= ~(EINT_OFFSET_BIT(data->irq));
829         __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
830         spin_unlock(&eint_lock);
831 }
832
833 static inline void exynos_irq_eint_ack(struct irq_data *data)
834 {
835         __raw_writel(EINT_OFFSET_BIT(data->irq),
836                      EINT_PEND(exynos_eint_base, data->irq));
837 }
838
/* Mask then ack, in that order, for the irq_mask_ack chip callback. */
static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}
844
845 static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
846 {
847         int offs = EINT_OFFSET(data->irq);
848         int shift;
849         u32 ctrl, mask;
850         u32 newvalue = 0;
851
852         switch (type) {
853         case IRQ_TYPE_EDGE_RISING:
854                 newvalue = S5P_IRQ_TYPE_EDGE_RISING;
855                 break;
856
857         case IRQ_TYPE_EDGE_FALLING:
858                 newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
859                 break;
860
861         case IRQ_TYPE_EDGE_BOTH:
862                 newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
863                 break;
864
865         case IRQ_TYPE_LEVEL_LOW:
866                 newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
867                 break;
868
869         case IRQ_TYPE_LEVEL_HIGH:
870                 newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
871                 break;
872
873         default:
874                 printk(KERN_ERR "No such irq type %d", type);
875                 return -EINVAL;
876         }
877
878         shift = (offs & 0x7) * 4;
879         mask = 0x7 << shift;
880
881         spin_lock(&eint_lock);
882         ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
883         ctrl &= ~mask;
884         ctrl |= newvalue << shift;
885         __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
886         spin_unlock(&eint_lock);
887
888         if (soc_is_exynos5250())
889                 s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
890         else
891                 s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
892
893         return 0;
894 }
895
/* irq_chip shared by all 32 external interrupt (EINT) lines. */
static struct irq_chip exynos_irq_eint = {
	.name		= "exynos-eint",
	.irq_mask	= exynos_irq_eint_mask,
	.irq_unmask	= exynos_irq_eint_unmask,
	.irq_mask_ack	= exynos_irq_eint_maskack,
	.irq_ack	= exynos_irq_eint_ack,
	.irq_set_type	= exynos_irq_eint_set_type,
#ifdef CONFIG_PM
	/* EINTs may act as wakeup sources when power management is built in. */
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
907
/*
 * exynos_irq_demux_eint
 *
 * This function demuxes the IRQs from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
917 static inline void exynos_irq_demux_eint(unsigned int start)
918 {
919         unsigned int irq;
920
921         u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
922         u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));
923
924         status &= ~mask;
925         status &= 0xff;
926
927         while (status) {
928                 irq = fls(status) - 1;
929                 generic_handle_irq(irq + start);
930                 status &= ~(1 << irq);
931         }
932 }
933
/* Chained handler: fan out the shared parent IRQ for EINT16..31. */
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	chained_irq_enter(chip, desc);
	/* Both eight-wide banks are muxed onto this one parent interrupt. */
	exynos_irq_demux_eint(IRQ_EINT(16));
	exynos_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}
942
943 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
944 {
945         u32 *irq_data = irq_get_handler_data(irq);
946         struct irq_chip *chip = irq_get_chip(irq);
947
948         chained_irq_enter(chip, desc);
949         chip->irq_mask(&desc->irq_data);
950
951         if (chip->irq_ack)
952                 chip->irq_ack(&desc->irq_data);
953
954         generic_handle_irq(*irq_data);
955
956         chip->irq_unmask(&desc->irq_data);
957         chained_irq_exit(chip, desc);
958 }
959
960 static int __init exynos_init_irq_eint(void)
961 {
962         int irq;
963
964         if (soc_is_exynos5250())
965                 exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
966         else
967                 exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
968
969         if (exynos_eint_base == NULL) {
970                 pr_err("unable to ioremap for EINT base address\n");
971                 return -ENOMEM;
972         }
973
974         for (irq = 0 ; irq <= 31 ; irq++) {
975                 irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
976                                          handle_level_irq);
977                 set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
978         }
979
980         irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);
981
982         for (irq = 0 ; irq <= 15 ; irq++) {
983                 eint0_15_data[irq] = IRQ_EINT(irq);
984
985                 if (soc_is_exynos5250()) {
986                         irq_set_handler_data(exynos5_eint0_15_src_int[irq],
987                                              &eint0_15_data[irq]);
988                         irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
989                                                 exynos_irq_eint0_15);
990                 } else {
991                         irq_set_handler_data(exynos4_eint0_15_src_int[irq],
992                                              &eint0_15_data[irq]);
993                         irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
994                                                 exynos_irq_eint0_15);
995                 }
996         }
997
998         return 0;
999 }
1000 arch_initcall(exynos_init_irq_eint);