arch/arm/mach-exynos/common.c
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Common code for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/proc-fns.h>
#include <asm/exception.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/cacheflush.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>
#include <mach/pmu.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/regs-serial.h>

#include "common.h"

#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";

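/*
 * SoC detection table: during exynos_init_io(), s3c_init_cpu() compares
 * the product ID read from the CHIPID block (samsung_cpu_id) against each
 * entry's idcode/idmask pair and selects the matching entry, so that its
 * map_io/init_clocks/init_uarts/init hooks are used for this SoC.
 */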
static struct cpu_table cpu_ids[] __initdata = {
        {
                .idcode         = EXYNOS4210_CPU_ID,
                .idmask         = EXYNOS4_CPU_MASK,
                .map_io         = exynos4_map_io,
                .init_clocks    = exynos4_init_clocks,
                .init_uarts     = exynos4_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos4210,
        }, {
                .idcode         = EXYNOS4212_CPU_ID,
                .idmask         = EXYNOS4_CPU_MASK,
                .map_io         = exynos4_map_io,
                .init_clocks    = exynos4_init_clocks,
                .init_uarts     = exynos4_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos4212,
        }, {
                .idcode         = EXYNOS4412_CPU_ID,
                .idmask         = EXYNOS4_CPU_MASK,
                .map_io         = exynos4_map_io,
                .init_clocks    = exynos4_init_clocks,
                .init_uarts     = exynos4_init_uarts,
                .init           = exynos_init,
                .name           = name_exynos4412,
        },
};

/* Initial IO mappings */

static struct map_desc exynos_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_CHIPID,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_CHIPID),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_SYS,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSCON),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_TIMER,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_TIMER),
                .length         = SZ_16K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_WATCHDOG,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SROMC,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_SYSTIMER,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_PMU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_PMU),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_COMBINER_BASE,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_COMBINER),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_CPU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_GIC_DIST,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_UART,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_UART),
                .length         = SZ_512K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos4_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_CMU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_CMU),
                .length         = SZ_128K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_COREPERI_BASE,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_COREPERI),
                .length         = SZ_8K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_L2CC,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_L2CC),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_DMC0,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC0),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S5P_VA_DMC1,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC1),
                .length         = SZ_64K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_USB_HSPHY,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_HSPHY),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos4_iodesc0[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_SYSRAM,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

static struct map_desc exynos4_iodesc1[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_SYSRAM,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        },
};

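/*
 * Software reset through the PMU SWRESET register; the restart mode and
 * command string are not used on EXYNOS4.
 */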
void exynos4_restart(char mode, const char *cmd)
{
        __raw_writel(0x1, S5P_SWRESET);
}

/*
 * exynos_init_io
 *
 * register the standard cpu IO areas
 */

void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
        /* initialize the io descriptors we need for initialization */
        iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
        if (mach_desc)
                iotable_init(mach_desc, size);

        /* detect cpu id and rev. */
        s5p_init_cpu(S5P_VA_CHIPID);

        s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

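/*
 * EXYNOS4-specific static mappings and early device naming.  Note that
 * EXYNOS4210 Rev0 exposes its boot SYSRAM at a different physical address
 * than later revisions, hence the two alternative SYSRAM descriptors above.
 */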
void __init exynos4_map_io(void)
{
        iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

        if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
                iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
        else
                iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

        /* initialize device information early */
        exynos4_default_sdhci0();
        exynos4_default_sdhci1();
        exynos4_default_sdhci2();
        exynos4_default_sdhci3();

        s3c_adc_setname("samsung-adc-v3");

        s3c_fimc_setname(0, "exynos4-fimc");
        s3c_fimc_setname(1, "exynos4-fimc");
        s3c_fimc_setname(2, "exynos4-fimc");
        s3c_fimc_setname(3, "exynos4-fimc");

        /* The I2C bus controllers are directly compatible with s3c2440 */
        s3c_i2c0_setname("s3c2440-i2c");
        s3c_i2c1_setname("s3c2440-i2c");
        s3c_i2c2_setname("s3c2440-i2c");

        s5p_fb_setname(0, "exynos4-fb");
        s5p_hdmi_setname("exynos4-hdmi");
}

void __init exynos4_init_clocks(int xtal)
{
        printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

        s3c24xx_register_baseclocks(xtal);
        s5p_register_clocks(xtal);

        if (soc_is_exynos4210())
                exynos4210_register_clocks();
        else if (soc_is_exynos4212() || soc_is_exynos4412())
                exynos4212_register_clocks();

        exynos4_register_clocks();
        exynos4_setup_clocks();
}

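/*
 * External interrupt combiner.  Each combiner channel funnels up to eight
 * interrupt sources into a single GIC SPI; the per-channel state below
 * records the register base, the base Linux IRQ number and the bit mask
 * covering that channel's slice of the status register.
 */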
#define COMBINER_ENABLE_SET     0x0
#define COMBINER_ENABLE_CLEAR   0x4
#define COMBINER_INT_STATUS     0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
        unsigned int irq_offset;
        unsigned int irq_mask;
        void __iomem *base;
};

static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
        struct combiner_chip_data *combiner_data =
                irq_data_get_irq_chip_data(data);

        return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->irq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->irq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, combiner_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        spin_lock(&irq_controller_lock);
        status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
        spin_unlock(&irq_controller_lock);
        status &= chip_data->irq_mask;

        if (status == 0)
                goto out;

        combiner_irq = __ffs(status);

        cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
        if (unlikely(cascade_irq >= NR_IRQS))
                do_bad_IRQ(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
        .name           = "COMBINER",
        .irq_mask       = combiner_mask_irq,
        .irq_unmask     = combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
        if (combiner_nr >= MAX_COMBINER_NR)
                BUG();
        if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
                          unsigned int irq_start)
{
        unsigned int i;

        if (combiner_nr >= MAX_COMBINER_NR)
                BUG();

        combiner_data[combiner_nr].base = base;
        combiner_data[combiner_nr].irq_offset = irq_start;
        combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

        /* Disable all interrupts */

        __raw_writel(combiner_data[combiner_nr].irq_mask,
                     base + COMBINER_ENABLE_CLEAR);

        /* Setup the Linux IRQ subsystem */

        for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
                                + MAX_IRQ_IN_COMBINER; i++) {
                irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
                irq_set_chip_data(i, &combiner_data[combiner_nr]);
                set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
        }
}

#ifdef CONFIG_OF
static const struct of_device_id exynos4_dt_irq_match[] = {
        { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
        {},
};
#endif

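/*
 * When a device tree is available the GIC is set up through of_irq_init()
 * and the match table above; on non-DT boards gic_init_bases() is called
 * directly, with the per-CPU register stride picked to match the SoC
 * (0x4000 on EXYNOS4412, 0x8000 otherwise).
 */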
void __init exynos4_init_irq(void)
{
        int irq;
        unsigned int gic_bank_offset;

        gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

        if (!of_have_populated_dt())
                gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST,
                               S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
        else
                of_irq_init(exynos4_dt_irq_match);
#endif

        for (irq = 0; irq < MAX_COMBINER_NR; irq++) {

                combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
                                COMBINER_IRQ(irq, 0));
                combiner_cascade_irq(irq, IRQ_SPI(irq));
        }

        /*
         * The parameters of s5p_init_irq() are for VIC init.
         * These parameters should be NULL and 0 because EXYNOS4
         * uses GIC instead of VIC.
         */
        s5p_init_irq(NULL, 0);
}


struct bus_type exynos4_subsys = {
        .name           = "exynos4-core",
        .dev_name       = "exynos4-core",
};

static struct device exynos4_dev = {
        .bus    = &exynos4_subsys,
};

static int __init exynos4_core_init(void)
{
        return subsys_system_register(&exynos4_subsys, NULL);
}
core_initcall(exynos4_core_init);

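/*
 * L2 cache controller (PL310) setup.  The DT path lets l2x0_of_init()
 * program the controller; on non-DT systems the tag/data latencies,
 * prefetch and power control registers are written here before calling
 * l2x0_init().  The register snapshot in l2x0_saved_regs (and its physical
 * address in l2x0_regs_phys) is flushed to memory so it can be used to
 * reprogram the L2 with the MMU off, e.g. on resume from suspend.
 */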
#ifdef CONFIG_CACHE_L2X0
static int __init exynos4_l2x0_cache_init(void)
{
        int ret;
        ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
        if (!ret) {
                l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
                clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
                return 0;
        }

        if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
                l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
                /* TAG, Data Latency Control: 2 cycles */
                l2x0_saved_regs.tag_latency = 0x110;

                if (soc_is_exynos4212() || soc_is_exynos4412())
                        l2x0_saved_regs.data_latency = 0x120;
                else
                        l2x0_saved_regs.data_latency = 0x110;

                l2x0_saved_regs.prefetch_ctrl = 0x30000007;
                l2x0_saved_regs.pwr_ctrl =
                        (L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

                l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

                __raw_writel(l2x0_saved_regs.tag_latency,
                                S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
                __raw_writel(l2x0_saved_regs.data_latency,
                                S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

                /* L2X0 Prefetch Control */
                __raw_writel(l2x0_saved_regs.prefetch_ctrl,
                                S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

                /* L2X0 Power Control */
                __raw_writel(l2x0_saved_regs.pwr_ctrl,
                                S5P_VA_L2CC + L2X0_POWER_CTRL);

                clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
                clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
        }

        l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
        return 0;
}

early_initcall(exynos4_l2x0_cache_init);
#endif

int __init exynos_init(void)
{
        printk(KERN_INFO "EXYNOS: Initializing architecture\n");
        return device_register(&exynos4_dev);
}

/* uart registration process */

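/*
 * All EXYNOS4 UART ports use the fractional baud-rate divider, so
 * has_fracval is forced on for every configured port before the common
 * Samsung UART devices are registered as "exynos4210-uart".
 */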
void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
        struct s3c2410_uartcfg *tcfg = cfg;
        u32 ucnt;

        for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
                tcfg->has_fracval = 1;

        s3c24xx_init_uartdevs("exynos4210-uart", s5p_uart_resources, cfg, no);
}

static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

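/*
 * EINTs 0-15 each have a dedicated GIC interrupt rather than sharing
 * IRQ_EINT16_31, and the SoC numbers them in three blocks.  Map an
 * external-interrupt index (0..15) to that per-pin IRQ number.
 */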
static unsigned int exynos4_get_irq_nr(unsigned int number)
{
        u32 ret = 0;

        switch (number) {
        case 0 ... 3:
                ret = (number + IRQ_EINT0);
                break;
        case 4 ... 7:
                ret = (number + (IRQ_EINT4 - 4));
                break;
        case 8 ... 15:
                ret = (number + (IRQ_EINT8 - 8));
                break;
        default:
                printk(KERN_ERR "%s: invalid EINT number: %d\n",
                       __func__, number);
        }

        return ret;
}


static inline void exynos4_irq_eint_mask(struct irq_data *data)
{
        u32 mask;

        spin_lock(&eint_lock);
        mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
        mask |= eint_irq_to_bit(data->irq);
        __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
        spin_unlock(&eint_lock);
}

static void exynos4_irq_eint_unmask(struct irq_data *data)
{
        u32 mask;

        spin_lock(&eint_lock);
        mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
        mask &= ~(eint_irq_to_bit(data->irq));
        __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
        spin_unlock(&eint_lock);
}

static inline void exynos4_irq_eint_ack(struct irq_data *data)
{
        __raw_writel(eint_irq_to_bit(data->irq),
                     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}

static void exynos4_irq_eint_maskack(struct irq_data *data)
{
        exynos4_irq_eint_mask(data);
        exynos4_irq_eint_ack(data);
}

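/*
 * Each EINT_CON register packs eight 4-bit trigger-type fields, so the
 * field for a given pin sits at shift (offs & 7) * 4 and only the low
 * three bits are used.  After programming the trigger type, the pad is
 * switched to its EINT function via s3c_gpio_cfgpin().
 */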
static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
        int offs = EINT_OFFSET(data->irq);
        int shift;
        u32 ctrl, mask;
        u32 newvalue = 0;

        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
                newvalue = S5P_IRQ_TYPE_EDGE_RISING;
                break;

        case IRQ_TYPE_EDGE_FALLING:
                newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
                break;

        default:
                printk(KERN_ERR "No such irq type %d\n", type);
                return -EINVAL;
        }

        shift = (offs & 0x7) * 4;
        mask = 0x7 << shift;

        spin_lock(&eint_lock);
        ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
        ctrl &= ~mask;
        ctrl |= newvalue << shift;
        __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
        spin_unlock(&eint_lock);

        switch (offs) {
        case 0 ... 7:
                s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
                break;
        case 8 ... 15:
                s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
                break;
        case 16 ... 23:
                s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
                break;
        case 24 ... 31:
                s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
                break;
        default:
                printk(KERN_ERR "No such irq number %d\n", offs);
        }

        return 0;
}


static struct irq_chip exynos4_irq_eint = {
        .name           = "exynos4-eint",
        .irq_mask       = exynos4_irq_eint_mask,
        .irq_unmask     = exynos4_irq_eint_unmask,
        .irq_mask_ack   = exynos4_irq_eint_maskack,
        .irq_ack        = exynos4_irq_eint_ack,
        .irq_set_type   = exynos4_irq_eint_set_type,
#ifdef CONFIG_PM
        .irq_set_wake   = s3c_irqext_wake,
#endif
};

/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos4_irq_demux_eint(unsigned int start)
{
        unsigned int irq;

        u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
        u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));

        status &= ~mask;
        status &= 0xff;

        while (status) {
                irq = fls(status) - 1;
                generic_handle_irq(irq + start);
                status &= ~(1 << irq);
        }
}

static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_get_chip(irq);
        chained_irq_enter(chip, desc);
        exynos4_irq_demux_eint(IRQ_EINT(16));
        exynos4_irq_demux_eint(IRQ_EINT(24));
        chained_irq_exit(chip, desc);
}

static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
        u32 *irq_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);
        chip->irq_mask(&desc->irq_data);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        generic_handle_irq(*irq_data);

        chip->irq_unmask(&desc->irq_data);
        chained_irq_exit(chip, desc);
}

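/*
 * Two-level external interrupt setup: every EINT gets the exynos4-eint
 * chip with a level handler, EINTs 16-31 are demultiplexed from the shared
 * IRQ_EINT16_31 line, and EINTs 0-15 cascade individually from their
 * dedicated GIC interrupts with the target IRQ stashed in eint0_15_data[]
 * as handler data.
 */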
static int __init exynos4_init_irq_eint(void)
{
        int irq;

        for (irq = 0 ; irq <= 31 ; irq++) {
                irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
                                         handle_level_irq);
                set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
        }

        irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);

        for (irq = 0 ; irq <= 15 ; irq++) {
                eint0_15_data[irq] = IRQ_EINT(irq);

                irq_set_handler_data(exynos4_get_irq_nr(irq),
                                     &eint0_15_data[irq]);
                irq_set_chained_handler(exynos4_get_irq_nr(irq),
                                        exynos4_irq_eint0_15);
        }

        return 0;
}
arch_initcall(exynos4_init_irq_eint);