]> git.openfabrics.org - ~shefty/rdma-dev.git/blob - arch/arm/plat-omap/dma.c
omap: dma: Support for prefetch in destination synchronized transfer
[~shefty/rdma-dev.git] / arch / arm / plat-omap / dma.c
1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics tranformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License version 2 as
20  * published by the Free Software Foundation.
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/io.h>
32 #include <linux/slab.h>
33
34 #include <asm/system.h>
35 #include <mach/hardware.h>
36 #include <plat/dma.h>
37
38 #include <plat/tc.h>
39
40 #undef DEBUG
41
#ifndef CONFIG_ARCH_OMAP1
/* Per-channel software states used by the OMAP2+ chained-DMA support */
enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
};

/* Whole-chain run state */
enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif
49
/* Set in dma_chan[].flags while a channel has a transfer in flight */
#define OMAP_DMA_ACTIVE			0x01
/* Write-1-to-clear mask covering the OMAP2 per-channel CSR status bits */
#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe

/* Base of the OMAP1 functional-mux registers used to route gDMA requests */
#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)

/* Nonzero while the DMA module runs in OMAP1510-compatible mode */
static int enable_1510_mode;
56
/*
 * Snapshot of the global (non per-channel) DMA controller registers,
 * used to save/restore context around a controller power transition.
 */
static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;
	u32 dma_ocp_sysconfig;
	u32 dma_gcr;
} omap_dma_global_context;
62
/* Software bookkeeping for one logical DMA channel */
struct omap_dma_lch {
	int next_lch;		/* next channel of a linked transfer, -1 if none */
	int dev_id;		/* requesting device id; -1 when the channel is free */
	u16 saved_csr;		/* saved channel status (OMAP1 CSR snapshot) */
	u16 enabled_irqs;	/* interrupt bits to program into CICR */
	const char *dev_name;	/* owner name given to omap_request_dma() */
	/* Completion/status callback and its opaque cookie */
	void (*callback)(int lch, u16 ch_status, void *data);
	void *data;

#ifndef CONFIG_ARCH_OMAP1
	/* required for Dynamic chaining */
	int prev_linked_ch;
	int next_linked_ch;
	int state;		/* DMA_CH_* software state */
	int chain_id;		/* owning chain id, -1 when not chained */

	int status;
#endif
	long flags;		/* OMAP_DMA_ACTIVE and friends */
};
83
/*
 * Bookkeeping for one channel chain: the list of linked logical
 * channels plus a circular queue of pending transfers over them.
 */
struct dma_link_info {
	int *linked_dmach_q;	/* channel numbers making up the chain */
	int no_of_lchs_linked;	/* number of entries in linked_dmach_q */

	int q_count;		/* transfers currently queued */
	int q_tail;
	int q_head;

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;		/* chaining mode; presumably static vs dynamic — see plat/dma.h */

};

/* Chain table, indexed by chain id (allocation not visible in this file chunk) */
static struct dma_link_info *dma_linked_lch;
98
#ifndef CONFIG_ARCH_OMAP1

/* Chain handling macros */

/* Reset a chain's circular transfer queue to empty */
#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
	do {								\
		dma_linked_lch[chain_id].q_head =			\
		dma_linked_lch[chain_id].q_tail =			\
		dma_linked_lch[chain_id].q_count = 0;			\
	} while (0)

/* True when every linked channel of the chain holds a queued transfer */
#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
		(dma_linked_lch[chain_id].no_of_lchs_linked ==		\
		dma_linked_lch[chain_id].q_count)

/*
 * True when exactly one free slot remains in the chain's queue.
 * Note: this used to be wrapped in do { } while (0), which made it an
 * ill-formed statement instead of a usable predicate; it is now an
 * expression, matching OMAP_DMA_CHAIN_QFULL/QEMPTY.
 */
#define OMAP_DMA_CHAIN_QLAST(chain_id)					\
		((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==	\
		dma_linked_lch[chain_id].q_count)

/* True when no transfers are queued on the chain */
#define OMAP_DMA_CHAIN_QEMPTY(chain_id)					\
		(0 == dma_linked_lch[chain_id].q_count)

/* Advance a head/tail index, wrapping at the chain length */
#define __OMAP_DMA_CHAIN_INCQ(end)					\
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)

/* Consume one queued transfer: advance the head, drop the count */
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
		dma_linked_lch[chain_id].q_count--;			\
	} while (0)

/* Queue one more transfer: advance the tail, bump the count */
#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
		dma_linked_lch[chain_id].q_count++;			\
	} while (0)
#endif
132
static int dma_lch_count;		/* total logical channels on this SoC */
static int dma_chan_count;		/* channels managed here; presumably dma_lch_count minus reserved — set at init, not visible in this chunk */
static int omap_dma_reserve_channels;	/* channels held back from general allocation */

static spinlock_t dma_chan_lock;	/* protects dma_chan[] allocation state and IRQENABLE_L0 updates */
static struct omap_dma_lch *dma_chan;	/* per-channel software state array */
static void __iomem *omap_dma_base;	/* ioremapped base of the DMA controller */

/* OMAP1: interrupt line for each logical DMA channel, indexed by channel */
static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
	INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};
148
149 static inline void disable_lnk(int lch);
150 static void omap_disable_channel_irq(int lch);
151 static inline void omap_enable_channel_irq(int lch);
152
/*
 * Log a "not implemented on 24xx" marker naming the calling function.
 * The trailing semicolon was removed from the expansion so call sites
 * terminate it themselves like a normal statement; the old form left a
 * stray empty statement and broke unbraced if/else bodies.
 */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__)
155
/*
 * Read a DMA register.  `reg` is the register-name fragment, pasted
 * onto the per-class offset macro: 16-bit accesses at OMAP1_DMA_*
 * offsets on omap1-class parts, 32-bit accesses at OMAP_DMA4_* offsets
 * otherwise.  Evaluates to the value read (u32).
 */
#define dma_read(reg)							\
({									\
	u32 __val;							\
	if (cpu_class_is_omap1())					\
		__val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg);	\
	else								\
		__val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg);	\
	__val;								\
})

/*
 * Write a DMA register, mirroring dma_read(): 16-bit on omap1-class
 * parts (value truncated to u16), 32-bit on later parts.
 */
#define dma_write(val, reg)						\
({									\
	if (cpu_class_is_omap1())					\
		__raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg); \
	else								\
		__raw_writel((val), omap_dma_base + OMAP_DMA4_##reg);	\
})
173
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
/* Without OMAP15xx support the module is never in 1510 mode */
#define omap_dma_in_1510_mode()		0
#endif
183
184 #ifdef CONFIG_ARCH_OMAP1
185 static inline int get_gdma_dev(int req)
186 {
187         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
188         int shift = ((req - 1) % 5) * 6;
189
190         return ((omap_readl(reg) >> shift) & 0x3f) + 1;
191 }
192
193 static inline void set_gdma_dev(int req, int dev)
194 {
195         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
196         int shift = ((req - 1) % 5) * 6;
197         u32 l;
198
199         l = omap_readl(reg);
200         l &= ~(0x3f << shift);
201         l |= (dev - 1) << shift;
202         omap_writel(l, reg);
203 }
204 #else
205 #define set_gdma_dev(req, dev)  do {} while (0)
206 #endif
207
/*
 * OMAP1 only: zero the first 0x2c bytes of the channel's register
 * block.  OMAP1 DMA registers are 16 bits wide, hence the stride of 2.
 */
static void clear_lch_regs(int lch)
{
	int i;
	void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);

	for (i = 0; i < 0x2c; i += 2)
		__raw_writew(0, lch_base + i);
}
217
/*
 * Set the priority of a DMA channel.
 *
 * On OMAP1-class parts, `dst_port` selects a traffic-controller port
 * register and `priority` is a 4-bit value written into bits 11:8 of
 * it; an unknown port hits BUG().  On OMAP2+ class parts `dst_port`
 * is ignored and `priority` is treated as a boolean mapped onto the
 * channel's CCR bit 6.
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (cpu_class_is_omap1()) {
		/* Pick the traffic-controller register for this port */
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		/* Priority field occupies bits 11:8 */
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}

	if (cpu_class_is_omap2()) {
		u32 ccr;

		ccr = dma_read(CCR(lch));
		if (priority)
			ccr |= (1 << 6);	/* high-priority bit */
		else
			ccr &= ~(1 << 6);
		dma_write(ccr, CCR(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_priority);
259
/*
 * Program the size and synchronization parameters of a transfer.
 *
 * @data_type:	element width, written to CSDP bits 1:0
 * @elem_count:	elements per frame (CEN register)
 * @frame_count: frames per transfer (CFN register)
 * @sync_mode:	OMAP_DMA_SYNC_ELEMENT/FRAME/BLOCK
 * @dma_trigger: OMAP2+ hardware request line; 0 skips the CCR update
 * @src_or_dst_synch: zero = destination-synchronized, nonzero =
 *	source-synchronized, OMAP_DMA_DST_SYNC_PREFETCH = destination-
 *	synchronized with the prefetch bit set (OMAP2+ only)
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* Element size lives in CSDP bits 1:0 */
	l = dma_read(CSDP(lch));
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP(lch));

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* OMAP1: frame sync is CCR bit 5, block sync is CCR2 bit 2 */
		ccr = dma_read(CCR(lch));
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR(lch));

		ccr = dma_read(CCR2(lch));
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2(lch));
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR(lch));

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		/* Bit 24 selects source vs destination synch; 23 = prefetch */
		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		dma_write(val, CCR(lch));
	}

	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
322
/*
 * Configure constant-fill or transparent-copy for a channel, with the
 * given fill/key color.  Not available in OMAP1510-compatible mode.
 * An unknown mode hits BUG().
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		/* OMAP1: color mode is CCR2 bits 1:0 */
		w = dma_read(CCR2(lch));
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2(lch));

		w = dma_read(LCH_CTRL(lch));
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			/* 32-bit color split across the L/U register pair */
			dma_write((u16)color, COLOR_L(lch));
			dma_write((u16)(color >> 16), COLOR_U(lch));
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* OMAP2+: constant fill = CCR bit 16, transparent copy = 17 */
		val = dma_read(CCR(lch));
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR(lch));

		/* COLOR register keeps only the low 24 bits */
		color &= 0xffffff;
		dma_write(color, COLOR(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_color_mode);
383
384 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
385 {
386         if (cpu_class_is_omap2()) {
387                 u32 csdp;
388
389                 csdp = dma_read(CSDP(lch));
390                 csdp &= ~(0x3 << 16);
391                 csdp |= (mode << 16);
392                 dma_write(csdp, CSDP(lch));
393         }
394 }
395 EXPORT_SYMBOL(omap_set_dma_write_mode);
396
397 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
398 {
399         if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
400                 u32 l;
401
402                 l = dma_read(LCH_CTRL(lch));
403                 l &= ~0x7;
404                 l |= mode;
405                 dma_write(l, LCH_CTRL(lch));
406         }
407 }
408 EXPORT_SYMBOL(omap_set_dma_channel_mode);
409
/*
 * Program the source side of a transfer: port (OMAP1 only), addressing
 * mode, start address, and element/frame indexes.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		/* Source port occupies CSDP bits 6:2 */
		w = dma_read(CSDP(lch));
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		dma_write(w, CSDP(lch));
	}

	/* Source addressing mode: CCR bits 13:12 */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* 32-bit start address split over the CSSA_U/CSSA_L pair */
		dma_write(src_start >> 16, CSSA_U(lch));
		dma_write((u16)src_start, CSSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(src_start, CSSA(lch));

	dma_write(src_ei, CSEI(lch));
	dma_write(src_fi, CSFI(lch));
}
EXPORT_SYMBOL(omap_set_dma_src_params);
443
/*
 * Convenience wrapper: program transfer, source, and destination
 * parameters from one parameter block, and the per-channel priorities
 * when either priority field is nonzero.
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
462
/*
 * Program the source element/frame index registers.  Deliberately a
 * no-op on OMAP2+ class parts.
 */
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
	if (!cpu_class_is_omap2()) {
		dma_write(eidx, CSEI(lch));
		dma_write(fidx, CSFI(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_src_index);
472
473 void omap_set_dma_src_data_pack(int lch, int enable)
474 {
475         u32 l;
476
477         l = dma_read(CSDP(lch));
478         l &= ~(1 << 6);
479         if (enable)
480                 l |= (1 << 6);
481         dma_write(l, CSDP(lch));
482 }
483 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
484
/*
 * Set the source burst size (CSDP bits 8:7).  Burst-8 and burst-16 are
 * only supported on OMAP2+ class parts; asking for them on OMAP1 falls
 * through to BUG().
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		/* Field encodings differ between the two register layouts */
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		BUG();
	}

	l |= (burst << 7);
	dma_write(l, CSDP(lch));
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
529
/*
 * Program the destination side of a transfer: port (OMAP1 only),
 * addressing mode, start address, and element/frame indexes.
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		/* Destination port occupies CSDP bits 13:9 */
		l = dma_read(CSDP(lch));
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		dma_write(l, CSDP(lch));
	}

	/* Destination addressing mode: CCR bits 15:14 */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* 32-bit start address split over the CDSA_U/CDSA_L pair */
		dma_write(dest_start >> 16, CDSA_U(lch));
		dma_write(dest_start, CDSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(dest_start, CDSA(lch));

	dma_write(dst_ei, CDEI(lch));
	dma_write(dst_fi, CDFI(lch));
}
EXPORT_SYMBOL(omap_set_dma_dest_params);
561
/*
 * Program the destination element/frame index registers.  Deliberately
 * a no-op on OMAP2+ class parts.
 */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	if (!cpu_class_is_omap2()) {
		dma_write(eidx, CDEI(lch));
		dma_write(fidx, CDFI(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_dest_index);
571
572 void omap_set_dma_dest_data_pack(int lch, int enable)
573 {
574         u32 l;
575
576         l = dma_read(CSDP(lch));
577         l &= ~(1 << 13);
578         if (enable)
579                 l |= 1 << 13;
580         dma_write(l, CSDP(lch));
581 }
582 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
583
/*
 * Set the destination burst size (CSDP bits 15:14).  Burst-16 is only
 * supported on OMAP2+ class parts; asking for it on OMAP1 falls
 * through to BUG().
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		/* Field encodings differ between the two register layouts */
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	dma_write(l, CSDP(lch));
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
625
/*
 * Clear any stale channel status, then program the channel's enabled
 * interrupt mask into CICR.
 */
static inline void omap_enable_channel_irq(int lch)
{
	u32 status;

	/* Clear CSR: on OMAP1 the read itself clears it (value unused);
	 * on OMAP2+ status bits are cleared by writing 1s. */
	if (cpu_class_is_omap1())
		status = dma_read(CSR(lch));
	else if (cpu_class_is_omap2())
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));

	/* Enable some nice interrupts. */
	dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
}
639
/* Mask all channel interrupts (CICR) on OMAP2+; no-op on OMAP1-class. */
static void omap_disable_channel_irq(int lch)
{
	if (!cpu_class_is_omap2())
		return;

	dma_write(0, CICR(lch));
}
645
/* Add interrupt sources to the set enabled for this channel */
void omap_enable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs |= bits;
}
EXPORT_SYMBOL(omap_enable_dma_irq);
651
/* Remove interrupt sources from the set enabled for this channel */
void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
657
/*
 * Link this channel to its successor by writing the next channel
 * number plus the ENABLE_LNK bit (15) into CLNK_CTRL.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	/* Clear the OMAP1 STOP_LNK bit first */
	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* OMAP2+ chained channels link via next_linked_ch instead */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(l, CLNK_CTRL(lch));
}
679
/*
 * Break the channel's link: mask its interrupts, stop (OMAP1) or clear
 * (OMAP2+) the link in CLNK_CTRL, and mark the channel inactive.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR(lch));
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	dma_write(l, CLNK_CTRL(lch));
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
702
/*
 * Unmask this channel's bit in the shared IRQENABLE_L0 register.  The
 * read-modify-write runs under dma_chan_lock because the register is
 * shared by all channels.  No-op on OMAP1-class parts.
 */
static inline void omap2_enable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	val = dma_read(IRQENABLE_L0);
	val |= 1 << lch;
	dma_write(val, IRQENABLE_L0);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
717
/*
 * Mask this channel's bit in the shared IRQENABLE_L0 register, under
 * dma_chan_lock (counterpart of omap2_enable_irq_lch).  No-op on
 * OMAP1-class parts.
 */
static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	val = dma_read(IRQENABLE_L0);
	val &= ~(1 << lch);
	dma_write(val, IRQENABLE_L0);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
732
/*
 * Allocate a free logical DMA channel.
 *
 * @dev_id:	hardware request line to bind; 0 means a software-
 *		triggered channel (first free channel is taken)
 * @dev_name:	owner name, kept for diagnostics
 * @callback:	completion/status handler (may be NULL)
 * @data:	cookie passed back to @callback
 * @dma_ch_out:	on success, receives the allocated channel number
 *
 * Returns 0 on success or -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	/* Find a free channel under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	/* Claim the channel before dropping the lock */
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	/* Default interrupt sources; omap_enable_dma_irq() can add more */
	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR(free_ch));
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR(free_ch));
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
		dma_write(1 << free_ch, IRQSTATUS_L0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
812
/*
 * Release a channel obtained with omap_request_dma(): stop any
 * transfer, mask and clear its interrupts, and return it to the free
 * pool.  Complains and bails out if the channel was never allocated.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
	}

	if (cpu_class_is_omap2()) {
		omap2_disable_irq_lch(lch);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
		dma_write(1 << lch, IRQSTATUS_L0);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
		omap_clear_dma(lch);
	}

	/* Mark the channel free again under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
852
/**
 * @brief omap_dma_set_global_params : Set global priority settings for dma
 *
 * @param arb_rate - arbitration rate (0 is treated as 1)
 * @param max_fifo_depth - FIFO depth (0 is treated as 1)
 * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
 *                                                 DMA_THREAD_RESERVE_ONET
 *                                                 DMA_THREAD_RESERVE_TWOT
 *                                                 DMA_THREAD_RESERVE_THREET
 *
 * OMAP2+ only; logs an error and returns on OMAP1-class parts.
 */
void
omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
{
	u32 reg;

	if (!cpu_class_is_omap2()) {
		printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
		return;
	}

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

	/* GCR fields: fifo depth [7:0], thread reserve [13:12], arb [23:16] */
	reg = 0xff & max_fifo_depth;
	reg |= (0x3 & tparams) << 12;
	reg |= (arb_rate & 0xff) << 16;

	dma_write(reg, GCR);
}
EXPORT_SYMBOL(omap_dma_set_global_params);
885
/**
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 *
 * @param lch
 * @param read_prio - Read priority
 * @param write_prio - Write priority
 * Both of the above can be set with one of the following values :
 *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 *
 * Returns 0 on success, -EINVAL for an out-of-range channel.
 */
int
omap_dma_set_prio_lch(int lch, unsigned char read_prio,
		      unsigned char write_prio)
{
	u32 l;

	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
		printk(KERN_ERR "Invalid channel id\n");
		return -EINVAL;
	}
	/* Read priority is CCR bit 6; write priority (2430 and later
	 * parts only) is bit 26 */
	l = dma_read(CCR(lch));
	l &= ~((1 << 6) | (1 << 26));
	if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
	else
		l |= ((read_prio & 0x1) << 6);

	dma_write(l, CCR(lch));

	return 0;
}
EXPORT_SYMBOL(omap_dma_set_prio_lch);
917
/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 */
void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* The register sequence runs with local interrupts disabled */
	local_irq_save(flags);

	if (cpu_class_is_omap1()) {
		u32 l;

		/* Drop the enable bit to stop the channel */
		l = dma_read(CCR(lch));
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));

		/* Clear pending interrupts */
		l = dma_read(CSR(lch));
	}

	if (cpu_class_is_omap2()) {
		int i;
		void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
		/* Zero the channel's entire register block (0x44 bytes) */
		for (i = 0; i < 0x44; i += 4)
			__raw_writel(0, lch_base + i);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(omap_clear_dma);
949
950 void omap_start_dma(int lch)
951 {
952         u32 l;
953
954         /*
955          * The CPC/CDAC register needs to be initialized to zero
956          * before starting dma transfer.
957          */
958         if (cpu_is_omap15xx())
959                 dma_write(0, CPC(lch));
960         else
961                 dma_write(0, CDAC(lch));
962
963         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
964                 int next_lch, cur_lch;
965                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
966
967                 dma_chan_link_map[lch] = 1;
968                 /* Set the link register of the first channel */
969                 enable_lnk(lch);
970
971                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
972                 cur_lch = dma_chan[lch].next_lch;
973                 do {
974                         next_lch = dma_chan[cur_lch].next_lch;
975
976                         /* The loop case: we've been here already */
977                         if (dma_chan_link_map[cur_lch])
978                                 break;
979                         /* Mark the current channel */
980                         dma_chan_link_map[cur_lch] = 1;
981
982                         enable_lnk(cur_lch);
983                         omap_enable_channel_irq(cur_lch);
984
985                         cur_lch = next_lch;
986                 } while (next_lch != -1);
987         } else if (cpu_is_omap242x() ||
988                 (cpu_is_omap243x() &&  omap_type() <= OMAP2430_REV_ES1_0)) {
989
990                 /* Errata: Need to write lch even if not using chaining */
991                 dma_write(lch, CLNK_CTRL(lch));
992         }
993
994         omap_enable_channel_irq(lch);
995
996         l = dma_read(CCR(lch));
997
998         /*
999          * Errata: On ES2.0 BUFFERING disable must be set.
1000          * This will always fail on ES1.0
1001          */
1002         if (cpu_is_omap24xx())
1003                 l |= OMAP_DMA_CCR_EN;
1004
1005         l |= OMAP_DMA_CCR_EN;
1006         dma_write(l, CCR(lch));
1007
1008         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1009 }
1010 EXPORT_SYMBOL(omap_start_dma);
1011
1012 void omap_stop_dma(int lch)
1013 {
1014         u32 l;
1015
1016         /* Disable all interrupts on the channel */
1017         if (cpu_class_is_omap1())
1018                 dma_write(0, CICR(lch));
1019
1020         l = dma_read(CCR(lch));
1021         l &= ~OMAP_DMA_CCR_EN;
1022         dma_write(l, CCR(lch));
1023
1024         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1025                 int next_lch, cur_lch = lch;
1026                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
1027
1028                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1029                 do {
1030                         /* The loop case: we've been here already */
1031                         if (dma_chan_link_map[cur_lch])
1032                                 break;
1033                         /* Mark the current channel */
1034                         dma_chan_link_map[cur_lch] = 1;
1035
1036                         disable_lnk(cur_lch);
1037
1038                         next_lch = dma_chan[cur_lch].next_lch;
1039                         cur_lch = next_lch;
1040                 } while (next_lch != -1);
1041         }
1042
1043         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
1044 }
1045 EXPORT_SYMBOL(omap_stop_dma);
1046
1047 /*
1048  * Allows changing the DMA callback function or data. This may be needed if
1049  * the driver shares a single DMA channel for multiple dma triggers.
1050  */
1051 int omap_set_dma_callback(int lch,
1052                           void (*callback)(int lch, u16 ch_status, void *data),
1053                           void *data)
1054 {
1055         unsigned long flags;
1056
1057         if (lch < 0)
1058                 return -ENODEV;
1059
1060         spin_lock_irqsave(&dma_chan_lock, flags);
1061         if (dma_chan[lch].dev_id == -1) {
1062                 printk(KERN_ERR "DMA callback for not set for free channel\n");
1063                 spin_unlock_irqrestore(&dma_chan_lock, flags);
1064                 return -EINVAL;
1065         }
1066         dma_chan[lch].callback = callback;
1067         dma_chan[lch].data = data;
1068         spin_unlock_irqrestore(&dma_chan_lock, flags);
1069
1070         return 0;
1071 }
1072 EXPORT_SYMBOL(omap_set_dma_callback);
1073
1074 /*
1075  * Returns current physical source address for the given DMA channel.
1076  * If the channel is running the caller must disable interrupts prior calling
1077  * this function and process the returned value before re-enabling interrupt to
1078  * prevent races with the interrupt handler. Note that in continuous mode there
1079  * is a chance for CSSA_L register overflow inbetween the two reads resulting
1080  * in incorrect return value.
1081  */
1082 dma_addr_t omap_get_dma_src_pos(int lch)
1083 {
1084         dma_addr_t offset = 0;
1085
1086         if (cpu_is_omap15xx())
1087                 offset = dma_read(CPC(lch));
1088         else
1089                 offset = dma_read(CSAC(lch));
1090
1091         /*
1092          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1093          * read before the DMA controller finished disabling the channel.
1094          */
1095         if (!cpu_is_omap15xx() && offset == 0)
1096                 offset = dma_read(CSAC(lch));
1097
1098         if (cpu_class_is_omap1())
1099                 offset |= (dma_read(CSSA_U(lch)) << 16);
1100
1101         return offset;
1102 }
1103 EXPORT_SYMBOL(omap_get_dma_src_pos);
1104
1105 /*
1106  * Returns current physical destination address for the given DMA channel.
1107  * If the channel is running the caller must disable interrupts prior calling
1108  * this function and process the returned value before re-enabling interrupt to
1109  * prevent races with the interrupt handler. Note that in continuous mode there
1110  * is a chance for CDSA_L register overflow inbetween the two reads resulting
1111  * in incorrect return value.
1112  */
1113 dma_addr_t omap_get_dma_dst_pos(int lch)
1114 {
1115         dma_addr_t offset = 0;
1116
1117         if (cpu_is_omap15xx())
1118                 offset = dma_read(CPC(lch));
1119         else
1120                 offset = dma_read(CDAC(lch));
1121
1122         /*
1123          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1124          * read before the DMA controller finished disabling the channel.
1125          */
1126         if (!cpu_is_omap15xx() && offset == 0)
1127                 offset = dma_read(CDAC(lch));
1128
1129         if (cpu_class_is_omap1())
1130                 offset |= (dma_read(CDSA_U(lch)) << 16);
1131
1132         return offset;
1133 }
1134 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1135
1136 int omap_get_dma_active_status(int lch)
1137 {
1138         return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0;
1139 }
1140 EXPORT_SYMBOL(omap_get_dma_active_status);
1141
1142 int omap_dma_running(void)
1143 {
1144         int lch;
1145
1146         if (cpu_class_is_omap1())
1147                 if (omap_lcd_dma_running())
1148                         return 1;
1149
1150         for (lch = 0; lch < dma_chan_count; lch++)
1151                 if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN)
1152                         return 1;
1153
1154         return 0;
1155 }
1156
1157 /*
1158  * lch_queue DMA will start right after lch_head one is finished.
1159  * For this DMA link to start, you still need to start (see omap_start_dma)
1160  * the first one. That will fire up the entire queue.
1161  */
1162 void omap_dma_link_lch(int lch_head, int lch_queue)
1163 {
1164         if (omap_dma_in_1510_mode()) {
1165                 if (lch_head == lch_queue) {
1166                         dma_write(dma_read(CCR(lch_head)) | (3 << 8),
1167                                                                 CCR(lch_head));
1168                         return;
1169                 }
1170                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1171                 BUG();
1172                 return;
1173         }
1174
1175         if ((dma_chan[lch_head].dev_id == -1) ||
1176             (dma_chan[lch_queue].dev_id == -1)) {
1177                 printk(KERN_ERR "omap_dma: trying to link "
1178                        "non requested channels\n");
1179                 dump_stack();
1180         }
1181
1182         dma_chan[lch_head].next_lch = lch_queue;
1183 }
1184 EXPORT_SYMBOL(omap_dma_link_lch);
1185
1186 /*
1187  * Once the DMA queue is stopped, we can destroy it.
1188  */
1189 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1190 {
1191         if (omap_dma_in_1510_mode()) {
1192                 if (lch_head == lch_queue) {
1193                         dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
1194                                                                 CCR(lch_head));
1195                         return;
1196                 }
1197                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1198                 BUG();
1199                 return;
1200         }
1201
1202         if (dma_chan[lch_head].next_lch != lch_queue ||
1203             dma_chan[lch_head].next_lch == -1) {
1204                 printk(KERN_ERR "omap_dma: trying to unlink "
1205                        "non linked channels\n");
1206                 dump_stack();
1207         }
1208
1209         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1210             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1211                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1212                        "before unlinking\n");
1213                 dump_stack();
1214         }
1215
1216         dma_chan[lch_head].next_lch = -1;
1217 }
1218 EXPORT_SYMBOL(omap_dma_unlink_lch);
1219
1220 /*----------------------------------------------------------------------------*/
1221
1222 #ifndef CONFIG_ARCH_OMAP1
/* Create chain of DMA channels: splice lch_queue into the circular
 * doubly-linked list after lch_head, and mirror the links into hardware. */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
	u32 l;

	/* Check if this is the first link in chain */
	if (dma_chan[lch_head].next_linked_ch == -1) {
		/* First link: head and queue form a two-entry circular list
		 * (each is the other's next and prev). */
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[lch_head].prev_linked_ch = lch_queue;
		dma_chan[lch_queue].next_linked_ch = lch_head;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
	}

	/* a link exists, link the new channel in circular chain */
	else {
		dma_chan[lch_queue].next_linked_ch =
					dma_chan[lch_head].next_linked_ch;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
					lch_queue;
	}

	/* Mirror the software links into the CLNK_CTRL registers; the low
	 * 5 bits hold the number of the next channel in the chain. */
	l = dma_read(CLNK_CTRL(lch_head));
	l &= ~(0x1f);
	l |= lch_queue;
	dma_write(l, CLNK_CTRL(lch_head));

	l = dma_read(CLNK_CTRL(lch_queue));
	l &= ~(0x1f);
	l |= (dma_chan[lch_queue].next_linked_ch);
	dma_write(l, CLNK_CTRL(lch_queue));
}
1256
/**
 * @brief omap_request_dma_chain : Request a chain of DMA channels
 *
 * @param dev_id - Device id using the dma channel
 * @param dev_name - Device name
 * @param callback - Call back function
 * @chain_id - Out parameter: id of the created chain (equals the number of
 *             its first channel)
 * @no_of_chans - Number of channels requested
 * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
 *                                            OMAP_DMA_DYNAMIC_CHAIN
 * @params - Channel parameters applied to every channel in the chain
 *
 * @return - Success : 0
 *           Failure: -EINVAL/-ENOMEM
 */
int omap_request_dma_chain(int dev_id, const char *dev_name,
			   void (*callback) (int lch, u16 ch_status,
					     void *data),
			   int *chain_id, int no_of_chans, int chain_mode,
			   struct omap_dma_channel_params params)
{
	int *channels;
	int i, err;

	/* Is the chain mode valid ? */
	if (chain_mode != OMAP_DMA_STATIC_CHAIN
			&& chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
		printk(KERN_ERR "Invalid chain mode requested\n");
		return -EINVAL;
	}

	if (unlikely((no_of_chans < 1
			|| no_of_chans > dma_lch_count))) {
		printk(KERN_ERR "Invalid Number of channels requested\n");
		return -EINVAL;
	}

	/*
	 * Allocate a queue to maintain the status of the channels
	 * in the chain
	 */
	channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
	if (channels == NULL) {
		printk(KERN_ERR "omap_dma: No memory for channel queue\n");
		return -ENOMEM;
	}

	/* request and reserve DMA channels for the chain */
	for (i = 0; i < no_of_chans; i++) {
		err = omap_request_dma(dev_id, dev_name,
					callback, NULL, &channels[i]);
		if (err < 0) {
			/* Roll back: release every channel acquired so far */
			int j;
			for (j = 0; j < i; j++)
				omap_free_dma(channels[j]);
			kfree(channels);
			printk(KERN_ERR "omap_dma: Request failed %d\n", err);
			return err;
		}
		dma_chan[channels[i]].prev_linked_ch = -1;
		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;

		/*
		 * Allowing client drivers to set common parameters now,
		 * so that later only relevant (src_start, dest_start
		 * and element count) can be set
		 */
		omap_set_dma_params(channels[i], &params);
	}

	/* The chain is identified by its first channel's number */
	*chain_id = channels[0];
	dma_linked_lch[*chain_id].linked_dmach_q = channels;
	dma_linked_lch[*chain_id].chain_mode = chain_mode;
	dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
	dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;

	for (i = 0; i < no_of_chans; i++)
		dma_chan[channels[i]].chain_id = *chain_id;

	/* Reset the Queue pointers */
	OMAP_DMA_CHAIN_QINIT(*chain_id);

	/* Set up the chain; a single channel is linked to itself */
	if (no_of_chans == 1)
		create_dma_lch_chain(channels[0], channels[0]);
	else {
		for (i = 0; i < (no_of_chans - 1); i++)
			create_dma_lch_chain(channels[i], channels[i + 1]);
	}

	return 0;
}
EXPORT_SYMBOL(omap_request_dma_chain);
1350
1351 /**
1352  * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1353  * params after setting it. Dont do this while dma is running!!
1354  *
1355  * @param chain_id - Chained logical channel id.
1356  * @param params
1357  *
1358  * @return - Success : 0
1359  *           Failure : -EINVAL
1360  */
1361 int omap_modify_dma_chain_params(int chain_id,
1362                                 struct omap_dma_channel_params params)
1363 {
1364         int *channels;
1365         u32 i;
1366
1367         /* Check for input params */
1368         if (unlikely((chain_id < 0
1369                         || chain_id >= dma_lch_count))) {
1370                 printk(KERN_ERR "Invalid chain id\n");
1371                 return -EINVAL;
1372         }
1373
1374         /* Check if the chain exists */
1375         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1376                 printk(KERN_ERR "Chain doesn't exists\n");
1377                 return -EINVAL;
1378         }
1379         channels = dma_linked_lch[chain_id].linked_dmach_q;
1380
1381         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1382                 /*
1383                  * Allowing client drivers to set common parameters now,
1384                  * so that later only relevant (src_start, dest_start
1385                  * and element count) can be set
1386                  */
1387                 omap_set_dma_params(channels[i], &params);
1388         }
1389
1390         return 0;
1391 }
1392 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1393
1394 /**
1395  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1396  *
1397  * @param chain_id
1398  *
1399  * @return - Success : 0
1400  *           Failure : -EINVAL
1401  */
1402 int omap_free_dma_chain(int chain_id)
1403 {
1404         int *channels;
1405         u32 i;
1406
1407         /* Check for input params */
1408         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1409                 printk(KERN_ERR "Invalid chain id\n");
1410                 return -EINVAL;
1411         }
1412
1413         /* Check if the chain exists */
1414         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1415                 printk(KERN_ERR "Chain doesn't exists\n");
1416                 return -EINVAL;
1417         }
1418
1419         channels = dma_linked_lch[chain_id].linked_dmach_q;
1420         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1421                 dma_chan[channels[i]].next_linked_ch = -1;
1422                 dma_chan[channels[i]].prev_linked_ch = -1;
1423                 dma_chan[channels[i]].chain_id = -1;
1424                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1425                 omap_free_dma(channels[i]);
1426         }
1427
1428         kfree(channels);
1429
1430         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1431         dma_linked_lch[chain_id].chain_mode = -1;
1432         dma_linked_lch[chain_id].chain_state = -1;
1433
1434         return (0);
1435 }
1436 EXPORT_SYMBOL(omap_free_dma_chain);
1437
1438 /**
1439  * @brief omap_dma_chain_status - Check if the chain is in
1440  * active / inactive state.
1441  * @param chain_id
1442  *
1443  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1444  *           Failure : -EINVAL
1445  */
1446 int omap_dma_chain_status(int chain_id)
1447 {
1448         /* Check for input params */
1449         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1450                 printk(KERN_ERR "Invalid chain id\n");
1451                 return -EINVAL;
1452         }
1453
1454         /* Check if the chain exists */
1455         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1456                 printk(KERN_ERR "Chain doesn't exists\n");
1457                 return -EINVAL;
1458         }
1459         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1460                         dma_linked_lch[chain_id].q_count);
1461
1462         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1463                 return OMAP_DMA_CHAIN_INACTIVE;
1464
1465         return OMAP_DMA_CHAIN_ACTIVE;
1466 }
1467 EXPORT_SYMBOL(omap_dma_chain_status);
1468
/**
 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
 * set the params and start the transfer.
 *
 * @param chain_id
 * @param src_start - buffer start address (0 means: keep current CSSA)
 * @param dest_start - Dest address (0 means: keep current CDSA)
 * @param elem_count - element count, must be >= 1
 * @param frame_count - may be negative for indexed transfers
 * @param callbk_data - channel callback parameter data.
 *
 * @return  - Success : 0
 *            Failure: -EINVAL/-EBUSY
 */
int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
			int elem_count, int frame_count, void *callbk_data)
{
	int *channels;
	u32 l, lch;
	int start_dma = 0;

	/*
	 * if buffer size is less than 1 then there is
	 * no use of starting the chain
	 */
	if (elem_count < 1) {
		printk(KERN_ERR "Invalid buffer size\n");
		return -EINVAL;
	}

	/* Check for input params */
	if (unlikely((chain_id < 0
			|| chain_id >= dma_lch_count))) {
		printk(KERN_ERR "Invalid chain id\n");
		return -EINVAL;
	}

	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		printk(KERN_ERR "Chain doesn't exist\n");
		return -EINVAL;
	}

	/* Check if all the channels in chain are in use */
	if (OMAP_DMA_CHAIN_QFULL(chain_id))
		return -EBUSY;

	/* Frame count may be negative in case of indexed transfers */
	channels = dma_linked_lch[chain_id].linked_dmach_q;

	/* Get a free channel (the one at the queue tail) */
	lch = channels[dma_linked_lch[chain_id].q_tail];

	/* Store the callback data */
	dma_chan[lch].data = callbk_data;

	/* Increment the q_tail */
	OMAP_DMA_CHAIN_INCQTAIL(chain_id);

	/* Set the params to the free channel */
	if (src_start != 0)
		dma_write(src_start, CSSA(lch));
	if (dest_start != 0)
		dma_write(dest_start, CDSA(lch));

	/* Write the buffer size */
	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));

	/*
	 * If the chain is dynamically linked,
	 * then we may have to start the chain if its not active
	 */
	if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {

		/*
		 * In Dynamic chain, if the chain is not started,
		 * queue the channel
		 */
		if (dma_linked_lch[chain_id].chain_state ==
						DMA_CHAIN_NOTSTARTED) {
			/* Enable the link in previous channel */
			if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
								DMA_CH_QUEUED)
				enable_lnk(dma_chan[lch].prev_linked_ch);
			dma_chan[lch].state = DMA_CH_QUEUED;
		}

		/*
		 * Chain is already started, make sure its active,
		 * if not then start the chain
		 */
		else {
			start_dma = 1;

			if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
							DMA_CH_STARTED) {
				enable_lnk(dma_chan[lch].prev_linked_ch);
				dma_chan[lch].state = DMA_CH_QUEUED;
				start_dma = 0;
				/*
				 * Bit 7 is the CCR enable bit. If the
				 * previous channel already stopped, the link
				 * will never fire, so drop it and start this
				 * channel directly.
				 */
				if (0 == ((1 << 7) & dma_read(
					CCR(dma_chan[lch].prev_linked_ch)))) {
					disable_lnk(dma_chan[lch].
						    prev_linked_ch);
					pr_debug("\n prev ch is stopped\n");
					start_dma = 1;
				}
			}

			else if (dma_chan[dma_chan[lch].prev_linked_ch].state
							== DMA_CH_QUEUED) {
				enable_lnk(dma_chan[lch].prev_linked_ch);
				dma_chan[lch].state = DMA_CH_QUEUED;
				start_dma = 0;
			}
			omap_enable_channel_irq(lch);

			l = dma_read(CCR(lch));

			/*
			 * NOTE(review): bit 25 is forced to track bit 24
			 * (sync-mode select) — confirm the pairing against
			 * the SDMA CCR register description in the TRM.
			 */
			if ((0 == (l & (1 << 24))))
				l &= ~(1 << 25);
			else
				l |= (1 << 25);
			if (start_dma == 1) {
				/* Only start if not already enabled */
				if (0 == (l & (1 << 7))) {
					l |= (1 << 7);
					dma_chan[lch].state = DMA_CH_STARTED;
					pr_debug("starting %d\n", lch);
					dma_write(l, CCR(lch));
				} else
					start_dma = 0;
			} else {
				if (0 == (l & (1 << 7)))
					dma_write(l, CCR(lch));
			}
			dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
		}
	}

	return 0;
}
EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1611
1612 /**
1613  * @brief omap_start_dma_chain_transfers - Start the chain
1614  *
1615  * @param chain_id
1616  *
1617  * @return - Success : 0
1618  *           Failure : -EINVAL/-EBUSY
1619  */
1620 int omap_start_dma_chain_transfers(int chain_id)
1621 {
1622         int *channels;
1623         u32 l, i;
1624
1625         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1626                 printk(KERN_ERR "Invalid chain id\n");
1627                 return -EINVAL;
1628         }
1629
1630         channels = dma_linked_lch[chain_id].linked_dmach_q;
1631
1632         if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1633                 printk(KERN_ERR "Chain is already started\n");
1634                 return -EBUSY;
1635         }
1636
1637         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1638                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1639                                                                         i++) {
1640                         enable_lnk(channels[i]);
1641                         omap_enable_channel_irq(channels[i]);
1642                 }
1643         } else {
1644                 omap_enable_channel_irq(channels[0]);
1645         }
1646
1647         l = dma_read(CCR(channels[0]));
1648         l |= (1 << 7);
1649         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1650         dma_chan[channels[0]].state = DMA_CH_STARTED;
1651
1652         if ((0 == (l & (1 << 24))))
1653                 l &= ~(1 << 25);
1654         else
1655                 l |= (1 << 25);
1656         dma_write(l, CCR(channels[0]));
1657
1658         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1659
1660         return 0;
1661 }
1662 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1663
/**
 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
 *
 * @param chain_id
 *
 * @return - Success : 0
 *           Failure : EINVAL
 */
int omap_stop_dma_chain_transfers(int chain_id)
{
	int *channels;
	u32 l, i;
	u32 sys_cf;	/* saved OCP_SYSCONFIG, restored before returning */

	/* Check for input params */
	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
		printk(KERN_ERR "Invalid chain id\n");
		return -EINVAL;
	}

	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		printk(KERN_ERR "Chain doesn't exists\n");
		return -EINVAL;
	}
	channels = dma_linked_lch[chain_id].linked_dmach_q;

	/*
	 * DMA Errata:
	 * Special programming model needed to disable DMA before end of block
	 */
	sys_cf = dma_read(OCP_SYSCONFIG);
	l = sys_cf;
	/* Middle mode reg set no Standby (clear MIDLEMODE bits 13:12) */
	l &= ~((1 << 12)|(1 << 13));
	dma_write(l, OCP_SYSCONFIG);

	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {

		/* Stop the Channel transmission (clear CCR enable, bit 7) */
		l = dma_read(CCR(channels[i]));
		l &= ~(1 << 7);
		dma_write(l, CCR(channels[i]));

		/* Disable the link in all the channels */
		disable_lnk(channels[i]);
		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;

	}
	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;

	/* Reset the Queue pointers */
	OMAP_DMA_CHAIN_QINIT(chain_id);

	/* Errata - put in the old value */
	dma_write(sys_cf, OCP_SYSCONFIG);

	return 0;
}
EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1724
1725 /* Get the index of the ongoing DMA in chain */
1726 /**
1727  * @brief omap_get_dma_chain_index - Get the element and frame index
1728  * of the ongoing DMA in chain
1729  *
1730  * @param chain_id
1731  * @param ei - Element index
1732  * @param fi - Frame index
1733  *
1734  * @return - Success : 0
1735  *           Failure : -EINVAL
1736  */
1737 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1738 {
1739         int lch;
1740         int *channels;
1741
1742         /* Check for input params */
1743         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1744                 printk(KERN_ERR "Invalid chain id\n");
1745                 return -EINVAL;
1746         }
1747
1748         /* Check if the chain exists */
1749         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1750                 printk(KERN_ERR "Chain doesn't exists\n");
1751                 return -EINVAL;
1752         }
1753         if ((!ei) || (!fi))
1754                 return -EINVAL;
1755
1756         channels = dma_linked_lch[chain_id].linked_dmach_q;
1757
1758         /* Get the current channel */
1759         lch = channels[dma_linked_lch[chain_id].q_head];
1760
1761         *ei = dma_read(CCEN(lch));
1762         *fi = dma_read(CCFN(lch));
1763
1764         return 0;
1765 }
1766 EXPORT_SYMBOL(omap_get_dma_chain_index);
1767
1768 /**
1769  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1770  * ongoing DMA in chain
1771  *
1772  * @param chain_id
1773  *
1774  * @return - Success : Destination position
1775  *           Failure : -EINVAL
1776  */
1777 int omap_get_dma_chain_dst_pos(int chain_id)
1778 {
1779         int lch;
1780         int *channels;
1781
1782         /* Check for input params */
1783         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1784                 printk(KERN_ERR "Invalid chain id\n");
1785                 return -EINVAL;
1786         }
1787
1788         /* Check if the chain exists */
1789         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1790                 printk(KERN_ERR "Chain doesn't exists\n");
1791                 return -EINVAL;
1792         }
1793
1794         channels = dma_linked_lch[chain_id].linked_dmach_q;
1795
1796         /* Get the current channel */
1797         lch = channels[dma_linked_lch[chain_id].q_head];
1798
1799         return dma_read(CDAC(lch));
1800 }
1801 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1802
1803 /**
1804  * @brief omap_get_dma_chain_src_pos - Get the source position
1805  * of the ongoing DMA in chain
1806  * @param chain_id
1807  *
1808  * @return - Success : Destination position
1809  *           Failure : -EINVAL
1810  */
1811 int omap_get_dma_chain_src_pos(int chain_id)
1812 {
1813         int lch;
1814         int *channels;
1815
1816         /* Check for input params */
1817         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1818                 printk(KERN_ERR "Invalid chain id\n");
1819                 return -EINVAL;
1820         }
1821
1822         /* Check if the chain exists */
1823         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1824                 printk(KERN_ERR "Chain doesn't exists\n");
1825                 return -EINVAL;
1826         }
1827
1828         channels = dma_linked_lch[chain_id].linked_dmach_q;
1829
1830         /* Get the current channel */
1831         lch = channels[dma_linked_lch[chain_id].q_head];
1832
1833         return dma_read(CSAC(lch));
1834 }
1835 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1836 #endif  /* ifndef CONFIG_ARCH_OMAP1 */
1837
1838 /*----------------------------------------------------------------------------*/
1839
1840 #ifdef CONFIG_ARCH_OMAP1
1841
/*
 * Service one OMAP1 DMA channel event.
 *
 * In 1510 mode, logical channels 6+ have no CSR of their own: their status
 * arrives in the upper bits (>> 7) of the CSR of channels 0-2 and is parked
 * in dma_chan[ch + 6].saved_csr until this handler is re-run for the high
 * channel.  Returns 1 if an event was handled, 0 otherwise.
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	/* High (1510-mode) channels consume the status saved earlier */
	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = dma_read(CSR(ch));
	/* Stash the shadowed high-channel bits for a later pass */
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	/* No event bits set for this channel */
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		printk(KERN_WARNING "Spurious interrupt from DMA channel "
		       "%d (CSR %04x)\n", ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		printk(KERN_WARNING "DMA timeout with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		printk(KERN_WARNING "DMA synchronization event drop occurred "
		       "with device %d\n", dma_chan[ch].dev_id);
	/* Block complete: the channel is no longer active */
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
1875
1876 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1877 {
1878         int ch = ((int) dev_id) - 1;
1879         int handled = 0;
1880
1881         for (;;) {
1882                 int handled_now = 0;
1883
1884                 handled_now += omap1_dma_handle_ch(ch);
1885                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1886                         handled_now += omap1_dma_handle_ch(ch + 6);
1887                 if (!handled_now)
1888                         break;
1889                 handled += handled_now;
1890         }
1891
1892         return handled ? IRQ_HANDLED : IRQ_NONE;
1893 }
1894
1895 #else
1896 #define omap1_dma_irq_handler   NULL
1897 #endif
1898
1899 #ifdef CONFIG_ARCH_OMAP2PLUS
1900
/*
 * Service one OMAP2+ sDMA logical channel interrupt: report error status
 * bits, apply the transaction-error disable errata, clear the channel and
 * L0 line status, advance the chain queue when the channel is chained, and
 * finally run the client callback.  Always returns 0.
 */
static int omap2_dma_handle_ch(int ch)
{
	u32 status = dma_read(CSR(ch));

	/* Nothing pending on this channel: ack the L0 line and leave */
	if (!status) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
				ch);
		dma_write(1 << ch, IRQSTATUS_L0);
		return 0;
	}
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
					"channel %d\n", status, ch);
		return 0;
	}
	if (unlikely(status & OMAP_DMA_DROP_IRQ))
		printk(KERN_INFO
		       "DMA synchronization event drop occurred with device "
		       "%d\n", dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
		printk(KERN_INFO "DMA transaction error with device %d\n",
		       dma_chan[ch].dev_id);
		if (cpu_class_is_omap2()) {
			/*
			 * Errata: sDMA Channel is not disabled
			 * after a transaction error. So we explicitely
			 * disable the channel
			 */
			u32 ccr;

			ccr = dma_read(CCR(ch));
			ccr &= ~OMAP_DMA_CCR_EN;
			dma_write(ccr, CCR(ch));
			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
		}
	}
	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
		printk(KERN_INFO "DMA secure error with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
		printk(KERN_INFO "DMA misaligned error with device %d\n",
		       dma_chan[ch].dev_id);

	/* Clear the channel status bits and the level-0 interrupt line */
	dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
	dma_write(1 << ch, IRQSTATUS_L0);

	/* If the ch is not chained then chain_id will be -1 */
	if (dma_chan[ch].chain_id != -1) {
		int chain_id = dma_chan[ch].chain_id;
		dma_chan[ch].state = DMA_CH_NOTSTARTED;
		/* Link bit set: the next linked channel is now running */
		if (dma_read(CLNK_CTRL(ch)) & (1 << 15))
			dma_chan[dma_chan[ch].next_linked_ch].state =
							DMA_CH_STARTED;
		if (dma_linked_lch[chain_id].chain_mode ==
						OMAP_DMA_DYNAMIC_CHAIN)
			disable_lnk(ch);

		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
			OMAP_DMA_CHAIN_INCQHEAD(chain_id);

		/* Re-read status so the callback sees the current value */
		status = dma_read(CSR(ch));
	}

	dma_write(status, CSR(ch));

	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, status, dma_chan[ch].data);

	return 0;
}
1973
1974 /* STATUS register count is from 1-32 while our is 0-31 */
1975 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1976 {
1977         u32 val, enable_reg;
1978         int i;
1979
1980         val = dma_read(IRQSTATUS_L0);
1981         if (val == 0) {
1982                 if (printk_ratelimit())
1983                         printk(KERN_WARNING "Spurious DMA IRQ\n");
1984                 return IRQ_HANDLED;
1985         }
1986         enable_reg = dma_read(IRQENABLE_L0);
1987         val &= enable_reg; /* Dispatch only relevant interrupts */
1988         for (i = 0; i < dma_lch_count && val != 0; i++) {
1989                 if (val & 1)
1990                         omap2_dma_handle_ch(i);
1991                 val >>= 1;
1992         }
1993
1994         return IRQ_HANDLED;
1995 }
1996
/* Shared IRQ action for the single OMAP2+ sDMA interrupt line */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
	.flags = IRQF_DISABLED
};
2002
2003 #else
2004 static struct irqaction omap24xx_dma_irq;
2005 #endif
2006
2007 /*----------------------------------------------------------------------------*/
2008
2009 void omap_dma_global_context_save(void)
2010 {
2011         omap_dma_global_context.dma_irqenable_l0 =
2012                 dma_read(IRQENABLE_L0);
2013         omap_dma_global_context.dma_ocp_sysconfig =
2014                 dma_read(OCP_SYSCONFIG);
2015         omap_dma_global_context.dma_gcr = dma_read(GCR);
2016 }
2017
2018 void omap_dma_global_context_restore(void)
2019 {
2020         int ch;
2021
2022         dma_write(omap_dma_global_context.dma_gcr, GCR);
2023         dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2024                 OCP_SYSCONFIG);
2025         dma_write(omap_dma_global_context.dma_irqenable_l0,
2026                 IRQENABLE_L0);
2027
2028         /*
2029          * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2030          * after secure sram context save and restore. Hence we need to
2031          * manually clear those IRQs to avoid spurious interrupts. This
2032          * affects only secure devices.
2033          */
2034         if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2035                 dma_write(0x3 , IRQSTATUS_L0);
2036
2037         for (ch = 0; ch < dma_chan_count; ch++)
2038                 if (dma_chan[ch].dev_id != -1)
2039                         omap_clear_dma(ch);
2040 }
2041
2042 /*----------------------------------------------------------------------------*/
2043
/*
 * Probe-time initialization of the OMAP DMA controller: map the register
 * window, size and allocate the logical-channel tables, detect the
 * hardware generation, reset every channel, and hook up the DMA
 * interrupt(s).  Runs once at arch_initcall time; returns 0 on success
 * or a negative errno.
 */
static int __init omap_init_dma(void)
{
	unsigned long base;
	int ch, r;

	/* Pick register base and logical channel count per SoC family */
	if (cpu_class_is_omap1()) {
		base = OMAP1_DMA_BASE;
		dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap24xx()) {
		base = OMAP24XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap34xx()) {
		base = OMAP34XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap44xx()) {
		base = OMAP44XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else {
		pr_err("DMA init failed for unsupported omap\n");
		return -ENODEV;
	}

	omap_dma_base = ioremap(base, SZ_4K);
	BUG_ON(!omap_dma_base);

	/* Honour the "omap_dma_reserve_ch=" boot argument (OMAP2+ only) */
	if (cpu_class_is_omap2() && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels <= dma_lch_count))
		dma_lch_count = omap_dma_reserve_channels;

	dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
				GFP_KERNEL);
	if (!dma_chan) {
		r = -ENOMEM;
		goto out_unmap;
	}

	/* Channel-chaining bookkeeping exists only on OMAP2+ */
	if (cpu_class_is_omap2()) {
		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
						dma_lch_count, GFP_KERNEL);
		if (!dma_linked_lch) {
			r = -ENOMEM;
			goto out_free;
		}
	}

	if (cpu_is_omap15xx()) {
		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
		dma_chan_count = 9;
		enable_1510_mode = 1;
	} else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
		printk(KERN_INFO "OMAP DMA hardware version %d\n",
		       dma_read(HW_ID));
		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
		       (dma_read(CAPS_0_U) << 16) |
		       dma_read(CAPS_0_L),
		       (dma_read(CAPS_1_U) << 16) |
		       dma_read(CAPS_1_L),
		       dma_read(CAPS_2), dma_read(CAPS_3),
		       dma_read(CAPS_4));
		if (!enable_1510_mode) {
			u16 w;

			/* Disable OMAP 3.0/3.1 compatibility mode. */
			w = dma_read(GSCR);
			w |= 1 << 3;
			dma_write(w, GSCR);
			dma_chan_count = 16;
		} else
			dma_chan_count = 9;
	} else if (cpu_class_is_omap2()) {
		u8 revision = dma_read(REVISION) & 0xff;
		printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
		       revision >> 4, revision & 0xf);
		dma_chan_count = dma_lch_count;
	} else {
		/*
		 * NOTE(review): this path returns success while leaving
		 * dma_chan allocated and the register window mapped with
		 * dma_chan_count == 0 -- presumably unreachable given the
		 * SoC checks above; confirm before relying on it.
		 */
		dma_chan_count = 0;
		return 0;
	}

	spin_lock_init(&dma_chan_lock);

	/* Reset every channel and mark it unallocated */
	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);
		if (cpu_class_is_omap2())
			omap2_disable_irq_lch(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		/* In 1510 mode channels 6+ have no IRQ line of their own */
		if (ch >= 6 && enable_1510_mode)
			continue;

		if (cpu_class_is_omap1()) {
			/*
			 * request_irq() doesn't like dev_id (ie. ch) being
			 * zero, so we have to kludge around this.
			 */
			r = request_irq(omap1_dma_irq[ch],
					omap1_dma_irq_handler, 0, "DMA",
					(void *) (ch + 1));
			if (r != 0) {
				int i;

				printk(KERN_ERR "unable to request IRQ %d "
				       "for DMA (error %d)\n",
				       omap1_dma_irq[ch], r);
				/* Roll back the IRQs grabbed so far */
				for (i = 0; i < ch; i++)
					free_irq(omap1_dma_irq[i],
						 (void *) (i + 1));
				goto out_free;
			}
		}
	}

	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
				DMA_DEFAULT_FIFO_DEPTH, 0);

	/* OMAP2+ uses a single shared interrupt line for all channels */
	if (cpu_class_is_omap2()) {
		int irq;
		if (cpu_is_omap44xx())
			irq = OMAP44XX_IRQ_SDMA_0;
		else
			irq = INT_24XX_SDMA_IRQ0;
		setup_irq(irq, &omap24xx_dma_irq);
	}

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		/* Enable smartidle idlemodes and autoidle */
		u32 v = dma_read(OCP_SYSCONFIG);
		v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
				DMA_SYSCONFIG_SIDLEMODE_MASK |
				DMA_SYSCONFIG_AUTOIDLE);
		v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_AUTOIDLE);
		dma_write(v , OCP_SYSCONFIG);
		/* reserve dma channels 0 and 1 in high security devices */
		if (cpu_is_omap34xx() &&
			(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
			printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
					"HS ROM code\n");
			dma_chan[0].dev_id = 0;
			dma_chan[1].dev_id = 1;
		}
	}

	return 0;

out_free:
	kfree(dma_chan);

out_unmap:
	iounmap(omap_dma_base);

	return r;
}
2201
2202 arch_initcall(omap_init_dma);
2203
2204 /*
2205  * Reserve the omap SDMA channels using cmdline bootarg
2206  * "omap_dma_reserve_ch=". The valid range is 1 to 32
2207  */
2208 static int __init omap_dma_cmdline_reserve_ch(char *str)
2209 {
2210         if (get_option(&str, &omap_dma_reserve_channels) != 1)
2211                 omap_dma_reserve_channels = 0;
2212         return 1;
2213 }
2214
2215 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2216
2217