Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daein...
author    Dave Airlie <airlied@redhat.com>
          Sun, 16 Dec 2012 05:49:46 +0000 (05:49 +0000)
committer Dave Airlie <airlied@redhat.com>
          Sun, 16 Dec 2012 05:49:46 +0000 (05:49 +0000)
Inki writes:
"- add dmabuf attach/detach feature
  . This patch would resolve performance deterioration issue
    when v4l2-based driver is using the buffer imported from gem.
- drm/exynos: use DMA_ATTR_NO_KERNEL_MAPPING attribute
  . With gem allocation, no kernel-space mapping is created and the
    physical pages are not mapped into kernel space. The physical
    pages are mapped into kernel space through the vmap function
    only for the console framebuffer (see the allocation sketch
    after this quote).
- add the two patches below, which I missed earlier.
  drm: exynos: moved exynos drm device registration to drm driver
  drm: exynos: moved exynos drm hdmi device registration to drm driver
- add the IPP subsystem framework and device drivers based on it.
  . This patch set includes the fimc, rotator and gsc drivers, which
    perform image scaling, rotation and color space conversion.
- add runtime pm support to the hdmi driver.
- and fixups and cleanups."

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos: (30 commits)
  drm/exynos: add gsc ipp driver
  drm/exynos: add rotator ipp driver
  drm/exynos: add fimc ipp driver
  drm/exynos: add iommu support for ipp
  drm/exynos: add ipp subsystem
  drm/exynos: support device tree for fimd
  drm/exynos: support extended screen coordinate of fimd
  drm/exynos: fix x, y coordinates for right bottom pixel
  drm/exynos: fix fb offset calculation for plane
  drm/exynos: hdmi: Fix potential NULL pointer dereference error
  drm/exynos: hdmi: Add CONFIG_OF and use of_match_ptr() macro
  drm/exynos: add support for hdmiphy power control for exynos5
  drm/exynos: add runtime pm support for mixer
  drm/exynos: added runtime pm support for hdmi
  drm/exynos: fix allocation and cache mapping type
  drm/exynos: reorder framebuffer init sequence
  drm/exynos/iommu: fix return value check in drm_create_iommu_mapping()
  drm/exynos: remove unused vaddr member
  drm/exynos: use DMA_ATTR_NO_KERNEL_MAPPING attribute
  drm/exynos: add exception codes to exynos_drm_fbdev_create()
  ...

35 files changed:
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_ddc.c
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_fimc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_gsc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_gsc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
drivers/gpu/drm/exynos/exynos_drm_iommu.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_ipp.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_rotator.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_hdmiphy.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-fimc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-gsc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-hdmi.h
drivers/gpu/drm/exynos/regs-rotator.h [new file with mode: 0644]
include/drm/exynos_drm.h
include/uapi/drm/exynos_drm.h

index 86fb75d3fcad4b9bfc2f5b7c6230c7ff06eb5ceb..1d1f1e5e33f0ff267daa1d8ed4818b0a4e00fc7e 100644 (file)
@@ -45,3 +45,27 @@ config DRM_EXYNOS_G2D
        depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
        help
          Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+       bool "Exynos DRM IPP"
+       depends on DRM_EXYNOS
+       help
+         Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+       bool "Exynos DRM FIMC"
+       depends on DRM_EXYNOS_IPP
+       help
+         Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+       bool "Exynos DRM Rotator"
+       depends on DRM_EXYNOS_IPP
+       help
+         Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+       bool "Exynos DRM GSC"
+       depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+       help
+         Choose this option if you want to use Exynos GSC for DRM.
index 26813b8a5056828a020a7d232df401a76fbc3655..639b49e1ec0586ceed662dbbfe3075bbf1157aa9 100644 (file)
@@ -16,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)   += exynos_hdmi.o exynos_mixer.o \
                                           exynos_drm_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)    += exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)     += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)     += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)    += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)     += exynos_drm_gsc.o
 
 obj-$(CONFIG_DRM_EXYNOS)               += exynosdrm.o
index 37e6ec704e1d05117324922ff31da960928be536..bef43e0342a69e82dcb7e91756158552c6ca8159 100644 (file)
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
        { },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 struct i2c_driver ddc_driver = {
        .driver = {
                .name = "exynos-hdmiddc",
                .owner = THIS_MODULE,
-               .of_match_table = hdmiddc_match_types,
+               .of_match_table = of_match_ptr(hdmiddc_match_types),
        },
        .id_table       = ddc_idtable,
        .probe          = s5p_ddc_probe,
index 72bf97b96ba0a676ceeea99dc0e72cfb472afbc6..9601bad47a2e6d5a2bd04deffec5ec5744bfbcee 100644 (file)
@@ -34,7 +34,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
        int ret = 0;
-       enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
+       enum dma_attr attr;
+       unsigned int nr_pages;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -45,44 +46,49 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 
        init_dma_attrs(&buf->dma_attrs);
 
-       if (flags & EXYNOS_BO_NONCONTIG)
+       /*
+        * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+        * region is allocated; otherwise memory is allocated as
+        * physically contiguous as possible.
+        */
+       if (flags & EXYNOS_BO_CONTIG)
+               dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
+
+       /*
+        * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
+        * mapping; otherwise use a cachable mapping.
+        */
+       if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
                attr = DMA_ATTR_WRITE_COMBINE;
+       else
+               attr = DMA_ATTR_NON_CONSISTENT;
 
        dma_set_attr(attr, &buf->dma_attrs);
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-       buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+       buf->pages = dma_alloc_attrs(dev->dev, buf->size,
                        &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
-       if (!buf->kvaddr) {
+       if (!buf->pages) {
                DRM_ERROR("failed to allocate buffer.\n");
                return -ENOMEM;
        }
 
-       buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       nr_pages = buf->size >> PAGE_SHIFT;
+       buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
        if (!buf->sgt) {
-               DRM_ERROR("failed to allocate sg table.\n");
+               DRM_ERROR("failed to get sg table.\n");
                ret = -ENOMEM;
                goto err_free_attrs;
        }
 
-       ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
-                       buf->size);
-       if (ret < 0) {
-               DRM_ERROR("failed to get sgtable.\n");
-               goto err_free_sgt;
-       }
-
-       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-                       (unsigned long)buf->kvaddr,
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
        return ret;
 
-err_free_sgt:
-       kfree(buf->sgt);
-       buf->sgt = NULL;
 err_free_attrs:
-       dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+       dma_free_attrs(dev->dev, buf->size, buf->pages,
                        (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
 
@@ -99,8 +105,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
                return;
        }
 
-       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-                       (unsigned long)buf->kvaddr,
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
@@ -109,7 +114,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
        kfree(buf->sgt);
        buf->sgt = NULL;
 
-       dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+       dma_free_attrs(dev->dev, buf->size, buf->pages,
                                (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
 }
index 539da9f4eb9762cae7137d2160ad4919eaff26e1..61d5a8402eb8be71857a4b119f3647dfa5975807 100644 (file)
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
-                                       struct exynos_drm_gem_buf *buf)
+struct exynos_drm_dmabuf_attachment {
+       struct sg_table sgt;
+       enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+                                       struct device *dev,
+                                       struct dma_buf_attachment *attach)
 {
-       struct sg_table *sgt = NULL;
-       int ret;
+       struct exynos_drm_dmabuf_attachment *exynos_attach;
 
-       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-       if (!sgt)
-               goto out;
+       exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+       if (!exynos_attach)
+               return -ENOMEM;
 
-       ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
-                               buf->dma_addr, buf->size);
-       if (ret < 0) {
-               DRM_ERROR("failed to get sgtable.\n");
-               goto err_free_sgt;
-       }
+       exynos_attach->dir = DMA_NONE;
+       attach->priv = exynos_attach;
 
-       return sgt;
+       return 0;
+}
 
-err_free_sgt:
-       kfree(sgt);
-       sgt = NULL;
-out:
-       return NULL;
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+                                       struct dma_buf_attachment *attach)
+{
+       struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+       struct sg_table *sgt;
+
+       if (!exynos_attach)
+               return;
+
+       sgt = &exynos_attach->sgt;
+
+       if (exynos_attach->dir != DMA_NONE)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+                               exynos_attach->dir);
+
+       sg_free_table(sgt);
+       kfree(exynos_attach);
+       attach->priv = NULL;
 }
 
 static struct sg_table *
                exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
 {
+       struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
        struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
        struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
+       struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
-       int nents;
+       unsigned int i;
+       int nents, ret;
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
 
+       if (WARN_ON(dir == DMA_NONE))
+               return ERR_PTR(-EINVAL);
+
+       /* just return current sgt if already requested. */
+       if (exynos_attach->dir == dir)
+               return &exynos_attach->sgt;
+
+       /* reattaching is not allowed. */
+       if (WARN_ON(exynos_attach->dir != DMA_NONE))
+               return ERR_PTR(-EBUSY);
+
        buf = gem_obj->buffer;
        if (!buf) {
                DRM_ERROR("buffer is null.\n");
-               return sgt;
+               return ERR_PTR(-ENOMEM);
+       }
+
+       sgt = &exynos_attach->sgt;
+
+       ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+       if (ret) {
+               DRM_ERROR("failed to alloc sgt.\n");
+               return ERR_PTR(-ENOMEM);
        }
 
        mutex_lock(&dev->struct_mutex);
 
-       sgt = exynos_get_sgt(dev, buf);
-       if (!sgt)
-               goto err_unlock;
+       rd = buf->sgt->sgl;
+       wr = sgt->sgl;
+       for (i = 0; i < sgt->orig_nents; ++i) {
+               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+               rd = sg_next(rd);
+               wr = sg_next(wr);
+       }
 
-       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with iommu.\n");
-               sgt = NULL;
+               sgt = ERR_PTR(-EIO);
                goto err_unlock;
        }
 
+       exynos_attach->dir = dir;
+       attach->priv = exynos_attach;
+
        DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
@@ -98,11 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                                struct sg_table *sgt,
                                                enum dma_data_direction dir)
 {
-       dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-
-       sg_free_table(sgt);
-       kfree(sgt);
-       sgt = NULL;
+       /* Nothing to do. */
 }
 
 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -164,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 }
 
 static struct dma_buf_ops exynos_dmabuf_ops = {
+       .attach                 = exynos_gem_attach_dma_buf,
+       .detach                 = exynos_gem_detach_dma_buf,
        .map_dma_buf            = exynos_gem_map_dma_buf,
        .unmap_dma_buf          = exynos_gem_unmap_dma_buf,
        .kmap                   = exynos_gem_dmabuf_kmap,
index 2b287d2fc92eca81731e564d1a9653b294ce1f2b..e0a8e8024b010683a32a6ef8a9754e6ade1adcce 100644 (file)
@@ -40,6 +40,7 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
 #include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME    "exynos"
@@ -50,6 +51,9 @@
 
 #define VBLANK_OFF_DELAY       50000
 
+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
@@ -246,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
                        exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
                        exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+                       exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+                       exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+                       exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+                       exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -296,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
        return drm_platform_init(&exynos_drm_driver, pdev);
@@ -341,6 +354,10 @@ static int __init exynos_drm_init(void)
        ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
        if (ret < 0)
                goto out_common_hdmi;
+
+       ret = exynos_platform_device_hdmi_register();
+       if (ret < 0)
+               goto out_common_hdmi_dev;
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -355,24 +372,80 @@ static int __init exynos_drm_init(void)
                goto out_g2d;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       ret = platform_driver_register(&fimc_driver);
+       if (ret < 0)
+               goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       ret = platform_driver_register(&rotator_driver);
+       if (ret < 0)
+               goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       ret = platform_driver_register(&gsc_driver);
+       if (ret < 0)
+               goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       ret = platform_driver_register(&ipp_driver);
+       if (ret < 0)
+               goto out_ipp;
+#endif
+
        ret = platform_driver_register(&exynos_drm_platform_driver);
        if (ret < 0)
+               goto out_drm;
+
+       exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+                               NULL, 0);
+       if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+               ret = PTR_ERR(exynos_drm_pdev);
                goto out;
+       }
 
        return 0;
 
 out:
+       platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
        platform_driver_unregister(&g2d_driver);
 out_g2d:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
        platform_driver_unregister(&vidi_driver);
+out_vidi:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+       exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
        platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 out_common_hdmi:
        platform_driver_unregister(&mixer_driver);
@@ -392,13 +465,32 @@ static void __exit exynos_drm_exit(void)
 {
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       platform_device_unregister(exynos_drm_pdev);
+
        platform_driver_unregister(&exynos_drm_platform_driver);
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       platform_driver_unregister(&fimc_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
        platform_driver_unregister(&g2d_driver);
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+       exynos_platform_device_hdmi_unregister();
        platform_driver_unregister(&exynos_drm_common_hdmi_driver);
        platform_driver_unregister(&mixer_driver);
        platform_driver_unregister(&hdmi_driver);
index 9c9c2dc7582800ed5c5594d59568d1b2443ab2eb..f5a97745bf93b44a6872f8daaca88642d2912c54 100644 (file)
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *     hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
        void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
        void (*commit)(struct device *subdrv_dev, int zpos);
        void (*enable)(struct device *subdrv_dev, int zpos);
        void (*disable)(struct device *subdrv_dev, int zpos);
-       void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
  * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *           allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
  * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
        unsigned int pitch;
        uint32_t pixel_format;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
-       void __iomem *vaddr[MAX_FB_BUFFER];
        int zpos;
 
        bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *     hardware overlay is updated.
  */
 struct exynos_drm_manager_ops {
        void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
        void (*commit)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
+       void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -234,8 +232,14 @@ struct exynos_drm_g2d_private {
        struct list_head        userptr_list;
 };
 
+struct exynos_drm_ipp_private {
+       struct device   *dev;
+       struct list_head        event_list;
+};
+
 struct drm_exynos_file_private {
        struct exynos_drm_g2d_private   *g2d_priv;
+       struct exynos_drm_ipp_private   *ipp_priv;
 };
 
 /*
@@ -328,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
+/*
+ * This function registers the exynos drm hdmi platform device. It ensures
+ * that only one instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * This function unregisters the exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
 #endif
index e5001dd85afc234c6e062993704c6318a2069590..301485215a70d782aa28e182633ca0f1b1592afc 100644 (file)
@@ -237,8 +237,7 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
 {
        struct exynos_drm_encoder *exynos_encoder;
-       struct exynos_drm_overlay_ops *overlay_ops;
-       struct exynos_drm_manager *manager;
+       struct exynos_drm_manager_ops *ops;
        struct drm_device *dev = fb->dev;
        struct drm_encoder *encoder;
 
@@ -248,21 +247,15 @@ void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                exynos_encoder = to_exynos_encoder(encoder);
-
-               /* if exynos was disabled, just ignor it. */
-               if (exynos_encoder->dpms > DRM_MODE_DPMS_ON)
-                       continue;
-
-               manager = exynos_encoder->manager;
-               overlay_ops = manager->overlay_ops;
+               ops = exynos_encoder->manager->ops;
 
                /*
                 * wait for vblank interrupt
                 * - this makes sure that overlay data are updated to
                 *      real hardware.
                 */
-               if (overlay_ops->wait_for_vblank)
-                       overlay_ops->wait_for_vblank(manager->dev);
+               if (ops->wait_for_vblank)
+                       ops->wait_for_vblank(exynos_encoder->manager->dev);
        }
 }
 
@@ -538,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
        if (overlay_ops && overlay_ops->disable)
                overlay_ops->disable(manager->dev, zpos);
-
-       /*
-        * wait for vblank interrupt
-        * - this makes sure that hardware overlay is disabled to avoid
-        * for the dma accesses to memory after gem buffer was released
-        * because the setting for disabling the overlay will be updated
-        * at vsync.
-        */
-       if (overlay_ops && overlay_ops->wait_for_vblank)
-               overlay_ops->wait_for_vblank(manager->dev);
 }
index 7413f4b729b07e5455a1dfc8ae7085eabd026bcd..5426cc5a5e8d53b30409a980aa5e8878a30e7c83 100644 (file)
@@ -177,6 +177,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
                return ERR_PTR(-ENOMEM);
        }
 
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
        exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
 
        ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
@@ -185,8 +186,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
                return ERR_PTR(ret);
        }
 
-       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-
        return &exynos_fb->fb;
 }
 
@@ -232,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_gem_object *obj;
-       struct drm_framebuffer *fb;
        struct exynos_drm_fb *exynos_fb;
-       int i;
+       int i, ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -244,13 +242,14 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-ENOENT);
        }
 
-       fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-       if (IS_ERR(fb)) {
-               drm_gem_object_unreference_unlocked(obj);
-               return fb;
+       exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+       if (!exynos_fb) {
+               DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+               return ERR_PTR(-ENOMEM);
        }
 
-       exynos_fb = to_exynos_fb(fb);
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
        exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
 
        DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
@@ -263,7 +262,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                                mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
-                       exynos_drm_fb_destroy(fb);
+                       kfree(exynos_fb);
                        return ERR_PTR(-ENOENT);
                }
 
@@ -272,14 +271,27 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
                if (ret < 0) {
                        DRM_ERROR("cannot use this gem memory type for fb.\n");
-                       exynos_drm_fb_destroy(fb);
+                       kfree(exynos_fb);
                        return ERR_PTR(ret);
                }
 
                exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
        }
 
-       return fb;
+       ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+       if (ret) {
+               for (i = 0; i < exynos_fb->buf_cnt; i++) {
+                       struct exynos_drm_gem_obj *gem_obj;
+
+                       gem_obj = exynos_fb->exynos_gem_obj[i];
+                       drm_gem_object_unreference_unlocked(&gem_obj->base);
+               }
+
+               kfree(exynos_fb);
+               return ERR_PTR(ret);
+       }
+
+       return &exynos_fb->fb;
 }
 
 struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -297,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
        if (!buffer)
                return NULL;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                       (unsigned long)buffer->kvaddr,
-                       (unsigned long)buffer->dma_addr);
+       DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
 
        return buffer;
 }
index a2232792e0c0bda1dd61c728fd715e88a2e602e5..f433eb7533a97e9550f86f2f94c4f5fa173d0427 100644 (file)
@@ -65,7 +65,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
        if (vm_size > buffer->size)
                return -EINVAL;
 
-       ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr,
+       ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
                buffer->dma_addr, buffer->size, &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
@@ -109,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
                return -EFAULT;
        }
 
+       /* map the pages into kernel virtual address space. */
+       if (!buffer->kvaddr) {
+               unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+               buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+                                       pgprot_writecombine(PAGE_KERNEL));
+               if (!buffer->kvaddr) {
+                       DRM_ERROR("failed to map pages to kernel space.\n");
+                       return -EIO;
+               }
+       }
+
        /* buffer count to framebuffer always is 1 at booting time. */
        exynos_drm_fb_set_buf_cnt(fb, 1);
 
@@ -164,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
        if (IS_ERR(exynos_gem_obj)) {
                ret = PTR_ERR(exynos_gem_obj);
-               goto out;
+               goto err_release_framebuffer;
        }
 
        exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -174,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        if (IS_ERR_OR_NULL(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
                ret = PTR_ERR(helper->fb);
-               goto out;
+               goto err_destroy_gem;
        }
 
        helper->fbdev = fbi;
@@ -186,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
        if (ret) {
                DRM_ERROR("failed to allocate cmap.\n");
-               goto out;
+               goto err_destroy_framebuffer;
        }
 
        ret = exynos_drm_fbdev_update(helper, helper->fb);
-       if (ret < 0) {
-               fb_dealloc_cmap(&fbi->cmap);
-               goto out;
-       }
+       if (ret < 0)
+               goto err_dealloc_cmap;
+
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+
+err_dealloc_cmap:
+       fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+       drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+       exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+       framebuffer_release(fbi);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -295,8 +316,13 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
                                      struct drm_fb_helper *fb_helper)
 {
+       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
        struct drm_framebuffer *fb;
 
+       if (exynos_gem_obj->buffer->kvaddr)
+               vunmap(exynos_gem_obj->buffer->kvaddr);
+
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
                fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644 (file)
index 0000000..61ea242
--- /dev/null
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation: supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation: supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation: supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. add a check_prepare api for the right registers.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS  4
+#define FIMC_MAX_SRC   2
+#define FIMC_MAX_DST   32
+#define FIMC_SHFACTOR  10
+#define FIMC_BUF_STOP  1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ            32
+#define FIMC_WIDTH_ITU_709     1280
+#define FIMC_REFRESH_MAX       60
+#define FIMC_REFRESH_MIN       12
+#define FIMC_CROP_MAX  8192
+#define FIMC_CROP_MIN  32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev)  platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct fimc_context, ippdrv);
+#define fimc_read(offset)              readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset)        writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+       FIMC_WB_NONE,
+       FIMC_WB_A,
+       FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: unused scaler path.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+       bool    range;
+       bool bypass;
+       bool up_h;
+       bool up_v;
+       u32 hratio;
+       u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+       /* scaler */
+       u32     in_hori;
+       u32     bypass;
+       /* output rotator */
+       u32     dst_h_wo_rot;
+       u32     dst_h_rot;
+       /* input rotator */
+       u32     rl_w_wo_rot;
+       u32     rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+       char    *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @odr: ordering of YUV.
+ * @ver: fimc version.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: qos operations.
+ */
+struct fimc_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct mutex    lock;
+       struct clk      *sclk_fimc_clk;
+       struct clk      *fimc_clk;
+       struct clk      *wb_clk;
+       struct clk      *wb_b_clk;
+       struct fimc_scaler      sc;
+       struct fimc_driverdata  *ddata;
+       struct exynos_drm_ipp_pol       pol;
+       int     id;
+       int     irq;
+       bool    suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+       cfg = fimc_read(EXYNOS_CISRCFMT);
+       cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+       if (pattern)
+               cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+       fimc_write(cfg, EXYNOS_CISRCFMT);
+
+       /* s/w reset */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= (EXYNOS_CIGCTRL_SWRST);
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       /* s/w reset complete */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~EXYNOS_CIGCTRL_SWRST;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       /* reset sequence */
+       fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+       u32 camblk_cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       camblk_cfg = readl(SYSREG_CAMERA_BLK);
+       camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+       camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+       writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+               EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+               EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+               EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+               EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+               EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+       switch (wb) {
+       case FIMC_WB_A:
+               cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+                       EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+               break;
+       case FIMC_WB_B:
+               cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+                       EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+               break;
+       case FIMC_WB_NONE:
+       default:
+               cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+                       EXYNOS_CIGCTRL_SELWRITEBACK_A |
+                       EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+                       EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+               break;
+       }
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+               struct exynos_drm_ipp_pol *pol)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+               __func__, pol->inv_pclk, pol->inv_vsync);
+       DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+               __func__, pol->inv_href, pol->inv_hsync);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+                EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+       if (pol->inv_pclk)
+               cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+       if (pol->inv_vsync)
+               cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+       if (pol->inv_href)
+               cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+       if (pol->inv_hsync)
+               cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       if (enable)
+               cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+       else
+               cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+               bool overflow, bool level)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+                       enable, overflow, level);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       if (enable) {
+               cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+               cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+               if (overflow)
+                       cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+               if (level)
+                       cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+       } else
+               cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg, status, flag;
+
+       status = fimc_read(EXYNOS_CISTATUS);
+       flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+               EXYNOS_CISTATUS_OVFICR;
+
+       DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+       if (status & flag) {
+               cfg = fimc_read(EXYNOS_CIWDOFST);
+               cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+                       EXYNOS_CIWDOFST_CLROVFICR);
+
+               fimc_write(cfg, EXYNOS_CIWDOFST);
+
+               cfg = fimc_read(EXYNOS_CIWDOFST);
+               cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+                       EXYNOS_CIWDOFST_CLROVFICR);
+
+               fimc_write(cfg, EXYNOS_CIWDOFST);
+
+               dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+                       ctx->id, status);
+               return true;
+       }
+
+       return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+       u32 cfg;
+
+       cfg = fimc_read(EXYNOS_CISTATUS);
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+       if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+               return false;
+
+       cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+       fimc_write(cfg, EXYNOS_CISTATUS);
+
+       return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+       u32 cfg;
+       int frame_cnt, buf_id;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cfg = fimc_read(EXYNOS_CISTATUS2);
+       frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+       if (frame_cnt == 0)
+               frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+       DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+               EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+               EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+       if (frame_cnt == 0) {
+               DRM_ERROR("failed to get frame count.\n");
+               return -EIO;
+       }
+
+       buf_id = frame_cnt - 1;
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+       return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       cfg = fimc_read(EXYNOS_CIOCTRL);
+       if (enable)
+               cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+       else
+               cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+       fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       /* RGB */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       default:
+               /* bypass */
+               break;
+       }
+
+       /* YUV */
+       cfg = fimc_read(EXYNOS_MSCTRL);
+       cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+               EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+               EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+       switch (fmt) {
+       case DRM_FORMAT_YUYV:
+               cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+               break;
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_YUV444:
+               cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+                       EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV12MT:
+       case DRM_FORMAT_NV16:
+               cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+                       EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_MSCTRL);
+
+       return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = fimc_read(EXYNOS_MSCTRL);
+       cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+               break;
+       case DRM_FORMAT_YUV444:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+               break;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+               break;
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV422:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+               break;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV12MT:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_MSCTRL);
+
+       cfg = fimc_read(EXYNOS_CIDMAPARAM);
+       cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+       if (fmt == DRM_FORMAT_NV12MT)
+               cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+       else
+               cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+       fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+       return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg1, cfg2;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg1 = fimc_read(EXYNOS_MSCTRL);
+       cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+               EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+       cfg2 = fimc_read(EXYNOS_CITRGFMT);
+       cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+                       EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+                       EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+               cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg1, EXYNOS_MSCTRL);
+       fimc_write(cfg2, EXYNOS_CITRGFMT);
+       *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+       return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       u32 cfg, h1, h2, v1, v2;
+
+       /* cropped image */
+       h1 = pos->x;
+       h2 = sz->hsize - pos->w - pos->x;
+       v1 = pos->y;
+       v2 = sz->vsize - pos->h - pos->y;
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+       __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+       DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+               h1, h2, v1, v2);
+
+       /*
+        * set window offset 1, 2 size
+        * check figure 43-21 in user manual
+        */
+       cfg = fimc_read(EXYNOS_CIWDOFST);
+       cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+               EXYNOS_CIWDOFST_WINVEROFST_MASK);
+       cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+               EXYNOS_CIWDOFST_WINVEROFST(v1));
+       cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+       fimc_write(cfg, EXYNOS_CIWDOFST);
+
+       cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+               EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+       fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+       return 0;
+}
+
+static int fimc_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct drm_exynos_sz img_sz = *sz;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+               __func__, swap, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+               EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+       fimc_write(cfg, EXYNOS_ORGISIZE);
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+               pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+               img_sz.hsize = sz->vsize;
+               img_sz.vsize = sz->hsize;
+       }
+
+       /* set input DMA image size */
+       cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+       cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+               EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+       cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+               EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+       fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+       /*
+        * set input FIFO image size
+        * for now, we support only ITU601 8 bit mode
+        */
+       cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+               EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+               EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+       fimc_write(cfg, EXYNOS_CISRCFMT);
+
+       /* offset Y(RGB), Cb, Cr */
+       cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIIYOFF);
+       cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIICBOFF);
+       cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIICROFF);
+
+       return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > FIMC_MAX_SRC) {
+               dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+               return -ENOMEM;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               config = &property->config[EXYNOS_DRM_OPS_SRC];
+               fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       EXYNOS_CIIYSA(buf_id));
+
+               if (config->fmt == DRM_FORMAT_YVU420) {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIICBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIICRSA(buf_id));
+               } else {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIICBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIICRSA(buf_id));
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+       .set_fmt = fimc_src_set_fmt,
+       .set_transf = fimc_src_set_transf,
+       .set_size = fimc_src_set_size,
+       .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       /* RGB */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_RGB888:
+               cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+                       EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       /* YUV */
+       cfg = fimc_read(EXYNOS_CIOCTRL);
+       cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+               EXYNOS_CIOCTRL_ORDER422_MASK |
+               EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+               cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV12MT:
+       case DRM_FORMAT_NV16:
+               cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+               cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_CIOCTRL);
+
+       return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = fimc_read(EXYNOS_CIEXTEN);
+
+       if (fmt == DRM_FORMAT_AYUV) {
+               cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+               fimc_write(cfg, EXYNOS_CIEXTEN);
+       } else {
+               cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+               fimc_write(cfg, EXYNOS_CIEXTEN);
+
+               cfg = fimc_read(EXYNOS_CITRGFMT);
+               cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+               switch (fmt) {
+               case DRM_FORMAT_RGB565:
+               case DRM_FORMAT_RGB888:
+               case DRM_FORMAT_XRGB8888:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+                       break;
+               case DRM_FORMAT_YUYV:
+               case DRM_FORMAT_YVYU:
+               case DRM_FORMAT_UYVY:
+               case DRM_FORMAT_VYUY:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+                       break;
+               case DRM_FORMAT_NV16:
+               case DRM_FORMAT_NV61:
+               case DRM_FORMAT_YUV422:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+                       break;
+               case DRM_FORMAT_YUV420:
+               case DRM_FORMAT_YVU420:
+               case DRM_FORMAT_NV12:
+               case DRM_FORMAT_NV12MT:
+               case DRM_FORMAT_NV21:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+                       break;
+               default:
+                       dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
+                               fmt);
+                       return -EINVAL;
+               }
+
+               fimc_write(cfg, EXYNOS_CITRGFMT);
+       }
+
+       cfg = fimc_read(EXYNOS_CIDMAPARAM);
+       cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
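+       /* NV12MT uses Samsung's 64x32 macro-block tiled layout */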
+       if (fmt == DRM_FORMAT_NV12MT)
+               cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+       else
+               cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+       fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+       return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = fimc_read(EXYNOS_CITRGFMT);
+       cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+       cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
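+       /*
+        * 180 and 270 degree rotations are built from the 90 degree
+        * rotator plus X/Y mirroring, so a user-requested flip on top
+        * of them clears the corresponding mirror bit again.
+        */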
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+                       EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+                       EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+                       EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_CITRGFMT);
+       *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+       return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+       DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
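+       /*
+        * pick the largest power-of-two prescale that does not overshoot:
+        * e.g. a 1920 -> 400 downscale satisfies src >= dst * 4 but not
+        * src >= dst * 8, so *ratio = 4 and *shift = 2, leaving the main
+        * scaler a 480 -> 400 step.
+        */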
+       if (src >= dst * 64) {
+               DRM_ERROR("failed to make ratio and shift.\n");
+               return -EINVAL;
+       } else if (src >= dst * 32) {
+               *ratio = 32;
+               *shift = 5;
+       } else if (src >= dst * 16) {
+               *ratio = 16;
+               *shift = 4;
+       } else if (src >= dst * 8) {
+               *ratio = 8;
+               *shift = 3;
+       } else if (src >= dst * 4) {
+               *ratio = 4;
+               *shift = 2;
+       } else if (src >= dst * 2) {
+               *ratio = 2;
+               *shift = 1;
+       } else {
+               *ratio = 1;
+               *shift = 0;
+       }
+
+       return 0;
+}
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+               struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg, cfg_ext, shfactor;
+       u32 pre_dst_width, pre_dst_height;
+       u32 pre_hratio, hfactor, pre_vratio, vfactor;
+       int ret = 0;
+       u32 src_w, src_h, dst_w, dst_h;
+
+       cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+       if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+               src_w = src->h;
+               src_h = src->w;
+       } else {
+               src_w = src->w;
+               src_h = src->h;
+       }
+
+       if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+               dst_w = dst->h;
+               dst_h = dst->w;
+       } else {
+               dst_w = dst->w;
+               dst_h = dst->h;
+       }
+
+       ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+               return ret;
+       }
+
+       ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+               return ret;
+       }
+
+       pre_dst_width = src_w / pre_hratio;
+       pre_dst_height = src_h / pre_vratio;
+       DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+               pre_dst_width, pre_dst_height);
+       DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+               __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
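+       /*
+        * the main scaler ratios are Q14 fixed point: e.g. with
+        * src_w 1920, dst_w 400 and hfactor 2, hratio becomes
+        * (1920 << 14) / 1600 = 19660, i.e. 1.2 in Q14, matching the
+        * remaining 480 -> 400 downscale after the 4:1 prescale.
+        */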
+       sc->hratio = (src_w << 14) / (dst_w << hfactor);
+       sc->vratio = (src_h << 14) / (dst_h << vfactor);
+       sc->up_h = (dst_w >= src_w);
+       sc->up_v = (dst_h >= src_h);
+       DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+               __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+       shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+       DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+       cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+               EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+               EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+       fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+       cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+               EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+       fimc_write(cfg, EXYNOS_CISCPREDST);
+
+       return ret;
+}
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+       u32 cfg, cfg_ext;
+
+       DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+               __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+       DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+               __func__, sc->hratio, sc->vratio);
+
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+               EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+               EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+               EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+               EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+               EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+       if (sc->range)
+               cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+                       EXYNOS_CISCCTRL_CSCY2R_WIDE);
+       if (sc->bypass)
+               cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+       if (sc->up_h)
+               cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+       if (sc->up_v)
+               cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+       cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+               EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+       fimc_write(cfg, EXYNOS_CISCCTRL);
+
+       cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+       cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+       cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+       cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+               EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+       fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct drm_exynos_sz img_sz = *sz;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+               __func__, swap, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+               EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+       fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, pos->x, pos->y, pos->w, pos->h);
+
+       /* CSC ITU */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
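+       /* pick BT.709 coefficients for HD-sized output, BT.601 otherwise */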
+       if (sz->hsize >= FIMC_WIDTH_ITU_709)
+               cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+       else
+               cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+               img_sz.hsize = sz->vsize;
+               img_sz.vsize = sz->hsize;
+       }
+
+       /* target image size */
+       cfg = fimc_read(EXYNOS_CITRGFMT);
+       cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+               EXYNOS_CITRGFMT_TARGETV_MASK);
+       cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+               EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+       fimc_write(cfg, EXYNOS_CITRGFMT);
+
+       /* target area */
+       cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+       fimc_write(cfg, EXYNOS_CITAREA);
+
+       /* offset Y(RGB), Cb, Cr */
+       cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIOYOFF);
+       cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIOCBOFF);
+       cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIOCROFF);
+
+       return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+       u32 cfg, i, buf_num = 0;
+       u32 mask = 0x00000001;
+
+       cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
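+       /* count the set bits in CIFCNTSEQ: one per enqueued buffer slot */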
+       for (i = 0; i < FIMC_REG_SZ; i++)
+               if (cfg & (mask << i))
+                       buf_num++;
+
+       DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+       return buf_num;
+}
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool enable;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       mutex_lock(&ctx->lock);
+
+       /* mask register set */
+       cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               enable = true;
+               break;
+       case IPP_BUF_DEQUEUE:
+               enable = false;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
+       /* sequence id */
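+       /* e.g. IPP_BUF_ENQUEUE with buf_id 3 sets bit 3, enabling slot 3 */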
+       cfg &= (~mask);
+       cfg |= (enable << buf_id);
+       fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+       /* interrupt enable */
+       if (buf_type == IPP_BUF_ENQUEUE &&
+           fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+               fimc_handle_irq(ctx, true, false, true);
+
+       /* interrupt disable */
+       if (buf_type == IPP_BUF_DEQUEUE &&
+           fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+               fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+       mutex_unlock(&ctx->lock);
+       return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > FIMC_MAX_DST) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               config = &property->config[EXYNOS_DRM_OPS_DST];
+
+               fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       EXYNOS_CIOYSA(buf_id));
+
+               if (config->fmt == DRM_FORMAT_YVU420) {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIOCBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIOCRSA(buf_id));
+               } else {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIOCBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIOCRSA(buf_id));
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+       .set_fmt = fimc_dst_set_fmt,
+       .set_transf = fimc_dst_set_transf,
+       .set_size = fimc_dst_set_size,
+       .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       if (enable) {
+               clk_enable(ctx->sclk_fimc_clk);
+               clk_enable(ctx->fimc_clk);
+               clk_enable(ctx->wb_clk);
+               ctx->suspended = false;
+       } else {
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_disable(ctx->fimc_clk);
+               clk_disable(ctx->wb_clk);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+       struct fimc_context *ctx = dev_id;
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work =
+               c_node->event_work;
+       int buf_id;
+
+       DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+       fimc_clear_irq(ctx);
+       if (fimc_check_ovf(ctx))
+               return IRQ_NONE;
+
+       if (!fimc_check_frame_end(ctx))
+               return IRQ_NONE;
+
+       buf_id = fimc_get_buf_id(ctx);
+       if (buf_id < 0)
+               return IRQ_HANDLED;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+       if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return IRQ_HANDLED;
+       }
+
+       event_work->ippdrv = ippdrv;
+       event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+       queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+       return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->writeback = 1;
+       prop_list->refresh_min = FIMC_REFRESH_MIN;
+       prop_list->refresh_max = FIMC_REFRESH_MAX;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+                               (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 1;
+       prop_list->crop = 1;
+       prop_list->crop_max.hsize = FIMC_CROP_MAX;
+       prop_list->crop_max.vsize = FIMC_CROP_MAX;
+       prop_list->crop_min.hsize = FIMC_CROP_MIN;
+       prop_list->crop_min.vsize = FIMC_CROP_MIN;
+       prop_list->scale = 1;
+       prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+       prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+       prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+       prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos *pos;
+       struct drm_exynos_sz *sz;
+       bool swap;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               /* check for flip */
+               if (!fimc_check_drm_flip(config->flip)) {
+                       DRM_ERROR("invalid flip.\n");
+                       goto err_property;
+               }
+
+               /* check for degree */
+               switch (config->degree) {
+               case EXYNOS_DRM_DEGREE_90:
+               case EXYNOS_DRM_DEGREE_270:
+                       swap = true;
+                       break;
+               case EXYNOS_DRM_DEGREE_0:
+               case EXYNOS_DRM_DEGREE_180:
+                       swap = false;
+                       break;
+               default:
+                       DRM_ERROR("invalid degree.\n");
+                       goto err_property;
+               }
+
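+               /*
+                * with 90/270 degree rotation the axes are swapped, so the
+                * crop and scale limits below are checked against the
+                * transposed width/height.
+                */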
+               /* check for buffer bound */
+               if ((pos->x + pos->w > sz->hsize) ||
+                       (pos->y + pos->h > sz->vsize)) {
+                       DRM_ERROR("out of buf bound.\n");
+                       goto err_property;
+               }
+
+               /* check for crop */
+               if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+                       if (swap) {
+                               if ((pos->h < pp->crop_min.hsize) ||
+                                       (sz->vsize > pp->crop_max.hsize) ||
+                                       (pos->w < pp->crop_min.vsize) ||
+                                       (sz->hsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->crop_min.hsize) ||
+                                       (sz->hsize > pp->crop_max.hsize) ||
+                                       (pos->h < pp->crop_min.vsize) ||
+                                       (sz->vsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+
+               /* check for scale */
+               if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+                       if (swap) {
+                               if ((pos->h < pp->scale_min.hsize) ||
+                                       (sz->vsize > pp->scale_max.hsize) ||
+                                       (pos->w < pp->scale_min.vsize) ||
+                                       (sz->hsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->scale_min.hsize) ||
+                                       (sz->hsize > pp->scale_max.hsize) ||
+                                       (pos->h < pp->scale_min.vsize) ||
+                                       (sz->vsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+
+err_property:
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+                       i ? "dst" : "src", config->flip, config->degree,
+                       pos->x, pos->y, pos->w, pos->h,
+                       sz->hsize, sz->vsize);
+       }
+
+       return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s:\n", __func__);
+
+       for (i = 0; i < FIMC_MAX_SRC; i++) {
+               fimc_write(0, EXYNOS_CIIYSA(i));
+               fimc_write(0, EXYNOS_CIICBSA(i));
+               fimc_write(0, EXYNOS_CIICRSA(i));
+       }
+
+       for (i = 0; i < FIMC_MAX_DST; i++) {
+               fimc_write(0, EXYNOS_CIOYSA(i));
+               fimc_write(0, EXYNOS_CIOCBSA(i));
+               fimc_write(0, EXYNOS_CIOCRSA(i));
+       }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* reset h/w block */
+       fimc_sw_reset(ctx, false);
+
+       /* reset scaler capability */
+       memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+       fimc_clear_addr(ctx);
+
+       return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos   img_pos[EXYNOS_DRM_OPS_MAX];
+       struct drm_exynos_ipp_set_wb set_wb;
+       int ret, i;
+       u32 cfg0, cfg1;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+
+       fimc_handle_irq(ctx, true, false, true);
+
+       for_each_ipp_ops(i) {
+               config = &property->config[i];
+               img_pos[i] = config->pos;
+       }
+
+       ret = fimc_set_prescaler(ctx, &ctx->sc,
+               &img_pos[EXYNOS_DRM_OPS_SRC],
+               &img_pos[EXYNOS_DRM_OPS_DST]);
+       if (ret) {
+               dev_err(dev, "failed to set precalser.\n");
+               return ret;
+       }
+
+       /* if set to true, the screen contents can be saved as a JPEG */
+       fimc_handle_jpeg(ctx, false);
+       fimc_set_scaler(ctx, &ctx->sc);
+       fimc_set_polarity(ctx, &ctx->pol);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+               fimc_handle_lastend(ctx, false);
+
+               /* setup dma */
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+               cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+               break;
+       case IPP_CMD_WB:
+               fimc_set_type_ctrl(ctx, FIMC_WB_A);
+               fimc_handle_lastend(ctx, true);
+
+               /* setup FIMD */
+               fimc_set_camblk_fimd0_wb(ctx);
+
+               set_wb.enable = 1;
+               set_wb.refresh = property->refresh_rate;
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               ret = -EINVAL;
+               dev_err(dev, "invalid operations.\n");
+               return ret;
+       }
+
+       /* Reset status */
+       fimc_write(0x0, EXYNOS_CISTATUS);
+
+       cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+       cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+       /* Scaler */
+       cfg1 = fimc_read(EXYNOS_CISCCTRL);
+       cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+       cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+               EXYNOS_CISCCTRL_SCALERSTART);
+
+       fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+       /* Enable image capture */
+       cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+       fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+       /* Disable frame end irq */
+       cfg0 = fimc_read(EXYNOS_CIGCTRL);
+       cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+       fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+       cfg0 = fimc_read(EXYNOS_CIOCTRL);
+       cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+       fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+       if (cmd == IPP_CMD_M2M) {
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 |= EXYNOS_MSCTRL_ENVID;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+       }
+
+       return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* Source clear */
+               cfg = fimc_read(EXYNOS_MSCTRL);
+               cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+               cfg &= ~EXYNOS_MSCTRL_ENVID;
+               fimc_write(cfg, EXYNOS_MSCTRL);
+               break;
+       case IPP_CMD_WB:
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               dev_err(dev, "invalid operations.\n");
+               break;
+       }
+
+       fimc_handle_irq(ctx, false, false, true);
+
+       /* reset sequence */
+       fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+       /* Scaler disable */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+       fimc_write(cfg, EXYNOS_CISCCTRL);
+
+       /* Disable image capture */
+       cfg = fimc_read(EXYNOS_CIIMGCPT);
+       cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+       fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+       /* Enable frame end irq */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fimc_context *ctx;
+       struct clk      *parent_clk;
+       struct resource *res;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct exynos_drm_fimc_pdata *pdata;
+       struct fimc_driverdata *ddata;
+       int ret;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               dev_err(dev, "no platform data specified.\n");
+               return -EINVAL;
+       }
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ddata = (struct fimc_driverdata *)
+               platform_get_device_id(pdev)->driver_data;
+
+       /* clock control */
+       ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+       if (IS_ERR(ctx->sclk_fimc_clk)) {
+               dev_err(dev, "failed to get src fimc clock.\n");
+               ret = PTR_ERR(ctx->sclk_fimc_clk);
+               goto err_ctx;
+       }
+       clk_enable(ctx->sclk_fimc_clk);
+
+       ctx->fimc_clk = clk_get(dev, "fimc");
+       if (IS_ERR(ctx->fimc_clk)) {
+               dev_err(dev, "failed to get fimc clock.\n");
+               ret = PTR_ERR(ctx->fimc_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               goto err_ctx;
+       }
+
+       ctx->wb_clk = clk_get(dev, "pxl_async0");
+       if (IS_ERR(ctx->wb_clk)) {
+               dev_err(dev, "failed to get writeback a clock.\n");
+               ret = PTR_ERR(ctx->wb_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               goto err_ctx;
+       }
+
+       ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+       if (IS_ERR(ctx->wb_b_clk)) {
+               dev_err(dev, "failed to get writeback b clock.\n");
+               ret = PTR_ERR(ctx->wb_b_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               goto err_ctx;
+       }
+
+       parent_clk = clk_get(dev, ddata->parent_clk);
+       if (IS_ERR(parent_clk)) {
+               dev_err(dev, "failed to get parent clock.\n");
+               ret = PTR_ERR(parent_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               clk_put(ctx->wb_b_clk);
+               goto err_ctx;
+       }
+
+       if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+               dev_err(dev, "failed to set parent.\n");
+               ret = -EINVAL;
+               clk_put(parent_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               clk_put(ctx->wb_b_clk);
+               goto err_ctx;
+       }
+
+       clk_put(parent_clk);
+       clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+       /* resource memory */
+       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ctx->regs_res) {
+               dev_err(dev, "failed to find registers.\n");
+               ret = -ENOENT;
+               goto err_clk;
+       }
+
+       ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+       if (!ctx->regs) {
+               dev_err(dev, "failed to map registers.\n");
+               ret = -ENXIO;
+               goto err_clk;
+       }
+
+       /* resource irq */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "failed to request irq resource.\n");
+               ret = -ENOENT;
+               goto err_get_regs;
+       }
+
+       ctx->irq = res->start;
+       ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+               IRQF_ONESHOT, "drm_fimc", ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq.\n");
+               goto err_get_regs;
+       }
+
+       /* context initialization */
+       ctx->id = pdev->id;
+       ctx->pol = pdata->pol;
+       ctx->ddata = ddata;
+
+       ippdrv = &ctx->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+       ippdrv->check_property = fimc_ippdrv_check_property;
+       ippdrv->reset = fimc_ippdrv_reset;
+       ippdrv->start = fimc_ippdrv_start;
+       ippdrv->stop = fimc_ippdrv_stop;
+       ret = fimc_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_get_irq;
+       }
+
+       DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+               (int)ippdrv);
+
+       mutex_init(&ctx->lock);
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm fimc device.\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+err_get_irq:
+       free_irq(ctx->irq, ctx);
+err_get_regs:
+       devm_iounmap(dev, ctx->regs);
+err_clk:
+       clk_put(ctx->sclk_fimc_clk);
+       clk_put(ctx->fimc_clk);
+       clk_put(ctx->wb_clk);
+       clk_put(ctx->wb_b_clk);
+err_ctx:
+       devm_kfree(dev, ctx);
+       return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+       mutex_destroy(&ctx->lock);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
+
+       free_irq(ctx->irq, ctx);
+       devm_iounmap(dev, ctx->regs);
+
+       clk_put(ctx->sclk_fimc_clk);
+       clk_put(ctx->fimc_clk);
+       clk_put(ctx->wb_clk);
+       clk_put(ctx->wb_b_clk);
+
+       devm_kfree(dev, ctx);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (!pm_runtime_suspended(dev))
+               return fimc_clk_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+       .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4412_fimc_data = {
+       .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+       {
+               .name           = "exynos4210-fimc",
+               .driver_data    = (unsigned long)&exynos4210_fimc_data,
+       }, {
+               .name           = "exynos4412-fimc",
+               .driver_data    = (unsigned long)&exynos4412_fimc_data,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+       SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+       .probe          = fimc_probe,
+       .remove         = __devexit_p(fimc_remove),
+       .id_table       = fimc_driver_ids,
+       .driver         = {
+               .name   = "exynos-drm-fimc",
+               .owner  = THIS_MODULE,
+               .pm     = &fimc_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644 (file)
index 0000000..dc970fa
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
index 00bd266a31bbd0813434d7f548f68c57add9e262..bf0d9baca2bc2436c077cf62391da74fe2288e83 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <video/samsung_fimd.h>
@@ -79,10 +80,10 @@ struct fimd_win_data {
        unsigned int            fb_height;
        unsigned int            bpp;
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
        bool                    enabled;
+       bool                    resume;
 };
 
 struct fimd_context {
@@ -100,13 +101,34 @@ struct fimd_context {
        u32                             vidcon1;
        bool                            suspended;
        struct mutex                    lock;
+       wait_queue_head_t               wait_vsync_queue;
+       atomic_t                        wait_vsync_event;
 
        struct exynos_drm_panel_info *panel;
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+       { .compatible = "samsung,exynos4-fimd",
+         .data = &exynos4_fimd_driver_data },
+       { .compatible = "samsung,exynos5-fimd",
+         .data = &exynos5_fimd_driver_data },
+       {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
        struct platform_device *pdev)
 {
+#ifdef CONFIG_OF
+       const struct of_device_id *of_id =
+                       of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+       if (of_id)
+               return (struct fimd_driver_data *)of_id->data;
+#endif
+
        return (struct fimd_driver_data *)
                platform_get_device_id(pdev)->driver_data;
 }
@@ -241,7 +263,9 @@ static void fimd_commit(struct device *dev)
 
        /* setup horizontal and vertical display size. */
        val = VIDTCON2_LINEVAL(timing->yres - 1) |
-              VIDTCON2_HOZVAL(timing->xres - 1);
+              VIDTCON2_HOZVAL(timing->xres - 1) |
+              VIDTCON2_LINEVAL_E(timing->yres - 1) |
+              VIDTCON2_HOZVAL_E(timing->xres - 1);
        writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
        /* setup clock source, clock divider, enable dma. */
@@ -308,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
        }
 }
 
+static void fimd_wait_for_vblank(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+
+       if (ctx->suspended)
+               return;
+
+       atomic_set(&ctx->wait_vsync_event, 1);
+
+       /*
+        * wait for FIMD to signal the VSYNC interrupt, or time out
+        * after 50ms (i.e. a 20Hz refresh rate).
+        */
+       if (!wait_event_timeout(ctx->wait_vsync_queue,
+                               !atomic_read(&ctx->wait_vsync_event),
+                               DRM_HZ/20))
+               DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static struct exynos_drm_manager_ops fimd_manager_ops = {
        .dpms = fimd_dpms,
        .apply = fimd_apply,
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
+       .wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static void fimd_win_mode_set(struct device *dev,
@@ -352,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
        win_data->dma_addr = overlay->dma_addr[0] + offset;
-       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -362,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
                        win_data->offset_x, win_data->offset_y);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
-       DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->dma_addr,
-                       (unsigned long)win_data->vaddr);
+       DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
 }
@@ -452,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
        struct fimd_win_data *win_data;
        int win = zpos;
        unsigned long val, alpha, size;
+       unsigned int last_x;
+       unsigned int last_y;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -497,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
 
        /* buffer size */
        val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
-               VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+               VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+               VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+               VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
        writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
 
        /* OSD position */
        val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
-               VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+               VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+               VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+               VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
        writel(val, ctx->regs + VIDOSD_A(win));
 
-       val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
-                                       win_data->ovl_width - 1) |
-               VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
-                                       win_data->ovl_height - 1);
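+       /* guard the decrement so a zero-sized window cannot underflow */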
+       last_x = win_data->offset_x + win_data->ovl_width;
+       if (last_x)
+               last_x--;
+       last_y = win_data->offset_y + win_data->ovl_height;
+       if (last_y)
+               last_y--;
+
+       val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+               VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
        writel(val, ctx->regs + VIDOSD_B(win));
 
        DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
-                       win_data->offset_x, win_data->offset_y,
-                       win_data->offset_x + win_data->ovl_width - 1,
-                       win_data->offset_y + win_data->ovl_height - 1);
+                       win_data->offset_x, win_data->offset_y, last_x, last_y);
 
        /* hardware window 0 doesn't support alpha channel. */
        if (win != 0) {
@@ -574,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
 
        win_data = &ctx->win_data[win];
 
+       if (ctx->suspended) {
+               /* do not resume this window */
+               win_data->resume = false;
+               return;
+       }
+
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -593,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
        win_data->enabled = false;
 }
 
-static void fimd_wait_for_vblank(struct device *dev)
-{
-       struct fimd_context *ctx = get_fimd_context(dev);
-       int ret;
-
-       ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
-                                       VIDCON1_VSTATUS_VSYNC), 50);
-       if (ret < 0)
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
        .mode_set = fimd_win_mode_set,
        .commit = fimd_win_commit,
        .disable = fimd_win_disable,
-       .wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static struct exynos_drm_manager fimd_manager = {
@@ -667,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
+       /* set wait vsync event to zero and wake up queue. */
+       if (atomic_read(&ctx->wait_vsync_event)) {
+               atomic_set(&ctx->wait_vsync_event, 0);
+               DRM_WAKEUP(&ctx->wait_vsync_queue);
+       }
 out:
        return IRQ_HANDLED;
 }
@@ -794,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
        return 0;
 }
 
+static void fimd_window_suspend(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct fimd_win_data *win_data;
+       int i;
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->resume = win_data->enabled;
+               fimd_win_disable(dev, i);
+       }
+       fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct fimd_win_data *win_data;
+       int i;
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->enabled = win_data->resume;
+               win_data->resume = false;
+       }
+}
+
 static int fimd_activate(struct fimd_context *ctx, bool enable)
 {
+       struct device *dev = ctx->subdrv.dev;
        if (enable) {
                int ret;
-               struct device *dev = ctx->subdrv.dev;
 
                ret = fimd_clock(ctx, true);
                if (ret < 0)
@@ -809,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
                /* if vblank was enabled status, enable it again. */
                if (test_and_clear_bit(0, &ctx->irq_flags))
                        fimd_enable_vblank(dev);
+
+               fimd_window_resume(dev);
        } else {
+               fimd_window_suspend(dev);
+
                fimd_clock(ctx, false);
                ctx->suspended = true;
        }
@@ -885,6 +966,8 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        ctx->vidcon1 = pdata->vidcon1;
        ctx->default_win = pdata->default_win;
        ctx->panel = panel;
+       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       atomic_set(&ctx->wait_vsync_event, 0);
 
        subdrv = &ctx->subdrv;
 
@@ -1028,5 +1111,6 @@ struct platform_driver fimd_driver = {
                .name   = "exynos4-fb",
                .owner  = THIS_MODULE,
                .pm     = &fimd_pm_ops,
+               .of_match_table = of_match_ptr(fimd_driver_dt_match),
        },
 };
index 99227246ce82b9e0471ee0d2669bd3508d43b2bb..d48183e7e056d56a8932ae06baf1bc172ce02893 100644 (file)
@@ -400,7 +400,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
        if (vm_size > buffer->size)
                return -EINVAL;
 
-       ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->kvaddr,
+       ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
index d3ea106a9a77a2bbb9f034eb6dc55d948faac1c3..f11f2afd5bfc58254eb0b064535ae4b8131b91c3 100644 (file)
@@ -40,6 +40,7 @@
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
  * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
  * @sgt: sg table to transfer page data.
  * @size: size of allocated memory region.
  * @pfnmap: indicate whether memory region from userptr is mmaped with
@@ -51,6 +52,7 @@ struct exynos_drm_gem_buf {
        dma_addr_t              dma_addr;
        struct dma_attrs        dma_attrs;
        unsigned int            write;
+       struct page             **pages;
        struct sg_table         *sgt;
        unsigned long           size;
        bool                    pfnmap;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644 (file)
index 0000000..5639353
--- /dev/null
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaling/rotation and input/output DMA operations.
+ * The input DMA reads image data from memory.
+ * The output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check the platform_device_id use case.
+ * 3. check src/dst size width, height.
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS   4
+#define GSC_MAX_SRC            4
+#define GSC_MAX_DST            16
+#define GSC_RESET_TIMEOUT      50
+#define GSC_BUF_STOP   1
+#define GSC_BUF_START  2
+#define GSC_REG_SZ             16
+#define GSC_WIDTH_ITU_709      1280
+#define GSC_SC_UP_MAX_RATIO            65536
+#define GSC_SC_DOWN_RATIO_7_8          74898
+#define GSC_SC_DOWN_RATIO_6_8          87381
+#define GSC_SC_DOWN_RATIO_5_8          104857
+#define GSC_SC_DOWN_RATIO_4_8          131072
+#define GSC_SC_DOWN_RATIO_3_8          174762
+#define GSC_SC_DOWN_RATIO_2_8          262144
+#define GSC_REFRESH_MIN        12
+#define GSC_REFRESH_MAX        60
+#define GSC_CROP_MAX   8192
+#define GSC_CROP_MIN   32
+#define GSC_SCALE_MAX  4224
+#define GSC_SCALE_MIN  32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR  16
+#define GSC_COEF_H_8T  8
+#define GSC_COEF_V_4T  4
+#define GSC_COEF_DEPTH 3
+
+#define get_gsc_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct gsc_context, ippdrv)
+#define gsc_read(offset)               readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
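These register accessors implicitly rely on a local variable named ctx (a
struct gsc_context *) being in scope at every call site. A minimal sketch of
equivalent inline helpers, with illustrative names that are not part of the
patch:

    static inline u32 gsc_readl(struct gsc_context *ctx, u32 offset)
    {
            /* 32-bit MMIO read from the GSC register block */
            return readl(ctx->regs + offset);
    }

    static inline void gsc_writel(struct gsc_context *ctx, u32 cfg, u32 offset)
    {
            /* 32-bit MMIO write to the GSC register block */
            writel(cfg, ctx->regs + offset);
    }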
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+       bool    range;
+       u32     pre_shfactor;
+       u32     pre_hratio;
+       u32     pre_vratio;
+       unsigned long main_hratio;
+       unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * See user manual section 49.2 for the supported features.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+       /* tile or rotation */
+       u32     tile_w;
+       u32     tile_h;
+       /* other cases */
+       u32     w;
+       u32     h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: ipp driver data, registered with the ipp subsystem.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: whether 90/270 degree rotation of src is in effect.
+ * @suspended: whether the device is suspended (clock gated).
+ */
+struct gsc_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct mutex    lock;
+       struct clk      *gsc_clk;
+       struct gsc_scaler       sc;
+       int     id;
+       int     irq;
+       bool    rotation;
+       bool    suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+       {       /* Ratio <= 65536 (~8:8) */
+               {  0,  0,   0, 128,   0,   0,  0,  0 },
+               { -1,  2,  -6, 127,   7,  -2,  1,  0 },
+               { -1,  4, -12, 125,  16,  -5,  1,  0 },
+               { -1,  5, -15, 120,  25,  -8,  2,  0 },
+               { -1,  6, -18, 114,  35, -10,  3, -1 },
+               { -1,  6, -20, 107,  46, -13,  4, -1 },
+               { -2,  7, -21,  99,  57, -16,  5, -1 },
+               { -1,  6, -20,  89,  68, -18,  5, -1 },
+               { -1,  6, -20,  79,  79, -20,  6, -1 },
+               { -1,  5, -18,  68,  89, -20,  6, -1 },
+               { -1,  5, -16,  57,  99, -21,  7, -2 },
+               { -1,  4, -13,  46, 107, -20,  6, -1 },
+               { -1,  3, -10,  35, 114, -18,  6, -1 },
+               {  0,  2,  -8,  25, 120, -15,  5, -1 },
+               {  0,  1,  -5,  16, 125, -12,  4, -1 },
+               {  0,  1,  -2,   7, 127,  -6,  2, -1 }
+       }, {    /* 65536 < Ratio <= 74898 (~8:7) */
+               {  3, -8,  14, 111,  13,  -8,  3,  0 },
+               {  2, -6,   7, 112,  21, -10,  3, -1 },
+               {  2, -4,   1, 110,  28, -12,  4, -1 },
+               {  1, -2,  -3, 106,  36, -13,  4, -1 },
+               {  1, -1,  -7, 103,  44, -15,  4, -1 },
+               {  1,  1, -11,  97,  53, -16,  4, -1 },
+               {  0,  2, -13,  91,  61, -16,  4, -1 },
+               {  0,  3, -15,  85,  69, -17,  4, -1 },
+               {  0,  3, -16,  77,  77, -16,  3,  0 },
+               { -1,  4, -17,  69,  85, -15,  3,  0 },
+               { -1,  4, -16,  61,  91, -13,  2,  0 },
+               { -1,  4, -16,  53,  97, -11,  1,  1 },
+               { -1,  4, -15,  44, 103,  -7, -1,  1 },
+               { -1,  4, -13,  36, 106,  -3, -2,  1 },
+               { -1,  4, -12,  28, 110,   1, -4,  2 },
+               { -1,  3, -10,  21, 112,   7, -6,  2 }
+       }, {    /* 74898 < Ratio <= 87381 (~8:6) */
+               { 2, -11,  25,  96, 25, -11,   2,  0 },
+               { 2, -10,  19,  96, 31, -12,   2,  0 },
+               { 2,  -9,  14,  94, 37, -12,   2,  0 },
+               { 2,  -8,  10,  92, 43, -12,   1,  0 },
+               { 2,  -7,   5,  90, 49, -12,   1,  0 },
+               { 2,  -5,   1,  86, 55, -12,   0,  1 },
+               { 2,  -4,  -2,  82, 61, -11,  -1,  1 },
+               { 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
+               { 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
+               { 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
+               { 1,  -1, -11,  61, 82,  -2,  -4,  2 },
+               { 1,   0, -12,  55, 86,   1,  -5,  2 },
+               { 0,   1, -12,  49, 90,   5,  -7,  2 },
+               { 0,   1, -12,  43, 92,  10,  -8,  2 },
+               { 0,   2, -12,  37, 94,  14,  -9,  2 },
+               { 0,   2, -12,  31, 96,  19, -10,  2 }
+       }, {    /* 87381 < Ratio <= 104857 (~8:5) */
+               { -1,  -8, 33,  80, 33,  -8,  -1,  0 },
+               { -1,  -8, 28,  80, 37,  -7,  -2,  1 },
+               {  0,  -8, 24,  79, 41,  -7,  -2,  1 },
+               {  0,  -8, 20,  78, 46,  -6,  -3,  1 },
+               {  0,  -8, 16,  76, 50,  -4,  -3,  1 },
+               {  0,  -7, 13,  74, 54,  -3,  -4,  1 },
+               {  1,  -7, 10,  71, 58,  -1,  -5,  1 },
+               {  1,  -6,  6,  68, 62,   1,  -5,  1 },
+               {  1,  -6,  4,  65, 65,   4,  -6,  1 },
+               {  1,  -5,  1,  62, 68,   6,  -6,  1 },
+               {  1,  -5, -1,  58, 71,  10,  -7,  1 },
+               {  1,  -4, -3,  54, 74,  13,  -7,  0 },
+               {  1,  -3, -4,  50, 76,  16,  -8,  0 },
+               {  1,  -3, -6,  46, 78,  20,  -8,  0 },
+               {  1,  -2, -7,  41, 79,  24,  -8,  0 },
+               {  1,  -2, -7,  37, 80,  28,  -8, -1 }
+       }, {    /* 104857 < Ratio <= 131072 (~8:4) */
+               { -3,   0, 35,  64, 35,   0,  -3,  0 },
+               { -3,  -1, 32,  64, 38,   1,  -3,  0 },
+               { -2,  -2, 29,  63, 41,   2,  -3,  0 },
+               { -2,  -3, 27,  63, 43,   4,  -4,  0 },
+               { -2,  -3, 24,  61, 46,   6,  -4,  0 },
+               { -2,  -3, 21,  60, 49,   7,  -4,  0 },
+               { -1,  -4, 19,  59, 51,   9,  -4, -1 },
+               { -1,  -4, 16,  57, 53,  12,  -4, -1 },
+               { -1,  -4, 14,  55, 55,  14,  -4, -1 },
+               { -1,  -4, 12,  53, 57,  16,  -4, -1 },
+               { -1,  -4,  9,  51, 59,  19,  -4, -1 },
+               {  0,  -4,  7,  49, 60,  21,  -3, -2 },
+               {  0,  -4,  6,  46, 61,  24,  -3, -2 },
+               {  0,  -4,  4,  43, 63,  27,  -3, -2 },
+               {  0,  -3,  2,  41, 63,  29,  -2, -2 },
+               {  0,  -3,  1,  38, 64,  32,  -1, -3 }
+       }, {    /* 131072 < Ratio <= 174762 (~8:3) */
+               { -1,   8, 33,  48, 33,   8,  -1,  0 },
+               { -1,   7, 31,  49, 35,   9,  -1, -1 },
+               { -1,   6, 30,  49, 36,  10,  -1, -1 },
+               { -1,   5, 28,  48, 38,  12,  -1, -1 },
+               { -1,   4, 26,  48, 39,  13,   0, -1 },
+               { -1,   3, 24,  47, 41,  15,   0, -1 },
+               { -1,   2, 23,  47, 42,  16,   0, -1 },
+               { -1,   2, 21,  45, 43,  18,   1, -1 },
+               { -1,   1, 19,  45, 45,  19,   1, -1 },
+               { -1,   1, 18,  43, 45,  21,   2, -1 },
+               { -1,   0, 16,  42, 47,  23,   2, -1 },
+               { -1,   0, 15,  41, 47,  24,   3, -1 },
+               { -1,   0, 13,  39, 48,  26,   4, -1 },
+               { -1,  -1, 12,  38, 48,  28,   5, -1 },
+               { -1,  -1, 10,  36, 49,  30,   6, -1 },
+               { -1,  -1,  9,  35, 49,  31,   7, -1 }
+       }, {    /* 174762 < Ratio <= 262144 (~8:2) */
+               {  2,  13, 30,  38, 30,  13,   2,  0 },
+               {  2,  12, 29,  38, 30,  14,   3,  0 },
+               {  2,  11, 28,  38, 31,  15,   3,  0 },
+               {  2,  10, 26,  38, 32,  16,   4,  0 },
+               {  1,  10, 26,  37, 33,  17,   4,  0 },
+               {  1,   9, 24,  37, 34,  18,   5,  0 },
+               {  1,   8, 24,  37, 34,  19,   5,  0 },
+               {  1,   7, 22,  36, 35,  20,   6,  1 },
+               {  1,   6, 21,  36, 36,  21,   6,  1 },
+               {  1,   6, 20,  35, 36,  22,   7,  1 },
+               {  0,   5, 19,  34, 37,  24,   8,  1 },
+               {  0,   5, 18,  34, 37,  24,   9,  1 },
+               {  0,   4, 17,  33, 37,  26,  10,  1 },
+               {  0,   4, 16,  32, 38,  26,  10,  2 },
+               {  0,   3, 15,  31, 38,  28,  11,  2 },
+               {  0,   3, 14,  30, 38,  29,  12,  2 }
+       }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+       {       /* Ratio <= 65536 (~8:8) */
+               {  0, 128,   0,  0 },
+               { -4, 127,   5,  0 },
+               { -6, 124,  11, -1 },
+               { -8, 118,  19, -1 },
+               { -8, 111,  27, -2 },
+               { -8, 102,  37, -3 },
+               { -8,  92,  48, -4 },
+               { -7,  81,  59, -5 },
+               { -6,  70,  70, -6 },
+               { -5,  59,  81, -7 },
+               { -4,  48,  92, -8 },
+               { -3,  37, 102, -8 },
+               { -2,  27, 111, -8 },
+               { -1,  19, 118, -8 },
+               { -1,  11, 124, -6 },
+               {  0,   5, 127, -4 }
+       }, {    /* 65536 < Ratio <= 74898 (~8:7) */
+               {  8, 112,   8,  0 },
+               {  4, 111,  14, -1 },
+               {  1, 109,  20, -2 },
+               { -2, 105,  27, -2 },
+               { -3, 100,  34, -3 },
+               { -5,  93,  43, -3 },
+               { -5,  86,  51, -4 },
+               { -5,  77,  60, -4 },
+               { -5,  69,  69, -5 },
+               { -4,  60,  77, -5 },
+               { -4,  51,  86, -5 },
+               { -3,  43,  93, -5 },
+               { -3,  34, 100, -3 },
+               { -2,  27, 105, -2 },
+               { -2,  20, 109,  1 },
+               { -1,  14, 111,  4 }
+       }, {    /* 74898 < Ratio <= 87381 (~8:6) */
+               { 16,  96,  16,  0 },
+               { 12,  97,  21, -2 },
+               {  8,  96,  26, -2 },
+               {  5,  93,  32, -2 },
+               {  2,  89,  39, -2 },
+               {  0,  84,  46, -2 },
+               { -1,  79,  53, -3 },
+               { -2,  73,  59, -2 },
+               { -2,  66,  66, -2 },
+               { -2,  59,  73, -2 },
+               { -3,  53,  79, -1 },
+               { -2,  46,  84,  0 },
+               { -2,  39,  89,  2 },
+               { -2,  32,  93,  5 },
+               { -2,  26,  96,  8 },
+               { -2,  21,  97, 12 }
+       }, {    /* 87381 < Ratio <= 104857 (~8:5) */
+               { 22,  84,  22,  0 },
+               { 18,  85,  26, -1 },
+               { 14,  84,  31, -1 },
+               { 11,  82,  36, -1 },
+               {  8,  79,  42, -1 },
+               {  6,  76,  47, -1 },
+               {  4,  72,  52,  0 },
+               {  2,  68,  58,  0 },
+               {  1,  63,  63,  1 },
+               {  0,  58,  68,  2 },
+               {  0,  52,  72,  4 },
+               { -1,  47,  76,  6 },
+               { -1,  42,  79,  8 },
+               { -1,  36,  82, 11 },
+               { -1,  31,  84, 14 },
+               { -1,  26,  85, 18 }
+       }, {    /* 104857 < Ratio <= 131072 (~8:4) */
+               { 26,  76,  26,  0 },
+               { 22,  76,  30,  0 },
+               { 19,  75,  34,  0 },
+               { 16,  73,  38,  1 },
+               { 13,  71,  43,  1 },
+               { 10,  69,  47,  2 },
+               {  8,  66,  51,  3 },
+               {  6,  63,  55,  4 },
+               {  5,  59,  59,  5 },
+               {  4,  55,  63,  6 },
+               {  3,  51,  66,  8 },
+               {  2,  47,  69, 10 },
+               {  1,  43,  71, 13 },
+               {  1,  38,  73, 16 },
+               {  0,  34,  75, 19 },
+               {  0,  30,  76, 22 }
+       }, {    /* 131072 < Ratio <= 174762 (~8:3) */
+               { 29,  70,  29,  0 },
+               { 26,  68,  32,  2 },
+               { 23,  67,  36,  2 },
+               { 20,  66,  39,  3 },
+               { 17,  65,  43,  3 },
+               { 15,  63,  46,  4 },
+               { 12,  61,  50,  5 },
+               { 10,  58,  53,  7 },
+               {  8,  56,  56,  8 },
+               {  7,  53,  58, 10 },
+               {  5,  50,  61, 12 },
+               {  4,  46,  63, 15 },
+               {  3,  43,  65, 17 },
+               {  3,  39,  66, 20 },
+               {  2,  36,  67, 23 },
+               {  2,  32,  68, 26 }
+       }, {    /* 174762 < Ratio <= 262144 (~8:2) */
+               { 32,  64,  32,  0 },
+               { 28,  63,  34,  3 },
+               { 25,  62,  37,  4 },
+               { 22,  62,  40,  4 },
+               { 19,  61,  43,  5 },
+               { 17,  59,  46,  6 },
+               { 15,  58,  48,  7 },
+               { 13,  55,  51,  9 },
+               { 11,  53,  53, 11 },
+               {  9,  51,  55, 13 },
+               {  7,  48,  58, 15 },
+               {  6,  46,  59, 17 },
+               {  5,  43,  61, 19 },
+               {  4,  40,  62, 22 },
+               {  4,  37,  62, 25 },
+               {  3,  34,  63, 28 }
+       }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+       u32 cfg;
+       int count = GSC_RESET_TIMEOUT;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* s/w reset */
+       cfg = (GSC_SW_RESET_SRESET);
+       gsc_write(cfg, GSC_SW_RESET);
+
+       /* wait s/w reset complete */
+       while (count--) {
+               cfg = gsc_read(GSC_SW_RESET);
+               if (!cfg)
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       if (cfg) {
+               DRM_ERROR("failed to reset gsc h/w.\n");
+               return -EBUSY;
+       }
+
+       /* reset sequence */
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+       cfg |= (GSC_IN_BASE_ADDR_MASK |
+               GSC_IN_BASE_ADDR_PINGPONG(0));
+       gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       cfg |= (GSC_OUT_BASE_ADDR_MASK |
+               GSC_OUT_BASE_ADDR_PINGPONG(0));
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+       u32 gscblk_cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+       if (enable)
+               gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+                               GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+                               GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+       else
+               gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+       writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+               bool overflow, bool done)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+                       enable, overflow, done);
+
+       cfg = gsc_read(GSC_IRQ);
+       cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+       if (enable)
+               cfg |= GSC_IRQ_ENABLE;
+       else
+               cfg &= ~GSC_IRQ_ENABLE;
+
+       if (overflow)
+               cfg &= ~GSC_IRQ_OR_MASK;
+       else
+               cfg |= GSC_IRQ_OR_MASK;
+
+       if (done)
+               cfg &= ~GSC_IRQ_FRMDONE_MASK;
+       else
+               cfg |= GSC_IRQ_FRMDONE_MASK;
+
+       gsc_write(cfg, GSC_IRQ);
+}
+
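The mask bits in GSC_IRQ appear to act as disables: setting GSC_IRQ_OR_MASK
or GSC_IRQ_FRMDONE_MASK suppresses that interrupt source, so passing
overflow/done as true clears the corresponding mask bit to enable it. A usage
sketch matching how the driver calls this later from gsc_ippdrv_start() and
gsc_ippdrv_stop():

    /* enable frame-done interrupts, keep overflow masked */
    gsc_handle_irq(ctx, true, false, true);

    /* disable interrupts again when the command stops */
    gsc_handle_irq(ctx, false, false, true);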
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+                GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+                GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+                GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= GSC_IN_RGB565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= GSC_IN_XRGB8888;
+               break;
+       case DRM_FORMAT_BGRX8888:
+               cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_ORDER_LSB_Y |
+                       GSC_IN_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_ORDER_LSB_Y |
+                       GSC_IN_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_OEDER_LSB_C |
+                       GSC_IN_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_OEDER_LSB_C |
+                       GSC_IN_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+                       GSC_IN_YUV420_2P);
+               break;
+       case DRM_FORMAT_YUV422:
+               cfg |= GSC_IN_YUV422_3P;
+               break;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= GSC_IN_YUV420_3P;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+                       GSC_IN_YUV420_2P);
+               break;
+       case DRM_FORMAT_NV12MT:
+               cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_YFLIP;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_90_XFLIP;
+               else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_90_YFLIP;
+               else
+                       cfg |= GSC_IN_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= GSC_IN_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= GSC_IN_ROT_270;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       ctx->rotation = cfg &
+               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       *swap = ctx->rotation;
+
+       return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct gsc_scaler *sc = &ctx->sc;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+       }
+
+       /* pixel offset */
+       cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+               GSC_SRCIMG_OFFSET_Y(img_pos.y));
+       gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+       /* cropped size */
+       cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+               GSC_CROPPED_HEIGHT(img_pos.h));
+       gsc_write(cfg, GSC_CROPPED_SIZE);
+
+       DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+               __func__, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = gsc_read(GSC_SRCIMG_SIZE);
+       cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+               GSC_SRCIMG_WIDTH_MASK);
+
+       cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+               GSC_SRCIMG_HEIGHT(sz->vsize));
+
+       gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+       DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+               __func__, pos->w, sc->range);
+
+       if (pos->w >= GSC_WIDTH_ITU_709) {
+               if (sc->range)
+                       cfg |= GSC_IN_RGB_HD_WIDE;
+               else
+                       cfg |= GSC_IN_RGB_HD_NARROW;
+       } else {
+               if (sc->range)
+                       cfg |= GSC_IN_RGB_SD_WIDE;
+               else
+                       cfg |= GSC_IN_RGB_SD_NARROW;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool masked;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       /* mask register set */
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               masked = false;
+               break;
+       case IPP_BUF_DEQUEUE:
+               masked = true;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               return -EINVAL;
+       }
+
+       /* sequence id */
+       cfg &= ~mask;
+       cfg |= masked << buf_id;
+       gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+       return 0;
+}
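Each ping-pong buffer owns one bit of the base-address mask register: a
cleared bit means the slot is enqueued and usable by the DMA, a set bit means
it is dequeued (masked). A short worked sketch of the bit manipulation above,
with hypothetical values:

    u32 cfg = 0x0000000f;           /* all four source slots masked */
    u32 buf_id = 2;
    u32 mask = 0x00000001 << buf_id;

    cfg &= ~mask;                   /* IPP_BUF_ENQUEUE clears bit 2 */
    /* cfg is now 0x0000000b: slot 2 enqueued, the rest still masked */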
+
+static int gsc_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id >= GSC_MAX_SRC) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       GSC_IN_BASE_ADDR_Y(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                       GSC_IN_BASE_ADDR_CB(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                       GSC_IN_BASE_ADDR_CR(buf_id));
+               break;
+       case IPP_BUF_DEQUEUE:
+               gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+               gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+               gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+       .set_fmt = gsc_src_set_fmt,
+       .set_transf = gsc_src_set_transf,
+       .set_size = gsc_src_set_size,
+       .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = gsc_read(GSC_OUT_CON);
+       cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+                GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+                GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+                GSC_OUT_GLOBAL_ALPHA_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= GSC_OUT_RGB565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= GSC_OUT_XRGB8888;
+               break;
+       case DRM_FORMAT_BGRX8888:
+               cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+                       GSC_OUT_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+                       GSC_OUT_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_OEDER_LSB_C |
+                       GSC_OUT_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_OEDER_LSB_C |
+                       GSC_OUT_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= GSC_OUT_YUV420_3P;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+                       GSC_OUT_YUV420_2P);
+               break;
+       case DRM_FORMAT_NV12MT:
+               cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_OUT_CON);
+
+       return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_YFLIP;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_90_XFLIP;
+               else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_90_YFLIP;
+               else
+                       cfg |= GSC_IN_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= GSC_IN_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= GSC_IN_ROT_270;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       ctx->rotation = cfg &
+               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       *swap = ctx->rotation;
+
+       return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+       DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+       if (src >= dst * 8) {
+               DRM_ERROR("failed to make ratio and shift.\n");
+               return -EINVAL;
+       } else if (src >= dst * 4) {
+               *ratio = 4;
+       } else if (src >= dst * 2) {
+               *ratio = 2;
+       } else {
+               *ratio = 1;
+       }
+
+       return 0;
+}
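The prescale ratio is thus the largest power of two (1, 2 or 4) by which the
source still covers the destination, and anything at or beyond 8:1 is
rejected. A quick worked example with assumed sizes:

    u32 ratio;

    gsc_get_ratio_shift(1600, 400, &ratio); /* 1600 >= 400 * 4 -> ratio = 4 */
    gsc_get_ratio_shift(1000, 400, &ratio); /* 1000 >= 400 * 2 -> ratio = 2 */
    gsc_get_ratio_shift(3200, 400, &ratio); /* 3200 >= 400 * 8 -> -EINVAL */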
+
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+       if (hratio == 4 && vratio == 4)
+               *shfactor = 4;
+       else if ((hratio == 4 && vratio == 2) ||
+                (hratio == 2 && vratio == 4))
+               *shfactor = 3;
+       else if ((hratio == 4 && vratio == 1) ||
+                (hratio == 1 && vratio == 4) ||
+                (hratio == 2 && vratio == 2))
+               *shfactor = 2;
+       else if (hratio == 1 && vratio == 1)
+               *shfactor = 0;
+       else
+               *shfactor = 1;
+}
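The shift factor is effectively log2(pre_hratio * pre_vratio), i.e. the total
number of halvings the prescaler performs. For instance:

    u32 shfactor;

    gsc_get_prescaler_shfactor(4, 2, &shfactor);    /* log2(4 * 2) = 3 */
    gsc_get_prescaler_shfactor(1, 1, &shfactor);    /* log2(1 * 1) = 0 */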
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+               struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+       u32 src_w, src_h, dst_w, dst_h;
+       int ret = 0;
+
+       src_w = src->w;
+       src_h = src->h;
+
+       if (ctx->rotation) {
+               dst_w = dst->h;
+               dst_h = dst->w;
+       } else {
+               dst_w = dst->w;
+               dst_h = dst->h;
+       }
+
+       ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+               return ret;
+       }
+
+       ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+               __func__, sc->pre_hratio, sc->pre_vratio);
+
+       sc->main_hratio = (src_w << 16) / dst_w;
+       sc->main_vratio = (src_h << 16) / dst_h;
+
+       DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+               __func__, sc->main_hratio, sc->main_vratio);
+
+       gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+               &sc->pre_shfactor);
+
+       DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+               sc->pre_shfactor);
+
+       cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+               GSC_PRESC_H_RATIO(sc->pre_hratio) |
+               GSC_PRESC_V_RATIO(sc->pre_vratio));
+       gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+       return ret;
+}
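Putting the pieces together, a worked example with assumed sizes and no
rotation: scaling 1600x1200 down to 400x300 gives pre_hratio = pre_vratio = 4
and pre_shfactor = 4, and the main ratios come out in 16.16 fixed point:

    /* main_hratio = (1600 << 16) / 400 = 0x40000, i.e. 4.0 in 16.16 format;
     * main_vratio = (1200 << 16) / 300 = 0x40000 as well. */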
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+       int i, j, k, sc_ratio;
+
+       if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+               sc_ratio = 0;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+               sc_ratio = 1;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+               sc_ratio = 2;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+               sc_ratio = 3;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+               sc_ratio = 4;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+               sc_ratio = 5;
+       else
+               sc_ratio = 6;
+
+       for (i = 0; i < GSC_COEF_PHASE; i++)
+               for (j = 0; j < GSC_COEF_H_8T; j++)
+                       for (k = 0; k < GSC_COEF_DEPTH; k++)
+                               gsc_write(h_coef_8t[sc_ratio][i][j],
+                                       GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+       int i, j, k, sc_ratio;
+
+       if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+               sc_ratio = 0;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+               sc_ratio = 1;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+               sc_ratio = 2;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+               sc_ratio = 3;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+               sc_ratio = 4;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+               sc_ratio = 5;
+       else
+               sc_ratio = 6;
+
+       for (i = 0; i < GSC_COEF_PHASE; i++)
+               for (j = 0; j < GSC_COEF_V_4T; j++)
+                       for (k = 0; k < GSC_COEF_DEPTH; k++)
+                               gsc_write(v_coef_4t[sc_ratio][i][j],
+                                       GSC_VCOEF(i, j, k));
+}
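Both coefficient loaders bucket the 16.16 main ratio into one of seven bands
bounded by the GSC_SC_DOWN_RATIO_* thresholds. Continuing the example above,
a ratio of 0x40000 (262144) exceeds GSC_SC_DOWN_RATIO_3_8 (174762), so
sc_ratio = 6 and the ~8:2 filter coefficients are programmed:

    gsc_set_h_coef(ctx, 0x40000);   /* loads h_coef_8t[6][...][...] */
    gsc_set_v_coef(ctx, 0x40000);   /* loads v_coef_4t[6][...][...] */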
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+               __func__, sc->main_hratio, sc->main_vratio);
+
+       gsc_set_h_coef(ctx, sc->main_hratio);
+       cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+       gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+       gsc_set_v_coef(ctx, sc->main_vratio);
+       cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+       gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct gsc_scaler *sc = &ctx->sc;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+       }
+
+       /* pixel offset */
+       cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+               GSC_DSTIMG_OFFSET_Y(pos->y));
+       gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+       /* scaled size */
+       cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+       gsc_write(cfg, GSC_SCALED_SIZE);
+
+       DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+               __func__, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = gsc_read(GSC_DSTIMG_SIZE);
+       cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+               GSC_DSTIMG_WIDTH_MASK);
+       cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+               GSC_DSTIMG_HEIGHT(sz->vsize));
+       gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+       cfg = gsc_read(GSC_OUT_CON);
+       cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+       DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+               __func__, pos->w, sc->range);
+
+       if (pos->w >= GSC_WIDTH_ITU_709) {
+               if (sc->range)
+                       cfg |= GSC_OUT_RGB_HD_WIDE;
+               else
+                       cfg |= GSC_OUT_RGB_HD_NARROW;
+       } else {
+               if (sc->range)
+                       cfg |= GSC_OUT_RGB_SD_WIDE;
+               else
+                       cfg |= GSC_OUT_RGB_SD_NARROW;
+       }
+
+       gsc_write(cfg, GSC_OUT_CON);
+
+       return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+       u32 cfg, i, buf_num = GSC_REG_SZ;
+       u32 mask = 0x00000001;
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+       for (i = 0; i < GSC_REG_SZ; i++)
+               if (cfg & (mask << i))
+                       buf_num--;
+
+       DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+       return buf_num;
+}
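The sequence count is simply GSC_REG_SZ (16) minus the number of masked
slots. With a hypothetical readback where only slots 0 and 1 are enqueued:

    u32 cfg = 0x0000fffc;   /* bits 2..15 set: 14 slots masked */
    /* buf_num = 16 - 14 = 2 enqueued destination buffers */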
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool masked;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       mutex_lock(&ctx->lock);
+
+       /* mask register set */
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               masked = false;
+               break;
+       case IPP_BUF_DEQUEUE:
+               masked = true;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
+       /* sequence id */
+       cfg &= ~mask;
+       cfg |= masked << buf_id;
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       /* interrupt enable */
+       if (buf_type == IPP_BUF_ENQUEUE &&
+           gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+               gsc_handle_irq(ctx, true, false, true);
+
+       /* interrupt disable */
+       if (buf_type == IPP_BUF_DEQUEUE &&
+           gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+               gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+       mutex_unlock(&ctx->lock);
+       return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id >= GSC_MAX_DST) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       GSC_OUT_BASE_ADDR_Y(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                       GSC_OUT_BASE_ADDR_CB(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                       GSC_OUT_BASE_ADDR_CR(buf_id));
+               break;
+       case IPP_BUF_DEQUEUE:
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+       .set_fmt = gsc_dst_set_fmt,
+       .set_transf = gsc_dst_set_transf,
+       .set_size = gsc_dst_set_size,
+       .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       if (enable) {
+               clk_enable(ctx->gsc_clk);
+               ctx->suspended = false;
+       } else {
+               clk_disable(ctx->gsc_clk);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+       u32 cfg, curr_index, i;
+       u32 buf_id = GSC_MAX_SRC;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+       curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+       for (i = curr_index; i < GSC_MAX_SRC; i++) {
+               if (!((cfg >> i) & 0x1)) {
+                       buf_id = i;
+                       break;
+               }
+       }
+
+       if (buf_id == GSC_MAX_SRC) {
+               DRM_ERROR("failed to get in buffer index.\n");
+               return -EINVAL;
+       }
+
+       ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+       if (ret < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+               curr_index, buf_id);
+
+       return buf_id;
+}
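The hardware reports which slot it is currently consuming; the scan walks
upward from that index for the first unmasked (still enqueued) slot and
immediately dequeues it so it cannot be picked twice. Note the scan does not
wrap around below curr_index. An illustrative walk-through, assuming the
mask bits read back as 0x4 and the current-index field decodes to 1:

    /* i = 1: bit 1 of 0x4 is clear -> buf_id = 1, scan stops;
     * slot 1 is then dequeued via gsc_src_set_buf_seq(). */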
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+       u32 cfg, curr_index, i;
+       u32 buf_id = GSC_MAX_DST;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+       for (i = curr_index; i < GSC_MAX_DST; i++) {
+               if (!((cfg >> i) & 0x1)) {
+                       buf_id = i;
+                       break;
+               }
+       }
+
+       if (buf_id == GSC_MAX_DST) {
+               DRM_ERROR("failed to get out buffer index.\n");
+               return -EINVAL;
+       }
+
+       ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+       if (ret < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+               curr_index, buf_id);
+
+       return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+       struct gsc_context *ctx = dev_id;
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work =
+               c_node->event_work;
+       u32 status;
+       int buf_id[EXYNOS_DRM_OPS_MAX];
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       status = gsc_read(GSC_IRQ);
+       if (status & GSC_IRQ_STATUS_OR_IRQ) {
+               dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+                       ctx->id, status);
+               return IRQ_NONE;
+       }
+
+       if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+               dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+                       ctx->id, status);
+
+               buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+               if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+                       return IRQ_HANDLED;
+
+               buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+               if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+                       return IRQ_HANDLED;
+
+               DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+                       buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+               event_work->ippdrv = ippdrv;
+               event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+                       buf_id[EXYNOS_DRM_OPS_SRC];
+               event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+                       buf_id[EXYNOS_DRM_OPS_DST];
+               queue_work(ippdrv->event_workq,
+                       (struct work_struct *)event_work);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->writeback = 1;
+       prop_list->refresh_min = GSC_REFRESH_MIN;
+       prop_list->refresh_max = GSC_REFRESH_MAX;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 1;
+       prop_list->crop = 1;
+       prop_list->crop_max.hsize = GSC_CROP_MAX;
+       prop_list->crop_max.vsize = GSC_CROP_MAX;
+       prop_list->crop_min.hsize = GSC_CROP_MIN;
+       prop_list->crop_min.vsize = GSC_CROP_MIN;
+       prop_list->scale = 1;
+       prop_list->scale_max.hsize = GSC_SCALE_MAX;
+       prop_list->scale_max.vsize = GSC_SCALE_MAX;
+       prop_list->scale_min.hsize = GSC_SCALE_MIN;
+       prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+       case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos *pos;
+       struct drm_exynos_sz *sz;
+       bool swap;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               /* check for flip */
+               if (!gsc_check_drm_flip(config->flip)) {
+                       DRM_ERROR("invalid flip.\n");
+                       goto err_property;
+               }
+
+               /* check for degree */
+               switch (config->degree) {
+               case EXYNOS_DRM_DEGREE_90:
+               case EXYNOS_DRM_DEGREE_270:
+                       swap = true;
+                       break;
+               case EXYNOS_DRM_DEGREE_0:
+               case EXYNOS_DRM_DEGREE_180:
+                       swap = false;
+                       break;
+               default:
+                       DRM_ERROR("invalid degree.\n");
+                       goto err_property;
+               }
+
+               /* check for buffer bound */
+               if ((pos->x + pos->w > sz->hsize) ||
+                       (pos->y + pos->h > sz->vsize)) {
+                       DRM_ERROR("out of buf bound.\n");
+                       goto err_property;
+               }
+
+               /* check for crop */
+               if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+                       if (swap) {
+                               if ((pos->h < pp->crop_min.hsize) ||
+                                       (sz->vsize > pp->crop_max.hsize) ||
+                                       (pos->w < pp->crop_min.vsize) ||
+                                       (sz->hsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->crop_min.hsize) ||
+                                       (sz->hsize > pp->crop_max.hsize) ||
+                                       (pos->h < pp->crop_min.vsize) ||
+                                       (sz->vsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+
+               /* check for scale */
+               if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+                       if (swap) {
+                               if ((pos->h < pp->scale_min.hsize) ||
+                                       (sz->vsize > pp->scale_max.hsize) ||
+                                       (pos->w < pp->scale_min.vsize) ||
+                                       (sz->hsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->scale_min.hsize) ||
+                                       (sz->hsize > pp->scale_max.hsize) ||
+                                       (pos->h < pp->scale_min.vsize) ||
+                                       (sz->vsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+
+err_property:
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+                       i ? "dst" : "src", config->flip, config->degree,
+                       pos->x, pos->y, pos->w, pos->h,
+                       sz->hsize, sz->vsize);
+       }
+
+       return -EINVAL;
+}
+
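As a concrete illustration of the bound check above, a hypothetical source
config with a 1920x1080 buffer and a crop rectangle hanging off its right
edge is rejected:

    struct drm_exynos_pos pos = { .x = 1800, .y = 0, .w = 200, .h = 100 };
    struct drm_exynos_sz sz = { .hsize = 1920, .vsize = 1080 };
    /* pos.x + pos.w == 2000 > sz.hsize == 1920 -> "out of buf bound." */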
+static int gsc_ippdrv_reset(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct gsc_scaler *sc = &ctx->sc;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* reset h/w block */
+       ret = gsc_sw_reset(ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to reset hardware.\n");
+               return ret;
+       }
+
+       /* scaler setting */
+       memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+       sc->range = true;
+
+       return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos   img_pos[EXYNOS_DRM_OPS_MAX];
+       struct drm_exynos_ipp_set_wb set_wb;
+       u32 cfg;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       gsc_handle_irq(ctx, true, false, true);
+
+       for_each_ipp_ops(i) {
+               config = &property->config[i];
+               img_pos[i] = config->pos;
+       }
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* enable one shot */
+               cfg = gsc_read(GSC_ENABLE);
+               cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+                       GSC_ENABLE_CLK_GATE_MODE_MASK);
+               cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+               gsc_write(cfg, GSC_ENABLE);
+
+               /* src dma memory */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= GSC_IN_PATH_MEMORY;
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       case IPP_CMD_WB:
+               set_wb.enable = 1;
+               set_wb.refresh = property->refresh_rate;
+               gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+               /* src local path */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       case IPP_CMD_OUTPUT:
+               /* src dma memory */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= GSC_IN_PATH_MEMORY;
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst local path */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       default:
+               ret = -EINVAL;
+               dev_err(dev, "invalid operations.\n");
+               return ret;
+       }
+
+       ret = gsc_set_prescaler(ctx, &ctx->sc,
+               &img_pos[EXYNOS_DRM_OPS_SRC],
+               &img_pos[EXYNOS_DRM_OPS_DST]);
+       if (ret) {
+               dev_err(dev, "failed to set precalser.\n");
+               return ret;
+       }
+
+       gsc_set_scaler(ctx, &ctx->sc);
+
+       cfg = gsc_read(GSC_ENABLE);
+       cfg |= GSC_ENABLE_ON;
+       gsc_write(cfg, GSC_ENABLE);
+
+       return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* bypass */
+               break;
+       case IPP_CMD_WB:
+               gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               dev_err(dev, "invalid operations.\n");
+               break;
+       }
+
+       gsc_handle_irq(ctx, false, false, true);
+
+       /* reset sequence */
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       cfg = gsc_read(GSC_ENABLE);
+       cfg &= ~GSC_ENABLE_ON;
+       gsc_write(cfg, GSC_ENABLE);
+}
+
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gsc_context *ctx;
+       struct resource *res;
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       /* clock control */
+       ctx->gsc_clk = clk_get(dev, "gscl");
+       if (IS_ERR(ctx->gsc_clk)) {
+               dev_err(dev, "failed to get gsc clock.\n");
+               ret = PTR_ERR(ctx->gsc_clk);
+               goto err_ctx;
+       }
+
+       /* resource memory */
+       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ctx->regs_res) {
+               dev_err(dev, "failed to find registers.\n");
+               ret = -ENOENT;
+               goto err_clk;
+       }
+
+       ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+       if (!ctx->regs) {
+               dev_err(dev, "failed to map registers.\n");
+               ret = -ENXIO;
+               goto err_clk;
+       }
+
+       /* resource irq */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "failed to request irq resource.\n");
+               ret = -ENOENT;
+               goto err_get_regs;
+       }
+
+       ctx->irq = res->start;
+       ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+               IRQF_ONESHOT, "drm_gsc", ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq.\n");
+               goto err_get_regs;
+       }
+
+       /* context initialization */
+       ctx->id = pdev->id;
+
+       ippdrv = &ctx->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+       ippdrv->check_property = gsc_ippdrv_check_property;
+       ippdrv->reset = gsc_ippdrv_reset;
+       ippdrv->start = gsc_ippdrv_start;
+       ippdrv->stop = gsc_ippdrv_stop;
+       ret = gsc_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_get_irq;
+       }
+
+       DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+               (int)ippdrv);
+
+       mutex_init(&ctx->lock);
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm gsc device.\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+err_get_irq:
+       free_irq(ctx->irq, ctx);
+err_get_regs:
+       devm_iounmap(dev, ctx->regs);
+err_clk:
+       clk_put(ctx->gsc_clk);
+err_ctx:
+       devm_kfree(dev, ctx);
+       return ret;
+}
+
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+       mutex_destroy(&ctx->lock);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
+
+       free_irq(ctx->irq, ctx);
+       devm_iounmap(dev, ctx->regs);
+
+       clk_put(ctx->gsc_clk);
+
+       devm_kfree(dev, ctx);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (!pm_runtime_suspended(dev))
+               return gsc_clk_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+       SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+       .probe          = gsc_probe,
+       .remove         = __devexit_p(gsc_remove),
+       .driver         = {
+               .name   = "exynos-drm-gsc",
+               .owner  = THIS_MODULE,
+               .pm     = &gsc_pm_ops,
+       },
+};
+
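
For orientation: gsc_driver is only a definition; something still has to call platform_driver_register() on it, and that call site is outside this hunk (presumably the common exynos drm module init). A minimal, hypothetical sketch of such a registration:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    extern struct platform_driver gsc_driver;	/* defined above */

    /* Hypothetical module hooks; not part of this patch. */
    static int __init gsc_example_init(void)
    {
    	/* runs gsc_probe() for each "exynos-drm-gsc" platform device */
    	return platform_driver_register(&gsc_driver);
    }
    module_init(gsc_example_init);

    static void __exit gsc_example_exit(void)
    {
    	platform_driver_unregister(&gsc_driver);
    }
    module_exit(gsc_example_exit);

    MODULE_LICENSE("GPL");
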
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644 (file)
index 0000000..b3c3bc6
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
index 2d11e70b601a4776c7b951ba82ecc66eb855a475..55793c46e3c21c3c0e86125aa51a29f77382b14e 100644 (file)
@@ -29,6 +29,9 @@
 #define get_ctx_from_subdrv(subdrv)    container_of(subdrv,\
                                        struct drm_hdmi_context, subdrv);
 
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
 /* Common hdmi subdrv needs to access the hdmi and mixer through context.
 * These should be initialized by the respective drivers */
 static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
        bool    enabled[MIXER_WIN_NR];
 };
 
+int exynos_platform_device_hdmi_register(void)
+{
+       if (exynos_drm_hdmi_pdev)
+               return -EEXIST;
+
+       exynos_drm_hdmi_pdev = platform_device_register_simple(
+                       "exynos-drm-hdmi", -1, NULL, 0);
+       if (IS_ERR(exynos_drm_hdmi_pdev)) {
+               int ret = PTR_ERR(exynos_drm_hdmi_pdev);
+
+               exynos_drm_hdmi_pdev = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+       if (exynos_drm_hdmi_pdev) {
+               platform_device_unregister(exynos_drm_hdmi_pdev);
+               exynos_drm_hdmi_pdev = NULL;
+       }
+}
+
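
The pair above hands control of the "exynos-drm-hdmi" platform device lifetime to the common drm driver. A sketch of the expected call sites; the function names here are illustrative, not from this patch:

    /* Illustrative call sites only. */
    static int example_drm_load(void)
    {
    	int ret = exynos_platform_device_hdmi_register();

    	/* -EEXIST just means the device was already registered */
    	return (ret == -EEXIST) ? 0 : ret;
    }

    static void example_drm_unload(void)
    {
    	exynos_platform_device_hdmi_unregister();
    }
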
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
 {
        if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
                return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (mixer_ops && mixer_ops->wait_for_vblank)
+               mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
                                struct drm_connector *connector,
                                const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
        .apply = drm_hdmi_apply,
        .enable_vblank = drm_hdmi_enable_vblank,
        .disable_vblank = drm_hdmi_disable_vblank,
+       .wait_for_vblank = drm_hdmi_wait_for_vblank,
        .mode_fixup = drm_hdmi_mode_fixup,
        .mode_set = drm_hdmi_mode_set,
        .get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
        ctx->enabled[win] = false;
 }
 
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
-       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       if (mixer_ops && mixer_ops->wait_for_vblank)
-               mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
        .mode_set = drm_mixer_mode_set,
        .commit = drm_mixer_commit,
        .disable = drm_mixer_disable,
-       .wait_for_vblank = drm_mixer_wait_for_vblank,
 };
 
 static struct exynos_drm_manager hdmi_manager = {
index 54b522353e48a1e92de37b2fd7631db0c786e87d..fcc3093ec8fe3f488379e8880e194037784c56b3 100644 (file)
@@ -65,10 +65,10 @@ struct exynos_mixer_ops {
        int (*iommu_on)(void *ctx, bool enable);
        int (*enable_vblank)(void *ctx, int pipe);
        void (*disable_vblank)(void *ctx);
+       void (*wait_for_vblank)(void *ctx);
        void (*dpms)(void *ctx, int mode);
 
        /* overlay */
-       void (*wait_for_vblank)(void *ctx);
        void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
        void (*win_commit)(void *ctx, int zpos);
        void (*win_disable)(void *ctx, int zpos);
index 09db1983eb1a56e6cba3f60043c32b63b2c1162e..2482b7f96341ac196c98e10eeed359dcdc0d70ea 100644 (file)
@@ -56,8 +56,8 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
        mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
                                                priv->da_space_size,
                                                priv->da_space_order);
-       if (!mapping)
-               return -ENOMEM;
+       if (IS_ERR(mapping))
+               return PTR_ERR(mapping);
 
        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
                                        GFP_KERNEL);
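
This fix matters because arm_iommu_create_mapping() reports failure through ERR_PTR(), never NULL, so a NULL test can let an error pointer escape into later code. The general idiom, as a minimal sketch (example_create() is a hypothetical callee):

    #include <linux/err.h>
    #include <linux/device.h>

    void *example_create(struct device *dev);	/* hypothetical, returns ERR_PTR on failure */

    static int example_check(struct device *dev)
    {
    	void *obj = example_create(dev);

    	if (IS_ERR(obj))
    		return PTR_ERR(obj);	/* pointer encodes a negative errno */

    	return 0;	/* obj is a valid pointer here */
    }
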
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644 (file)
index 0000000..49eebe9
--- /dev/null
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image
+ * scaler/rotator and input/output DMA operations
+ * using FIMC, GSC, Rotator and so on.
+ * IPP is an integrated device driver for hardware blocks that
+ * share these attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free the subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open for multi-open.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)      (c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+       struct drm_pending_event        base;
+       struct drm_exynos_ipp_event     event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+       struct list_head        list;
+       enum drm_exynos_ops_id  ops_id;
+       u32     prop_id;
+       u32     buf_id;
+       struct drm_exynos_ipp_buf_info  buf_info;
+       struct drm_file         *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: subdrv that hooks the ipp core into drm driver load/unload.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+       struct exynos_drm_subdrv        subdrv;
+       struct mutex    ipp_lock;
+       struct mutex    prop_lock;
+       struct idr      ipp_idr;
+       struct idr      prop_idr;
+       struct workqueue_struct *event_workq;
+       struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ippdrv)
+               return -EINVAL;
+
+       mutex_lock(&exynos_drm_ippdrv_lock);
+       list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+       mutex_unlock(&exynos_drm_ippdrv_lock);
+
+       return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ippdrv)
+               return -EINVAL;
+
+       mutex_lock(&exynos_drm_ippdrv_lock);
+       list_del(&ippdrv->drv_list);
+       mutex_unlock(&exynos_drm_ippdrv_lock);
+
+       return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+               u32 *idp)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("failed to get idr.\n");
+               return -ENOMEM;
+       }
+
+       /* do the allocation under our mutex */
+       mutex_lock(lock);
+       ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+       mutex_unlock(lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       return ret;
+}
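
ipp_create_id() is the classic idr allocation pattern of this era: reserve memory with idr_pre_get(), then retry idr_get_new_above() for as long as it returns -EAGAIN. A usage sketch, mirroring the call made in exynos_drm_ipp_set_property() further down in this file:

    static int example_alloc_prop_id(struct ipp_context *ctx,
    		struct drm_exynos_ipp_cmd_node *c_node, u32 *prop_id)
    {
    	/* ids start at 1, so a prop_id of 0 can mean "not assigned" */
    	return ipp_create_id(&ctx->prop_idr, &ctx->prop_lock,
    			c_node, prop_id);
    }
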
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+       void *obj;
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+       mutex_lock(lock);
+
+       /* find object using handle */
+       obj = idr_find(id_idr, id);
+       if (!obj) {
+               DRM_ERROR("failed to find object.\n");
+               mutex_unlock(lock);
+               return ERR_PTR(-ENODEV);
+       }
+
+       mutex_unlock(lock);
+
+       return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+               enum drm_exynos_ipp_cmd cmd)
+{
+       /*
+        * The driver is busy if it is marked dedicated, or if it is
+        * running a WB/OUTPUT operation while powered on.
+        */
+       if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+           !pm_runtime_suspended(ippdrv->dev)))
+               return true;
+
+       return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+               struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       u32 ipp_id = property->ipp_id;
+
+       DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+       if (ipp_id) {
+               /* find ipp driver using idr */
+               ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+                       ipp_id);
+               if (IS_ERR_OR_NULL(ippdrv)) {
+                       DRM_ERROR("ipp%d driver not found.\n", ipp_id);
+                       return ippdrv;
+               }
+
+               /*
+                * WB and OUTPUT operations do not support multi-operation,
+                * so the driver is marked dedicated in the set property
+                * ioctl; the flag is cleared when the driver finishes.
+                */
+               if (ipp_check_dedicated(ippdrv, property->cmd)) {
+                       DRM_ERROR("chosen device is already in use.\n");
+                       return ERR_PTR(-EBUSY);
+               }
+
+               /*
+                * ipp drivers have different capabilities, so the
+                * property must be checked to find the correct device.
+                */
+               if (ippdrv->check_property &&
+                   ippdrv->check_property(ippdrv->dev, property)) {
+                       DRM_ERROR("property not supported.\n");
+                       return ERR_PTR(-EINVAL);
+               }
+
+               return ippdrv;
+       } else {
+               /*
+                * The user application did not set ipp_id, so search
+                * the whole driver list for a suitable driver.
+                */
+               list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+                       if (ipp_check_dedicated(ippdrv, property->cmd)) {
+                               DRM_DEBUG_KMS("%s:device already in use.\n", __func__);
+                               continue;
+                       }
+
+                       if (ippdrv->check_property &&
+                           ippdrv->check_property(ippdrv->dev, property)) {
+                               DRM_DEBUG_KMS("%s:property not supported.\n",
+                                       __func__);
+                               continue;
+                       }
+
+                       return ippdrv;
+               }
+
+               DRM_ERROR("no ipp driver supports the requested operations.\n");
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+       if (list_empty(&exynos_drm_ippdrv_list)) {
+               DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+               return ERR_PTR(-ENODEV);
+       }
+
+       /*
+        * Search for the ipp driver that owns this prop_id handle.
+        * The ipp subsystem looks up drivers by prop_id in several
+        * paths, e.g. PAUSE state, queue buf and command control.
+        */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+                       count++, (int)ippdrv);
+
+               if (!list_empty(&ippdrv->cmd_list)) {
+                       list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+                               if (c_node->property.prop_id == prop_id)
+                                       return ippdrv;
+               }
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_prop_list *prop_list = data;
+       struct exynos_drm_ippdrv *ippdrv;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!prop_list) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+       if (!prop_list->ipp_id) {
+               list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+                       count++;
+               /*
+                * Report the ippdrv count to the user application:
+                * it first queries the count, then queries each
+                * driver's capabilities using ipp_id.
+                */
+               prop_list->count = count;
+       } else {
+               /*
+                * Report the capabilities of the ippdrv given by ipp_id.
+                * Some devices do not support the wb or output interface,
+                * so the user application uses this ioctl to detect the
+                * correct ipp driver.
+                */
+               ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+                                               prop_list->ipp_id);
+               if (IS_ERR(ippdrv)) {
+                       DRM_ERROR("ipp%d driver not found.\n",
+                                       prop_list->ipp_id);
+                       return -EINVAL;
+               }
+
+               *prop_list = *ippdrv->prop_list;
+       }
+
+       return 0;
+}
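
From userspace this implies a two-step query: call the ioctl with ipp_id == 0 to learn the driver count, then once per id for the capabilities. A hedged sketch, with ioctl and struct names taken from the exynos uapi header added by this series and error handling trimmed:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/exynos_drm.h>

    /* Sketch: enumerate ipp drivers on an already-open drm fd. */
    static int example_enumerate(int fd)
    {
    	struct drm_exynos_ipp_prop_list plist;
    	unsigned int i, count;

    	memset(&plist, 0, sizeof(plist));
    	plist.ipp_id = 0;	/* step 1: ask only for the driver count */
    	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist) < 0)
    		return -1;

    	count = plist.count;
    	for (i = 1; i <= count; i++) {	/* ids start at 1 */
    		plist.ipp_id = i;	/* step 2: query one driver's caps */
    		if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist) < 0)
    			continue;
    		printf("ipp%u: capabilities queried\n", i);
    	}
    	return 0;
    }
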
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+               int idx)
+{
+       struct drm_exynos_ipp_config *config = &property->config[idx];
+       struct drm_exynos_pos *pos = &config->pos;
+       struct drm_exynos_sz *sz = &config->sz;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+               __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+       DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+               __func__, pos->x, pos->y, pos->w, pos->h,
+               sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       u32 prop_id = property->prop_id;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+       ippdrv = ipp_find_drv_by_handle(prop_id);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Find the command node in the ippdrv command list using
+        * prop_id, and store the new property in that node.
+        */
+       list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+               if ((c_node->property.prop_id == prop_id) &&
+                   (c_node->state == IPP_STATE_STOP)) {
+                       DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+                               __func__, property->cmd, (int)ippdrv);
+
+                       c_node->property = *property;
+                       return 0;
+               }
+       }
+
+       DRM_ERROR("failed to search property.\n");
+
+       return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+       struct drm_exynos_ipp_cmd_work *cmd_work;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+       if (!cmd_work) {
+               DRM_ERROR("failed to alloc cmd_work.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+       return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+       struct drm_exynos_ipp_event_work *event_work;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+       if (!event_work) {
+               DRM_ERROR("failed to alloc event_work.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+       return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_property *property = data;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!property) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Log the various property values set by the user application.
+        */
+       for_each_ipp_ops(i)
+               ipp_print_property(property, i);
+
+       /*
+        * The set property ioctl normally generates a new prop_id, but
+        * here a prop_id was already assigned by an earlier set property
+        * call (e.g. PAUSE state), so find the existing command node and
+        * reuse its prop_id instead of allocating a new one.
+        */
+       if (property->prop_id) {
+               DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+               return ipp_find_and_set_property(property);
+       }
+
+       /* find ipp driver using ipp id */
+       ippdrv = ipp_find_driver(ctx, property);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EINVAL;
+       }
+
+       /* allocate command node */
+       c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+       if (!c_node) {
+               DRM_ERROR("failed to allocate command node.\n");
+               return -ENOMEM;
+       }
+
+       /* create property id */
+       ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+               &property->prop_id);
+       if (ret) {
+               DRM_ERROR("failed to create id.\n");
+               goto err_clear;
+       }
+
+       DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+               __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+       /* store the property information in the command node */
+       c_node->priv = priv;
+       c_node->property = *property;
+       c_node->state = IPP_STATE_IDLE;
+
+       c_node->start_work = ipp_create_cmd_work();
+       if (IS_ERR(c_node->start_work)) {
+               DRM_ERROR("failed to create start work.\n");
+               ret = PTR_ERR(c_node->start_work);
+               goto err_clear;
+       }
+
+       c_node->stop_work = ipp_create_cmd_work();
+       if (IS_ERR(c_node->stop_work)) {
+               DRM_ERROR("failed to create stop work.\n");
+               ret = PTR_ERR(c_node->stop_work);
+               goto err_free_start;
+       }
+
+       c_node->event_work = ipp_create_event_work();
+       if (IS_ERR(c_node->event_work)) {
+               DRM_ERROR("failed to create event work.\n");
+               ret = PTR_ERR(c_node->event_work);
+               goto err_free_stop;
+       }
+
+       mutex_init(&c_node->cmd_lock);
+       mutex_init(&c_node->mem_lock);
+       mutex_init(&c_node->event_lock);
+
+       init_completion(&c_node->start_complete);
+       init_completion(&c_node->stop_complete);
+
+       for_each_ipp_ops(i)
+               INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+       INIT_LIST_HEAD(&c_node->event_list);
+       list_splice_init(&priv->event_list, &c_node->event_list);
+       list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+       /* make dedicated state without m2m */
+       if (!ipp_is_m2m_cmd(property->cmd))
+               ippdrv->dedicated = true;
+
+       return 0;
+
+err_free_stop:
+       kfree(c_node->stop_work);
+err_free_start:
+       kfree(c_node->start_work);
+err_clear:
+       kfree(c_node);
+       return ret;
+}
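
Seen from userspace, this ioctl is where a whole transform is described. A hedged M2M example; struct and field names come from the uapi header added by this series, and the sizes and format are purely illustrative:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm_fourcc.h>
    #include <drm/exynos_drm.h>

    /* Sketch: describe a 640x480 -> 320x240 XRGB8888 M2M scale. */
    static int example_set_property(int fd, __u32 *prop_id)
    {
    	struct drm_exynos_ipp_property prop;
    	struct drm_exynos_ipp_config *src, *dst;

    	memset(&prop, 0, sizeof(prop));
    	prop.cmd = IPP_CMD_M2M;
    	prop.ipp_id = 0;	/* 0: let the core pick a capable driver */

    	src = &prop.config[EXYNOS_DRM_OPS_SRC];
    	src->ops_id = EXYNOS_DRM_OPS_SRC;
    	src->fmt = DRM_FORMAT_XRGB8888;
    	src->sz.hsize = 640;
    	src->sz.vsize = 480;
    	src->pos.w = 640;
    	src->pos.h = 480;

    	dst = &prop.config[EXYNOS_DRM_OPS_DST];
    	dst->ops_id = EXYNOS_DRM_OPS_DST;
    	dst->fmt = DRM_FORMAT_XRGB8888;
    	dst->sz.hsize = 320;
    	dst->sz.vsize = 240;
    	dst->pos.w = 320;
    	dst->pos.h = 240;

    	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop) < 0)
    		return -1;

    	*prop_id = prop.prop_id;	/* allocated by ipp_create_id() */
    	return 0;
    }
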
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* delete list */
+       list_del(&c_node->list);
+
+       /* destroy mutex */
+       mutex_destroy(&c_node->cmd_lock);
+       mutex_destroy(&c_node->mem_lock);
+       mutex_destroy(&c_node->event_lock);
+
+       /* free command node */
+       kfree(c_node->start_work);
+       kfree(c_node->stop_work);
+       kfree(c_node->event_work);
+       kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct list_head *head;
+       int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_lock(&c_node->mem_lock);
+
+       for_each_ipp_ops(i) {
+               /* source/destination memory list */
+               head = &c_node->mem_list[i];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+                               i ? "dst" : "src");
+                       continue;
+               }
+
+               /* find memory node entry */
+               list_for_each_entry(m_node, head, list) {
+                       DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+                               i ? "dst" : "src", count[i], (int)m_node);
+                       count[i]++;
+               }
+       }
+
+       DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+               min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+               max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+       /*
+        * M2M operations need paired src/dst memory addresses, so
+        * use the minimum of the src and dst counts. Other cases do
+        * not use paired memory, so use the maximum count.
+        */
+       if (ipp_is_m2m_cmd(property->cmd))
+               ret = min(count[EXYNOS_DRM_OPS_SRC],
+                       count[EXYNOS_DRM_OPS_DST]);
+       else
+               ret = max(count[EXYNOS_DRM_OPS_SRC],
+                       count[EXYNOS_DRM_OPS_DST]);
+
+       mutex_unlock(&c_node->mem_lock);
+
+       return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+               *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct list_head *head;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+       /* source/destination memory list */
+       head = &c_node->mem_list[qbuf->ops_id];
+
+       /* find memory node from memory list */
+       list_for_each_entry(m_node, head, list) {
+               DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+                       __func__, count++, (int)m_node);
+
+               /* compare buffer id */
+               if (m_node->buf_id == qbuf->buf_id)
+                       return m_node;
+       }
+
+       return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       struct exynos_drm_ipp_ops *ops = NULL;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid queue node.\n");
+               return -EFAULT;
+       }
+
+       mutex_lock(&c_node->mem_lock);
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+       /* get operations callback */
+       ops = ippdrv->ops[m_node->ops_id];
+       if (!ops) {
+               DRM_ERROR("ops not supported.\n");
+               ret = -EFAULT;
+               goto err_unlock;
+       }
+
+       /* set address and enable irq */
+       if (ops->set_addr) {
+               ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+                       m_node->buf_id, IPP_BUF_ENQUEUE);
+               if (ret) {
+                       DRM_ERROR("failed to set addr.\n");
+                       goto err_unlock;
+               }
+       }
+
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+               *ipp_get_mem_node(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_buf_info buf_info;
+       void *addr;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_lock(&c_node->mem_lock);
+
+       m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+       if (!m_node) {
+               DRM_ERROR("failed to allocate queue node.\n");
+               goto err_unlock;
+       }
+
+       /* clear base address for error handling */
+       memset(&buf_info, 0x0, sizeof(buf_info));
+
+       /* operations, buffer id */
+       m_node->ops_id = qbuf->ops_id;
+       m_node->prop_id = qbuf->prop_id;
+       m_node->buf_id = qbuf->buf_id;
+
+       DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+               (int)m_node, qbuf->ops_id);
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+               qbuf->prop_id, m_node->buf_id);
+
+       for_each_ipp_planar(i) {
+               DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+                       i, qbuf->handle[i]);
+
+               /* get dma address by handle */
+               if (qbuf->handle[i]) {
+                       addr = exynos_drm_gem_get_dma_addr(drm_dev,
+                                       qbuf->handle[i], file);
+                       if (IS_ERR(addr)) {
+                               DRM_ERROR("failed to get addr.\n");
+                               goto err_clear;
+                       }
+
+                       buf_info.handles[i] = qbuf->handle[i];
+                       buf_info.base[i] = *(dma_addr_t *) addr;
+                       DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+                               __func__, i, buf_info.base[i],
+                               (int)buf_info.handles[i]);
+               }
+       }
+
+       m_node->filp = file;
+       m_node->buf_info = buf_info;
+       list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+       mutex_unlock(&c_node->mem_lock);
+       return m_node;
+
+err_clear:
+       kfree(m_node);
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid dequeue node.\n");
+               return -EFAULT;
+       }
+
+       if (list_empty(&m_node->list)) {
+               DRM_ERROR("empty memory node.\n");
+               return -ENOMEM;
+       }
+
+       mutex_lock(&c_node->mem_lock);
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+       /* put gem buffer */
+       for_each_ipp_planar(i) {
+               unsigned long handle = m_node->buf_info.handles[i];
+               if (handle)
+                       exynos_drm_gem_put_dma_addr(drm_dev, handle,
+                                                       m_node->filp);
+       }
+
+       /* delete list in queue */
+       list_del(&m_node->list);
+       kfree(m_node);
+
+       mutex_unlock(&c_node->mem_lock);
+
+       return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+       kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_send_event *e;
+       unsigned long flags;
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+               qbuf->ops_id, qbuf->buf_id);
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+       if (!e) {
+               DRM_ERROR("failed to allocate event.\n");
+               spin_lock_irqsave(&drm_dev->event_lock, flags);
+               file->event_space += sizeof(e->event);
+               spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+               return -ENOMEM;
+       }
+
+       /* make event */
+       e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+       e->event.base.length = sizeof(e->event);
+       e->event.user_data = qbuf->user_data;
+       e->event.prop_id = qbuf->prop_id;
+       e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+       e->base.event = &e->event.base;
+       e->base.file_priv = file;
+       e->base.destroy = ipp_free_event;
+       list_add_tail(&e->base.link, &c_node->event_list);
+
+       return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_send_event *e, *te;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (list_empty(&c_node->event_list)) {
+               DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+               return;
+       }
+
+       list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+               DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+                       __func__, count++, (int)e);
+
+               /*
+                * qbuf == NULL means delete all events: stop
+                * operations flush the whole event list. Otherwise
+                * delete only the event with the matching buf id.
+                */
+               if (!qbuf) {
+                       /* delete list */
+                       list_del(&e->base.link);
+                       kfree(e);
+               }
+
+               /* compare buffer id */
+               if (qbuf && (qbuf->buf_id ==
+                   e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+                       /* delete list */
+                       list_del(&e->base.link);
+                       kfree(e);
+                       return;
+               }
+       }
+}
+
+void ipp_handle_cmd_work(struct device *dev,
+               struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_work *cmd_work,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       cmd_work->ippdrv = ippdrv;
+       cmd_work->c_node = c_node;
+       queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_property *property;
+       struct exynos_drm_ipp_ops *ops;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EFAULT;
+       }
+
+       ops = ippdrv->ops[qbuf->ops_id];
+       if (!ops) {
+               DRM_ERROR("failed to get ops.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+
+       if (c_node->state != IPP_STATE_START) {
+               DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
+               return 0;
+       }
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return 0;
+       }
+
+       /*
+        * If the destination buffer is set and the clock is enabled,
+        * m2m operations start right here at queue_buf time.
+        */
+       if (ipp_is_m2m_cmd(property->cmd)) {
+               struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+               cmd_work->ctrl = IPP_CTRL_PLAY;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+       } else {
+               ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+               if (ret) {
+                       DRM_ERROR("failed to set m node.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+               /* delete list */
+               list_for_each_entry_safe(m_node, tm_node,
+                       &c_node->mem_list[qbuf->ops_id], list) {
+                       if (m_node->buf_id == qbuf->buf_id &&
+                           m_node->ops_id == qbuf->ops_id)
+                               ipp_put_mem_node(drm_dev, c_node, m_node);
+               }
+       }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_queue_buf *qbuf = data;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       struct drm_exynos_ipp_mem_node *m_node;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!qbuf) {
+               DRM_ERROR("invalid buf parameter.\n");
+               return -EINVAL;
+       }
+
+       if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+               DRM_ERROR("invalid ops parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+               __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+               qbuf->buf_id, qbuf->buf_type);
+
+       /* find command node */
+       c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+               qbuf->prop_id);
+       if (IS_ERR(c_node)) {
+               DRM_ERROR("failed to get command node.\n");
+               return -EFAULT;
+       }
+
+       /* buffer control */
+       switch (qbuf->buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* get memory node */
+               m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+               if (IS_ERR(m_node)) {
+                       DRM_ERROR("failed to get m_node.\n");
+                       return PTR_ERR(m_node);
+               }
+
+               /*
+                * First get an event for the destination buffer; then,
+                * in the M2M case, run with the destination buffer if
+                * needed.
+                */
+               if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+                       /* get event for destination buffer */
+                       ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+                       if (ret) {
+                               DRM_ERROR("failed to get event.\n");
+                               goto err_clean_node;
+                       }
+
+                       /*
+                        * The M2M case runs play control for the streaming
+                        * feature; other cases set the address and wait.
+                        */
+                       ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+                       if (ret) {
+                               DRM_ERROR("failed to run command.\n");
+                               goto err_clean_node;
+                       }
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               mutex_lock(&c_node->cmd_lock);
+
+               /* put event for destination buffer */
+               if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+                       ipp_put_event(c_node, qbuf);
+
+               ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+               mutex_unlock(&c_node->cmd_lock);
+               break;
+       default:
+               DRM_ERROR("invalid buffer control.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+
+err_clean_node:
+       DRM_ERROR("clean memory nodes.\n");
+
+       ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+       return ret;
+}
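
A matching userspace sketch for buffer queueing: enqueue one gem handle per side for the M2M property created earlier. Queueing the destination side is what arms the completion event and, for M2M, schedules the start work (see ipp_queue_buf_with_run() above). Names again come from the uapi header:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/exynos_drm.h>

    /* Sketch: enqueue one single-planar gem buffer per side. */
    static int example_queue(int fd, __u32 prop_id,
    		__u32 src_gem, __u32 dst_gem)
    {
    	struct drm_exynos_ipp_queue_buf qbuf;

    	memset(&qbuf, 0, sizeof(qbuf));
    	qbuf.prop_id = prop_id;
    	qbuf.buf_type = IPP_BUF_ENQUEUE;
    	qbuf.buf_id = 0;

    	qbuf.ops_id = EXYNOS_DRM_OPS_SRC;
    	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = src_gem;
    	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf) < 0)
    		return -1;

    	/* the destination enqueue arms the event and starts M2M */
    	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
    	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = dst_gem;
    	return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
    }
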
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+               enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (ctrl != IPP_CTRL_PLAY) {
+               if (pm_runtime_suspended(dev)) {
+                       DRM_ERROR("pm:runtime_suspended.\n");
+                       goto err_status;
+               }
+       }
+
+       switch (ctrl) {
+       case IPP_CTRL_PLAY:
+               if (state != IPP_STATE_IDLE)
+                       goto err_status;
+               break;
+       case IPP_CTRL_STOP:
+               if (state == IPP_STATE_STOP)
+                       goto err_status;
+               break;
+       case IPP_CTRL_PAUSE:
+               if (state != IPP_STATE_START)
+                       goto err_status;
+               break;
+       case IPP_CTRL_RESUME:
+               if (state != IPP_STATE_STOP)
+                       goto err_status;
+               break;
+       default:
+               DRM_ERROR("invalid state.\n");
+               goto err_status;
+       }
+
+       return true;
+
+err_status:
+       DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+       return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct exynos_drm_ippdrv *ippdrv = NULL;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+       struct drm_exynos_ipp_cmd_work *cmd_work;
+       struct drm_exynos_ipp_cmd_node *c_node;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!cmd_ctrl) {
+               DRM_ERROR("invalid control parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+               cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+       ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+       if (IS_ERR(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return PTR_ERR(ippdrv);
+       }
+
+       c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+               cmd_ctrl->prop_id);
+       if (IS_ERR(c_node)) {
+               DRM_ERROR("invalid command node list.\n");
+               return -EINVAL;
+       }
+
+       if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+           c_node->state)) {
+               DRM_ERROR("invalid state.\n");
+               return -EINVAL;
+       }
+
+       switch (cmd_ctrl->ctrl) {
+       case IPP_CTRL_PLAY:
+               if (pm_runtime_suspended(ippdrv->dev))
+                       pm_runtime_get_sync(ippdrv->dev);
+               c_node->state = IPP_STATE_START;
+
+               cmd_work = c_node->start_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+               break;
+       case IPP_CTRL_STOP:
+               cmd_work = c_node->stop_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+               if (!wait_for_completion_timeout(&c_node->stop_complete,
+                   msecs_to_jiffies(300))) {
+                       DRM_ERROR("timeout stop:prop_id[%d]\n",
+                               c_node->property.prop_id);
+               }
+
+               c_node->state = IPP_STATE_STOP;
+               ippdrv->dedicated = false;
+               ipp_clean_cmd_node(c_node);
+
+               if (list_empty(&ippdrv->cmd_list))
+                       pm_runtime_put_sync(ippdrv->dev);
+               break;
+       case IPP_CTRL_PAUSE:
+               cmd_work = c_node->stop_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+               if (!wait_for_completion_timeout(&c_node->stop_complete,
+                   msecs_to_jiffies(200))) {
+                       DRM_ERROR("timeout stop:prop_id[%d]\n",
+                               c_node->property.prop_id);
+               }
+
+               c_node->state = IPP_STATE_STOP;
+               break;
+       case IPP_CTRL_RESUME:
+               c_node->state = IPP_STATE_START;
+               cmd_work = c_node->start_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+               break;
+       default:
+               DRM_ERROR("unsupported ctrl operation.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+               cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+       return 0;
+}
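
Completing the userspace picture, a hedged sketch of driving the command node through PLAY and STOP (struct and macro names from the uapi header added by this series):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/exynos_drm.h>

    /* Sketch: run the command node, then tear it down. */
    static int example_run(int fd, __u32 prop_id)
    {
    	struct drm_exynos_ipp_cmd_ctrl ctrl;

    	memset(&ctrl, 0, sizeof(ctrl));
    	ctrl.prop_id = prop_id;
    	ctrl.ctrl = IPP_CTRL_PLAY;
    	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl) < 0)
    		return -1;

    	/* ... read DRM_EXYNOS_IPP_EVENT completions from the drm fd ... */

    	ctrl.ctrl = IPP_CTRL_STOP;
    	return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
    }
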
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(
+               &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(
+               &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+       return blocking_notifier_call_chain(
+               &exynos_drm_ippnb_list, val, v);
+}
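
On the kernel side, these helpers let a display driver observe writeback changes; gsc_ippdrv_start()/gsc_ippdrv_stop() above send IPP_SET_WRITEBACK through this chain. A hypothetical consumer, sketched under the assumption that struct drm_exynos_ipp_set_wb and IPP_SET_WRITEBACK come from exynos_drm_ipp.h:

    #include <linux/notifier.h>
    #include "exynos_drm_ipp.h"

    /* Hypothetical consumer of the writeback notification. */
    static int example_ippnb_event(struct notifier_block *nb,
    		unsigned long val, void *data)
    {
    	struct drm_exynos_ipp_set_wb *set_wb = data;

    	if (val != IPP_SET_WRITEBACK)
    		return NOTIFY_DONE;

    	/* enable or disable the local writeback path accordingly */
    	pr_info("writeback %s, refresh %u\n",
    		set_wb->enable ? "on" : "off", set_wb->refresh);

    	return NOTIFY_DONE;
    }

    static struct notifier_block example_nb = {
    	.notifier_call = example_ippnb_event,
    };

    /* registered once, typically at probe time:
     *	exynos_drm_ippnb_register(&example_nb);
     */
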
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ipp_ops *ops = NULL;
+       bool swap = false;
+       int ret, i;
+
+       if (!property) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* reset h/w block */
+       if (ippdrv->reset &&
+           ippdrv->reset(ippdrv->dev)) {
+               DRM_ERROR("failed to reset.\n");
+               return -EINVAL;
+       }
+
+       /* set source/destination operations */
+       for_each_ipp_ops(i) {
+               struct drm_exynos_ipp_config *config =
+                       &property->config[i];
+
+               ops = ippdrv->ops[i];
+               if (!ops || !config) {
+                       DRM_ERROR("ops or config not supported.\n");
+                       return -EINVAL;
+               }
+
+               /* set format */
+               if (ops->set_fmt) {
+                       ret = ops->set_fmt(ippdrv->dev, config->fmt);
+                       if (ret) {
+                               DRM_ERROR("format not supported.\n");
+                               return ret;
+                       }
+               }
+
+               /* set transform for rotation, flip */
+               if (ops->set_transf) {
+                       ret = ops->set_transf(ippdrv->dev, config->degree,
+                               config->flip, &swap);
+                       if (ret) {
+                               DRM_ERROR("transform not supported.\n");
+                               return -EINVAL;
+                       }
+               }
+
+               /* set size */
+               if (ops->set_size) {
+                       ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+                               &config->sz);
+                       if (ret) {
+                               DRM_ERROR("size not supported.\n");
+                               return ret;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct list_head *head;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* store command info in ippdrv */
+       ippdrv->cmd = c_node;
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return -ENOMEM;
+       }
+
+       /* set current property in ippdrv */
+       ret = ipp_set_property(ippdrv, property);
+       if (ret) {
+               DRM_ERROR("failed to set property.\n");
+               ippdrv->cmd = NULL;
+               return ret;
+       }
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       /* list_first_entry() never returns NULL, so
+                        * check for an empty list before using it */
+                       if (list_empty(head)) {
+                               DRM_ERROR("failed to get node.\n");
+                               return -EFAULT;
+                       }
+
+                       m_node = list_first_entry(head,
+                               struct drm_exynos_ipp_mem_node, list);
+
+                       DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+                               __func__, (int)m_node);
+
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       case IPP_CMD_WB:
+               /* destination memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+               list_for_each_entry(m_node, head, list) {
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               list_for_each_entry(m_node, head, list) {
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       default:
+               DRM_ERROR("invalid command.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+       /* start operations */
+       if (ippdrv->start) {
+               ret = ippdrv->start(ippdrv->dev, property->cmd);
+               if (ret) {
+                       DRM_ERROR("failed to start ops.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+               struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct list_head *head;
+       int ret = 0, i;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* put event */
+       ipp_put_event(c_node, NULL);
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       if (list_empty(head)) {
+                               DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+                                       __func__);
+                               continue;
+                       }
+
+                       list_for_each_entry_safe(m_node, tm_node,
+                               head, list) {
+                               ret = ipp_put_mem_node(drm_dev, c_node,
+                                       m_node);
+                               if (ret) {
+                                       DRM_ERROR("failed to put m_node.\n");
+                                       goto err_clear;
+                               }
+                       }
+               }
+               break;
+       case IPP_CMD_WB:
+               /* destination memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+                       break;
+               }
+
+               list_for_each_entry_safe(m_node, tm_node, head, list) {
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to put m_node.\n");
+                               goto err_clear;
+                       }
+               }
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+                       break;
+               }
+
+               list_for_each_entry_safe(m_node, tm_node, head, list) {
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to put m_node.\n");
+                               goto err_clear;
+                       }
+               }
+               break;
+       default:
+               DRM_ERROR("invalid command.\n");
+               ret = -EINVAL;
+               goto err_clear;
+       }
+
+err_clear:
+       /* stop operations */
+       if (ippdrv->stop)
+               ippdrv->stop(ippdrv->dev, property->cmd);
+
+       return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+       struct drm_exynos_ipp_cmd_work *cmd_work =
+               container_of(work, struct drm_exynos_ipp_cmd_work, work);
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       struct drm_exynos_ipp_property *property;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       ippdrv = cmd_work->ippdrv;
+       if (!ippdrv) {
+               DRM_ERROR("invalid ippdrv.\n");
+               return;
+       }
+
+       c_node = cmd_work->c_node;
+       if (!c_node) {
+               DRM_ERROR("invalid command node.\n");
+               return;
+       }
+
+       mutex_lock(&c_node->cmd_lock);
+
+       property = &c_node->property;
+
+       switch (cmd_work->ctrl) {
+       case IPP_CTRL_PLAY:
+       case IPP_CTRL_RESUME:
+               ret = ipp_start_property(ippdrv, c_node);
+               if (ret) {
+                       DRM_ERROR("failed to start property:prop_id[%d]\n",
+                               c_node->property.prop_id);
+                       goto err_unlock;
+               }
+
+               /*
+                * The M2M command operates on a single unit at a time
+                * across multiple queued buffers, so each run has to
+                * wait until the data transfer completes.
+                */
+               if (ipp_is_m2m_cmd(property->cmd)) {
+                       if (!wait_for_completion_timeout
+                           (&c_node->start_complete, msecs_to_jiffies(200))) {
+                               DRM_ERROR("timeout event:prop_id[%d]\n",
+                                       c_node->property.prop_id);
+                               goto err_unlock;
+                       }
+               }
+               break;
+       case IPP_CTRL_STOP:
+       case IPP_CTRL_PAUSE:
+               ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+                       c_node);
+               if (ret) {
+                       DRM_ERROR("failed to stop property.\n");
+                       goto err_unlock;
+               }
+
+               complete(&c_node->stop_complete);
+               break;
+       default:
+               DRM_ERROR("unknown control type\n");
+               break;
+       }
+
+       DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+       mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+       struct drm_device *drm_dev = ippdrv->drm_dev;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_queue_buf qbuf;
+       struct drm_exynos_ipp_send_event *e;
+       struct list_head *head;
+       struct timeval now;
+       unsigned long flags;
+       u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+       int ret, i;
+
+       for_each_ipp_ops(i)
+               DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+                       i ? "dst" : "src", buf_id[i]);
+
+       if (!drm_dev) {
+               DRM_ERROR("failed to get drm_dev.\n");
+               return -EINVAL;
+       }
+
+       if (list_empty(&c_node->event_list)) {
+               DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+               return 0;
+       }
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+               return 0;
+       }
+
+       /*
+        * check command: dequeue the completed buffer(s) and record
+        * their buf_ids for the event sent back to userland.
+        */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       /* mem_list was verified non-empty above */
+                       m_node = list_first_entry(head,
+                               struct drm_exynos_ipp_mem_node, list);
+
+                       tbuf_id[i] = m_node->buf_id;
+                       DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+                               i ? "dst" : "src", tbuf_id[i]);
+
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret)
+                               DRM_ERROR("failed to put m_node.\n");
+               }
+               break;
+       case IPP_CMD_WB:
+               /* build a lookup key for the dequeued destination buffer */
+               memset(&qbuf, 0x0, sizeof(qbuf));
+               qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+               qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+               /* get memory node entry */
+               m_node = ipp_find_mem_node(c_node, &qbuf);
+               if (!m_node) {
+                       DRM_ERROR("empty memory node.\n");
+                       return -ENOMEM;
+               }
+
+               tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               /* mem_list was verified non-empty above */
+               m_node = list_first_entry(head,
+                       struct drm_exynos_ipp_mem_node, list);
+
+               tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+               break;
+       default:
+               DRM_ERROR("invalid command.\n");
+               return -EINVAL;
+       }
+
+       if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+               DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+                       tbuf_id[EXYNOS_DRM_OPS_DST],
+                       buf_id[EXYNOS_DRM_OPS_DST], property->prop_id);
+
+       /*
+        * The command node keeps an event list for destination buffers:
+        * when a destination buffer is enqueued to the mem list, an event
+        * is created and linked to the tail of the event list, so the
+        * first event always matches the first enqueued buffer.
+        */
+       /* event_list was verified non-empty above */
+       e = list_first_entry(&c_node->event_list,
+               struct drm_exynos_ipp_send_event, base.link);
+
+       do_gettimeofday(&now);
+       DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+               , __func__, now.tv_sec, now.tv_usec);
+       e->event.tv_sec = now.tv_sec;
+       e->event.tv_usec = now.tv_usec;
+       e->event.prop_id = property->prop_id;
+
+       /* set buffer ids for source and destination */
+       for_each_ipp_ops(i)
+               e->event.buf_id[i] = tbuf_id[i];
+
+       spin_lock_irqsave(&drm_dev->event_lock, flags);
+       list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+       wake_up_interruptible(&e->base.file_priv->event_wait);
+       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+       DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+               property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+       return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+       struct drm_exynos_ipp_event_work *event_work =
+               container_of(work, struct drm_exynos_ipp_event_work, work);
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+               event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+       ippdrv = event_work->ippdrv;
+       if (!ippdrv) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return;
+       }
+
+       c_node = ippdrv->cmd;
+       if (!c_node) {
+               DRM_ERROR("failed to get command node.\n");
+               return;
+       }
+
+       /*
+        * The event thread is synchronized with the command thread.
+        * If userland closes the device while an operation is running,
+        * the command thread waits on start_complete, so signal
+        * completion here instead of carrying on with the operation.
+        */
+       mutex_lock(&c_node->event_lock);
+
+       if (c_node->state != IPP_STATE_START) {
+               DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+                       __func__, c_node->state, c_node->property.prop_id);
+               goto err_completion;
+       }
+
+       ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+       if (ret) {
+               DRM_ERROR("failed to send event.\n");
+               goto err_completion;
+       }
+
+err_completion:
+       if (ipp_is_m2m_cmd(c_node->property.cmd))
+               complete(&c_node->start_complete);
+
+       mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret, count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* get ipp driver entry */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               ippdrv->drm_dev = drm_dev;
+
+               ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+                       &ippdrv->ipp_id);
+               if (ret) {
+                       DRM_ERROR("failed to create id.\n");
+                       goto err_idr;
+               }
+
+               DRM_DEBUG_KMS("%s:count[%d]ippdrv[%p]ipp_id[%d]\n", __func__,
+                       count++, ippdrv, ippdrv->ipp_id);
+
+               if (ippdrv->ipp_id == 0) {
+                       DRM_ERROR("failed to get ipp_id[%d]\n",
+                               ippdrv->ipp_id);
+                       ret = -EINVAL;
+                       goto err_idr;
+               }
+
+               /* store parent device for node */
+               ippdrv->parent_dev = dev;
+
+               /* store event work queue and handler */
+               ippdrv->event_workq = ctx->event_workq;
+               ippdrv->sched_event = ipp_sched_event;
+               INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+               if (is_drm_iommu_supported(drm_dev)) {
+                       ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+                       if (ret) {
+                               DRM_ERROR("failed to activate iommu\n");
+                               goto err_iommu;
+                       }
+               }
+       }
+
+       return 0;
+
+err_iommu:
+       /* roll back only the drivers attached so far */
+       list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
+               drv_list)
+               if (is_drm_iommu_supported(drm_dev))
+                       drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+       idr_remove_all(&ctx->ipp_idr);
+       idr_remove_all(&ctx->prop_idr);
+       idr_destroy(&ctx->ipp_idr);
+       idr_destroy(&ctx->prop_idr);
+       return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       struct exynos_drm_ippdrv *ippdrv, *t;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* get ipp driver entry */
+       list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
+               if (is_drm_iommu_supported(drm_dev))
+                       drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+               ippdrv->drm_dev = NULL;
+               exynos_drm_ippdrv_unregister(ippdrv);
+       }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               DRM_ERROR("failed to allocate priv.\n");
+               return -ENOMEM;
+       }
+       priv->dev = dev;
+       file_priv->ipp_priv = priv;
+
+       INIT_LIST_HEAD(&priv->event_list);
+
+       DRM_DEBUG_KMS("%s:done priv[%p]\n", __func__, priv);
+
+       return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct exynos_drm_ippdrv *ippdrv = NULL;
+       struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:for priv[%p]\n", __func__, priv);
+
+       if (list_empty(&exynos_drm_ippdrv_list)) {
+               DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+               goto err_clear;
+       }
+
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               if (list_empty(&ippdrv->cmd_list))
+                       continue;
+
+               list_for_each_entry_safe(c_node, tc_node,
+                       &ippdrv->cmd_list, list) {
+                       DRM_DEBUG_KMS("%s:count[%d]ippdrv[%p]\n",
+                               __func__, count++, ippdrv);
+
+                       if (c_node->priv == priv) {
+                               /*
+                                * Userland went away abnormally: the
+                                * process was killed and the file closed
+                                * without a stop cmd ctrl, so stop the
+                                * running operation here.
+                                */
+                               if (c_node->state == IPP_STATE_START) {
+                                       ipp_stop_property(drm_dev, ippdrv,
+                                               c_node);
+                                       c_node->state = IPP_STATE_STOP;
+                               }
+
+                               ippdrv->dedicated = false;
+                               ipp_clean_cmd_node(c_node);
+                               if (list_empty(&ippdrv->cmd_list))
+                                       pm_runtime_put_sync(ippdrv->dev);
+                       }
+               }
+       }
+
+err_clear:
+       kfree(priv);
+       return;
+}
+
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ipp_context *ctx;
+       struct exynos_drm_subdrv *subdrv;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_init(&ctx->ipp_lock);
+       mutex_init(&ctx->prop_lock);
+
+       idr_init(&ctx->ipp_idr);
+       idr_init(&ctx->prop_idr);
+
+       /*
+        * Create a single-threaded workqueue for IPP events.
+        * IPP drivers queue their event_work here, and the event
+        * thread delivers the resulting event to the user process.
+        */
+       ctx->event_workq = create_singlethread_workqueue("ipp_event");
+       if (!ctx->event_workq) {
+               dev_err(dev, "failed to create event workqueue\n");
+               ret = -ENOMEM;
+               goto err_clear;
+       }
+
+       /*
+        * Create a single-threaded workqueue for IPP commands.
+        * A user process builds a command node with the set property
+        * ioctl; the resulting start_work is queued here, and the
+        * command thread then starts the property.
+        */
+       ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+       if (!ctx->cmd_workq) {
+               dev_err(dev, "failed to create cmd workqueue\n");
+               ret = -ENOMEM;
+               goto err_event_workq;
+       }
+
+       /* set sub driver information */
+       subdrv = &ctx->subdrv;
+       subdrv->dev = dev;
+       subdrv->probe = ipp_subdrv_probe;
+       subdrv->remove = ipp_subdrv_remove;
+       subdrv->open = ipp_subdrv_open;
+       subdrv->close = ipp_subdrv_close;
+
+       platform_set_drvdata(pdev, ctx);
+
+       ret = exynos_drm_subdrv_register(subdrv);
+       if (ret < 0) {
+               DRM_ERROR("failed to register drm ipp device.\n");
+               goto err_cmd_workq;
+       }
+
+       dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+       return 0;
+
+err_cmd_workq:
+       destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+       destroy_workqueue(ctx->event_workq);
+err_clear:
+       kfree(ctx);
+       return ret;
+}
+
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+       struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* unregister sub driver */
+       exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+       /* remove and destroy ipp/prop idrs */
+       idr_remove_all(&ctx->ipp_idr);
+       idr_remove_all(&ctx->prop_idr);
+       idr_destroy(&ctx->ipp_idr);
+       idr_destroy(&ctx->prop_idr);
+
+       mutex_destroy(&ctx->ipp_lock);
+       mutex_destroy(&ctx->prop_lock);
+
+       /* destroy command, event work queue */
+       destroy_workqueue(ctx->cmd_workq);
+       destroy_workqueue(ctx->event_workq);
+
+       kfree(ctx);
+
+       return 0;
+}
+
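+/* currently a no-op hook for ipp-level power control */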
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!pm_runtime_suspended(dev))
+               return ipp_power_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+       SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+       .probe          = ipp_probe,
+       .remove         = __devexit_p(ipp_remove),
+       .driver         = {
+               .name   = "exynos-drm-ipp",
+               .owner  = THIS_MODULE,
+               .pm     = &ipp_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644 (file)
index 0000000..28ffac9
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
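+/*
+ * Iterators over the op indexes (EXYNOS_DRM_OPS_SRC, then
+ * EXYNOS_DRM_OPS_DST) and over the Y/Cb/Cr planar indexes.
+ */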
+#define for_each_ipp_ops(pos)  \
+       for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos)       \
+       for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
+
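+/* notifier event ids, delivered via exynos_drm_ippnb_send_event() */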
+#define IPP_GET_LCD_WIDTH      _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT     _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK      _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+       IPP_STATE_IDLE,
+       IPP_STATE_START,
+       IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+       struct work_struct      work;
+       struct exynos_drm_ippdrv        *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       enum drm_exynos_ipp_ctrl        ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source,destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+       struct exynos_drm_ipp_private *priv;
+       struct list_head        list;
+       struct list_head        event_list;
+       struct list_head        mem_list[EXYNOS_DRM_OPS_MAX];
+       struct mutex    cmd_lock;
+       struct mutex    mem_lock;
+       struct mutex    event_lock;
+       struct completion       start_complete;
+       struct completion       stop_complete;
+       struct drm_exynos_ipp_property  property;
+       struct drm_exynos_ipp_cmd_work *start_work;
+       struct drm_exynos_ipp_cmd_work *stop_work;
+       struct drm_exynos_ipp_event_work *event_work;
+       enum drm_exynos_ipp_state       state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr gem handle of each plane.
+ * @base: Y, Cb, Cr base address of each plane.
+ */
+struct drm_exynos_ipp_buf_info {
+       unsigned long   handles[EXYNOS_DRM_PLANAR_MAX];
+       dma_addr_t      base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+       __u32   enable;
+       __u32   refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+       struct work_struct      work;
+       struct exynos_drm_ippdrv *ippdrv;
+       u32     buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source,destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+       int (*set_fmt)(struct device *dev, u32 fmt);
+       int (*set_transf)(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap);
+       int (*set_size)(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+       int (*set_addr)(struct device *dev,
+                struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type);
+};
+
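+/* for example tables, see rot_src_ops/rot_dst_ops in the rotator driver */
+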
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+       struct list_head        drv_list;
+       struct device   *parent_dev;
+       struct device   *dev;
+       struct drm_device       *drm_dev;
+       u32     ipp_id;
+       bool    dedicated;
+       struct exynos_drm_ipp_ops       *ops[EXYNOS_DRM_OPS_MAX];
+       struct workqueue_struct *event_workq;
+       struct drm_exynos_ipp_cmd_node *cmd;
+       struct list_head        cmd_list;
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       int (*check_property)(struct device *dev,
+               struct drm_exynos_ipp_property *property);
+       int (*reset)(struct device *dev);
+       int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+       void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+       void (*sched_event)(struct work_struct *work);
+};
+
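+/*
+ * A minimal registration sketch (illustrative only; the example_* names
+ * are made up, but the shape mirrors the rotator driver in this series):
+ *
+ *     static int example_probe(struct platform_device *pdev)
+ *     {
+ *             struct example_ctx *ctx = devm_kzalloc(...);
+ *             struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ *
+ *             ippdrv->dev = &pdev->dev;
+ *             ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &example_src_ops;
+ *             ippdrv->ops[EXYNOS_DRM_OPS_DST] = &example_dst_ops;
+ *             ippdrv->check_property = example_check_property;
+ *             ippdrv->start = example_start;
+ *
+ *             return exynos_drm_ippdrv_register(ippdrv);
+ *     }
+ */
+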
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file_priv)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file_priv)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+       return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
index 862ca1eb21020fdc2faf5ebc5c65b5b3a2899731..83efc662d65ab7c97f64b9c1bf9a79a4d241b73c 100644 (file)
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
  * CRTC ----------------
  *      ^ start        ^ end
  *
- * There are six cases from a to b.
+ * There are six cases from a to f.
  *
  *             <----- SCREEN ----->
  *             0                 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
                }
 
                overlay->dma_addr[i] = buffer->dma_addr;
-               overlay->vaddr[i] = buffer->kvaddr;
 
-               DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                               i, (unsigned long)overlay->vaddr[i],
-                               (unsigned long)overlay->dma_addr[i]);
+               DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+                               i, (unsigned long)overlay->dma_addr[i]);
        }
 
        actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_x < 0) {
                if (actual_w)
                        src_x -= crtc_x;
-               else
-                       src_x += crtc_w;
                crtc_x = 0;
        }
 
        if (crtc_y < 0) {
                if (actual_h)
                        src_y -= crtc_y;
-               else
-                       src_y += crtc_h;
                crtc_y = 0;
        }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644 (file)
index 0000000..1c23660
--- /dev/null
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     YoungJun Cho <yj44.cho@samsung.com>
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * Rotator supports image crop/rotation and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ *
+ * M2M operation : supports rotation/flip.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. need to add supported list in prop_list.
+ */
+
+#define get_rot_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct rot_context, ippdrv)
+#define rot_read(offset)               readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
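+/* rot_read()/rot_write() expect a local "struct rot_context *rot" in scope */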
+
+enum rot_irq_status {
+       ROT_IRQ_STATUS_COMPLETE = 8,
+       ROT_IRQ_STATUS_ILLEGAL  = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+       u32     min_w;
+       u32     min_h;
+       u32     max_w;
+       u32     max_h;
+       u32     align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+       struct rot_limit        ycbcr420_2p;
+       struct rot_limit        rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ *
+ * @ippdrv: ipp driver instance embedded in this context.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct clk      *clock;
+       struct rot_limit_table  *limit_tbl;
+       int     irq;
+       int     cur_buf_id[EXYNOS_DRM_OPS_MAX];
+       bool    suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+       u32 val = rot_read(ROT_CONFIG);
+
+       if (enable)
+               val |= ROT_CONFIG_IRQ;
+       else
+               val &= ~ROT_CONFIG_IRQ;
+
+       rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+       u32 val = rot_read(ROT_CONTROL);
+
+       val &= ROT_CONTROL_FMT_MASK;
+
+       return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+       u32 val = rot_read(ROT_STATUS);
+
+       val = ROT_STATUS_IRQ(val);
+
+       if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+               return ROT_IRQ_STATUS_COMPLETE;
+
+       return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+       struct rot_context *rot = arg;
+       struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+       enum rot_irq_status irq_status;
+       u32 val;
+
+       /* Get execution result */
+       irq_status = rotator_reg_get_irq_status(rot);
+
+       /* clear status */
+       val = rot_read(ROT_STATUS);
+       val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+       rot_write(val, ROT_STATUS);
+
+       if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+               event_work->ippdrv = ippdrv;
+               event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+                       rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+               queue_work(ippdrv->event_workq,
+                       (struct work_struct *)event_work);
+       } else {
+               DRM_ERROR("the SFR is set illegally\n");
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+               u32 *vsize)
+{
+       struct rot_limit_table *limit_tbl = rot->limit_tbl;
+       struct rot_limit *limit;
+       u32 mask, val;
+
+       /* Get size limit */
+       if (fmt == ROT_CONTROL_FMT_RGB888)
+               limit = &limit_tbl->rgb888;
+       else
+               limit = &limit_tbl->ycbcr420_2p;
+
+       /* Get mask for rounding to nearest aligned val */
+       mask = ~((1 << limit->align) - 1);
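+       /* e.g. align == 4 -> mask == ~0xf, i.e. multiples of 16 */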
+
+       /* Set aligned width */
+       val = ROT_ALIGN(*hsize, limit->align, mask);
+       if (val < limit->min_w)
+               *hsize = ROT_MIN(limit->min_w, mask);
+       else if (val > limit->max_w)
+               *hsize = ROT_MAX(limit->max_w, mask);
+       else
+               *hsize = val;
+
+       /* Set aligned height */
+       val = ROT_ALIGN(*vsize, limit->align, mask);
+       if (val < limit->min_h)
+               *vsize = ROT_MIN(limit->min_h, mask);
+       else if (val > limit->max_h)
+               *vsize = ROT_MAX(limit->max_h, mask);
+       else
+               *vsize = val;
+}
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val;
+
+       val = rot_read(ROT_CONTROL);
+       val &= ~ROT_CONTROL_FMT_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_NV12:
+               val |= ROT_CONTROL_FMT_YCBCR420_2P;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               val |= ROT_CONTROL_FMT_RGB888;
+               break;
+       default:
+               DRM_ERROR("invalid image format\n");
+               return -EINVAL;
+       }
+
+       rot_write(val, ROT_CONTROL);
+
+       return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+       return (fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+              (fmt == ROT_CONTROL_FMT_RGB888);
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos,
+               struct drm_exynos_sz *sz)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 fmt, hsize, vsize;
+       u32 val;
+
+       /* Get format */
+       fmt = rotator_reg_get_fmt(rot);
+       if (!rotator_check_reg_fmt(fmt)) {
+               DRM_ERROR("%s:invalid format.\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Align buffer size */
+       hsize = sz->hsize;
+       vsize = sz->vsize;
+       rotator_align_size(rot, fmt, &hsize, &vsize);
+
+       /* Set buffer size configuration */
+       val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+       rot_write(val, ROT_SRC_BUF_SIZE);
+
+       /* Set crop image position configuration */
+       val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+       rot_write(val, ROT_SRC_CROP_POS);
+       val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+       rot_write(val, ROT_SRC_CROP_SIZE);
+
+       return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info,
+               u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+       u32 val, fmt, hsize, vsize;
+       int i;
+
+       /* Set current buf_id */
+       rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* Set address configuration */
+               for_each_ipp_planar(i)
+                       addr[i] = buf_info->base[i];
+
+               /* Get format */
+               fmt = rotator_reg_get_fmt(rot);
+               if (!rotator_check_reg_fmt(fmt)) {
+                       DRM_ERROR("%s:invalid format.\n", __func__);
+                       return -EINVAL;
+               }
+
+               /* Re-set cb planar for NV12 format */
+               if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+                   !addr[EXYNOS_DRM_PLANAR_CB]) {
+
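+                       /*
+                        * NV12 is 2-planar: the interleaved CbCr plane
+                        * follows the hsize * vsize byte Y plane, so it
+                        * can be derived from the Y base address.
+                        */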
+                       val = rot_read(ROT_SRC_BUF_SIZE);
+                       hsize = ROT_GET_BUF_SIZE_W(val);
+                       vsize = ROT_GET_BUF_SIZE_H(val);
+
+                       /* Set cb planar */
+                       addr[EXYNOS_DRM_PLANAR_CB] =
+                               addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+               }
+
+               for_each_ipp_planar(i)
+                       rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+               break;
+       case IPP_BUF_DEQUEUE:
+               for_each_ipp_planar(i)
+                       rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+               break;
+       default:
+               /* Nothing to do */
+               break;
+       }
+
+       return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val;
+
+       /* Set transform configuration */
+       val = rot_read(ROT_CONTROL);
+       val &= ~ROT_CONTROL_FLIP_MASK;
+
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_VERTICAL:
+               val |= ROT_CONTROL_FLIP_VERTICAL;
+               break;
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               val |= ROT_CONTROL_FLIP_HORIZONTAL;
+               break;
+       default:
+               /* Flip None */
+               break;
+       }
+
+       val &= ~ROT_CONTROL_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_90:
+               val |= ROT_CONTROL_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               val |= ROT_CONTROL_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               val |= ROT_CONTROL_ROT_270;
+               break;
+       default:
+               /* Rotation 0 Degree */
+               break;
+       }
+
+       rot_write(val, ROT_CONTROL);
+
+       /* Check degree for setting buffer size swap */
+       if ((degree == EXYNOS_DRM_DEGREE_90) ||
+           (degree == EXYNOS_DRM_DEGREE_270))
+               *swap = true;
+       else
+               *swap = false;
+
+       return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos,
+               struct drm_exynos_sz *sz)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val, fmt, hsize, vsize;
+
+       /* Get format */
+       fmt = rotator_reg_get_fmt(rot);
+       if (!rotator_check_reg_fmt(fmt)) {
+               DRM_ERROR("%s:invalid format.\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Align buffer size */
+       hsize = sz->hsize;
+       vsize = sz->vsize;
+       rotator_align_size(rot, fmt, &hsize, &vsize);
+
+       /* Set buffer size configuration */
+       val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+       rot_write(val, ROT_DST_BUF_SIZE);
+
+       /* Set crop image position configuration */
+       val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+       rot_write(val, ROT_DST_CROP_POS);
+
+       return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info,
+               u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+       u32 val, fmt, hsize, vsize;
+       int i;
+
+       /* Set current buf_id */
+       rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* Set address configuration */
+               for_each_ipp_planar(i)
+                       addr[i] = buf_info->base[i];
+
+               /* Get format */
+               fmt = rotator_reg_get_fmt(rot);
+               if (!rotator_check_reg_fmt(fmt)) {
+                       DRM_ERROR("%s:invalid format.\n", __func__);
+                       return -EINVAL;
+               }
+
+               /* Re-set cb planar for NV12 format */
+               if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+                   !addr[EXYNOS_DRM_PLANAR_CB]) {
+                       /* Get buf size */
+                       val = rot_read(ROT_DST_BUF_SIZE);
+
+                       hsize = ROT_GET_BUF_SIZE_W(val);
+                       vsize = ROT_GET_BUF_SIZE_H(val);
+
+                       /* Set cb planar */
+                       addr[EXYNOS_DRM_PLANAR_CB] =
+                               addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+               }
+
+               for_each_ipp_planar(i)
+                       rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+               break;
+       case IPP_BUF_DEQUEUE:
+               for_each_ipp_planar(i)
+                       rot_write(0x0, ROT_DST_BUF_ADDR(i));
+               break;
+       default:
+               /* Nothing to do */
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+       .set_fmt        =       rotator_src_set_fmt,
+       .set_size       =       rotator_src_set_size,
+       .set_addr       =       rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+       .set_transf     =       rotator_dst_set_transf,
+       .set_size       =       rotator_dst_set_size,
+       .set_addr       =       rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 0;
+       prop_list->crop = 0;
+       prop_list->scale = 0;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+       switch (fmt) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_NV12:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:unsupported format\n", __func__);
+               return false;
+       }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct drm_exynos_ipp_config *src_config =
+                                       &property->config[EXYNOS_DRM_OPS_SRC];
+       struct drm_exynos_ipp_config *dst_config =
+                                       &property->config[EXYNOS_DRM_OPS_DST];
+       struct drm_exynos_pos *src_pos = &src_config->pos;
+       struct drm_exynos_pos *dst_pos = &dst_config->pos;
+       struct drm_exynos_sz *src_sz = &src_config->sz;
+       struct drm_exynos_sz *dst_sz = &dst_config->sz;
+       bool swap = false;
+
+       /* Check format configuration */
+       if (src_config->fmt != dst_config->fmt) {
+               DRM_DEBUG_KMS("%s:csc is not supported\n", __func__);
+               return -EINVAL;
+       }
+
+       if (!rotator_check_drm_fmt(dst_config->fmt)) {
+               DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check transform configuration */
+       if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+               DRM_DEBUG_KMS("%s:source-side rotation is not supported\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       switch (dst_config->degree) {
+       case EXYNOS_DRM_DEGREE_90:
+       case EXYNOS_DRM_DEGREE_270:
+               swap = true;
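+               /* fall through */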
+       case EXYNOS_DRM_DEGREE_0:
+       case EXYNOS_DRM_DEGREE_180:
+               /* No problem */
+               break;
+       default:
+               DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+               return -EINVAL;
+       }
+
+       if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+               DRM_DEBUG_KMS("%s:source-side flip is not supported\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       if (!rotator_check_drm_flip(dst_config->flip)) {
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check size configuration */
+       if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+               (src_pos->y + src_pos->h > src_sz->vsize)) {
+               DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+               return -EINVAL;
+       }
+
+       if (swap) {
+               if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+                       (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+                       DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+                               __func__);
+                       return -EINVAL;
+               }
+
+               if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+                       DRM_DEBUG_KMS("%s:scaling is not supported\n",
+                               __func__);
+                       return -EINVAL;
+               }
+       } else {
+               if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+                       (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+                       DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+                               __func__);
+                       return -EINVAL;
+               }
+
+               if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+                       DRM_DEBUG_KMS("%s:scaling is not supported\n",
+                               __func__);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val;
+
+       if (rot->suspended) {
+               DRM_ERROR("suspended state\n");
+               return -EPERM;
+       }
+
+       if (cmd != IPP_CMD_M2M) {
+               DRM_ERROR("unsupported cmd: %d\n", cmd);
+               return -EINVAL;
+       }
+
+       /* Set interrupt enable */
+       rotator_reg_set_irq(rot, true);
+
+       val = rot_read(ROT_CONTROL);
+       val |= ROT_CONTROL_START;
+
+       rot_write(val, ROT_CONTROL);
+
+       return 0;
+}
+
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rot_context *rot;
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret;
+
+       rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+       if (!rot) {
+               dev_err(dev, "failed to allocate rot\n");
+               return -ENOMEM;
+       }
+
+       rot->limit_tbl = (struct rot_limit_table *)
+                               platform_get_device_id(pdev)->driver_data;
+
+       rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!rot->regs_res) {
+               dev_err(dev, "failed to find registers\n");
+               ret = -ENOENT;
+               goto err_get_resource;
+       }
+
+       rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+       if (!rot->regs) {
+               dev_err(dev, "failed to map register\n");
+               ret = -ENXIO;
+               goto err_get_resource;
+       }
+
+       rot->irq = platform_get_irq(pdev, 0);
+       if (rot->irq < 0) {
+               dev_err(dev, "failed to get irq\n");
+               ret = rot->irq;
+               goto err_get_irq;
+       }
+
+       ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+                       IRQF_ONESHOT, "drm_rotator", rot);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq\n");
+               goto err_get_irq;
+       }
+
+       rot->clock = clk_get(dev, "rotator");
+       if (IS_ERR(rot->clock)) {
+               dev_err(dev, "failed to get clock\n");
+               ret = PTR_ERR(rot->clock);
+               goto err_clk_get;
+       }
+
+       pm_runtime_enable(dev);
+
+       ippdrv = &rot->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;