[media] rename drivers/media/video as .../platform

The remaining drivers are mostly platform drivers. Rename the
directory to reflect that.

It makes sense to later split it into a few other directories.

Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
new file mode 100644
index 0000000..e1959a8
--- /dev/null
+++ b/drivers/media/platform/Kconfig
@@ -0,0 +1,297 @@
+if MEDIA_CAMERA_SUPPORT
+
+config VIDEO_VIVI
+	tristate "Virtual Video Driver"
+	depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+	select FONT_8x16
+	select VIDEOBUF2_VMALLOC
+	default n
+	---help---
+	  Enables a virtual video driver. The device shows a color bar
+	  and a timestamp, just as a real device would generate them
+	  through the V4L2 API.
+	  Say Y here if you want to test video apps or debug V4L devices.
+	  If in doubt, say N.
+
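As a concrete idea of the "test video apps" case mentioned above, a minimal userspace check is simply to open the virtual node and issue the standard V4L2 capability ioctl; the /dev/video0 path below is an assumption, since the node number depends on what else is registered.

/* Minimal userspace sanity check for the virtual driver (device path assumed). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("vivi");
		return 1;
	}
	printf("driver %s, card %s\n", cap.driver, cap.card);
	close(fd);
	return 0;
}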
+#
+# Platform drivers
+#	All drivers here are currently for webcam support
+
+menuconfig V4L_PLATFORM_DRIVERS
+	bool "V4L platform devices"
+	depends on MEDIA_CAMERA_SUPPORT
+	default n
+	---help---
+	  Say Y here to enable support for platform-specific V4L drivers.
+
+if V4L_PLATFORM_DRIVERS
+
+source "drivers/media/platform/marvell-ccic/Kconfig"
+
+config VIDEO_VIA_CAMERA
+	tristate "VIAFB camera controller support"
+	depends on FB_VIA
+	select VIDEOBUF_DMA_SG
+	select VIDEO_OV7670
+	help
+	   Driver support for the integrated camera controller in VIA
+	   Chrome9 chipsets.  Currently only tested on OLPC xo-1.5 systems
+	   with ov7670 sensors.
+
+#
+# Platform multimedia device configuration
+#
+
+source "drivers/media/platform/davinci/Kconfig"
+
+source "drivers/media/platform/omap/Kconfig"
+
+source "drivers/media/platform/blackfin/Kconfig"
+
+config VIDEO_SH_VOU
+	tristate "SuperH VOU video output driver"
+	depends on VIDEO_DEV && ARCH_SHMOBILE
+	select VIDEOBUF_DMA_CONTIG
+	help
+	  Support for the Video Output Unit (VOU) on SuperH SoCs.
+
+config VIDEO_VIU
+	tristate "Freescale VIU Video Driver"
+	depends on VIDEO_V4L2 && PPC_MPC512x
+	select VIDEOBUF_DMA_CONTIG
+	default y
+	---help---
+	  Support for the Freescale VIU video driver. This device captures
+	  video data or overlays video on the DIU frame buffer.
+
+	  Say Y here if you want to enable the VIU device on MPC5121e Rev2+.
+	  If in doubt, say N.
+
+config VIDEO_TIMBERDALE
+	tristate "Support for timberdale Video In/LogiWIN"
+	depends on VIDEO_V4L2 && I2C && DMADEVICES
+	select DMA_ENGINE
+	select TIMB_DMA
+	select VIDEO_ADV7180
+	select VIDEOBUF_DMA_CONTIG
+	---help---
+	  Add support for the Video In peripheral of the timberdale FPGA.
+
+config VIDEO_VINO
+	tristate "SGI Vino Video For Linux"
+	depends on I2C && SGI_IP22 && VIDEO_V4L2
+	select VIDEO_SAA7191 if VIDEO_HELPER_CHIPS_AUTO
+	help
+	  Say Y here to build in support for the Vino video input system found
+	  on SGI Indy machines.
+
+config VIDEO_M32R_AR
+	tristate "AR devices"
+	depends on M32R && VIDEO_V4L2
+	---help---
+	  This is a video4linux driver for the Renesas AR (Artificial Retina)
+	  camera module.
+
+config VIDEO_M32R_AR_M64278
+	tristate "AR device with color module M64278(VGA)"
+	depends on PLAT_M32700UT
+	select VIDEO_M32R_AR
+	---help---
+	  This is a video4linux driver for the Renesas AR (Artificial
+	  Retina) with the M64278E-800 camera module.
+	  This module supports VGA (640x480 pixels) resolution.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called arv.
+
+config VIDEO_OMAP3
+	tristate "OMAP 3 Camera support (EXPERIMENTAL)"
+	depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
+	---help---
+	  Driver for an OMAP 3 camera controller.
+
+config VIDEO_OMAP3_DEBUG
+	bool "OMAP 3 Camera debug messages"
+	depends on VIDEO_OMAP3
+	---help---
+	  Enable debug messages on OMAP 3 camera controller driver.
+
+config SOC_CAMERA
+	tristate "SoC camera support"
+	depends on VIDEO_V4L2 && HAS_DMA && I2C
+	select VIDEOBUF_GEN
+	select VIDEOBUF2_CORE
+	help
+	  SoC Camera is a common API for several cameras that do not
+	  connect over a bus like PCI or USB, for example an I2C camera
+	  connected directly to the data bus of an SoC.
+
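For orientation, a soc-camera sensor is typically not probed from a discoverable bus but described by the board file: a "soc-camera-pdrv" platform device carries a soc_camera_link that names the sensor module and the I2C adapter it hangs off. Below is a rough sketch of that wiring, in which the mt9v022 sensor, its address and the bus ids are placeholders, not anything this patch adds.

/* Board-file sketch; sensor name, I2C address and bus ids are placeholders. */
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <media/soc_camera.h>

static struct i2c_board_info example_sensor_i2c = {
	I2C_BOARD_INFO("mt9v022", 0x48),
};

static struct soc_camera_link example_iclink = {
	.bus_id		= 0,			/* id of the camera host */
	.board_info	= &example_sensor_i2c,
	.i2c_adapter_id	= 0,
	.module_name	= "mt9v022",
};

static struct platform_device example_camera_device = {
	.name	= "soc-camera-pdrv",
	.id	= 0,
	.dev	= {
		.platform_data = &example_iclink,
	},
};

static int __init example_camera_init(void)
{
	return platform_device_register(&example_camera_device);
}
device_initcall(example_camera_init);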
+
+config SOC_CAMERA_PLATFORM
+	tristate "platform camera support"
+	depends on SOC_CAMERA
+	help
+	  This is a generic SoC camera platform driver, useful for testing.
+
+config MX1_VIDEO
+	bool
+
+config VIDEO_MX1
+	tristate "i.MX1/i.MXL CMOS Sensor Interface driver"
+	depends on VIDEO_DEV && ARCH_MX1 && SOC_CAMERA
+	select FIQ
+	select VIDEOBUF_DMA_CONTIG
+	select MX1_VIDEO
+	---help---
+	  This is a v4l2 driver for the i.MX1/i.MXL CMOS Sensor Interface
+
+config MX3_VIDEO
+	bool
+
+config VIDEO_MX3
+	tristate "i.MX3x Camera Sensor Interface driver"
+	depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
+	select VIDEOBUF2_DMA_CONTIG
+	select MX3_VIDEO
+	---help---
+	  This is a v4l2 driver for the i.MX3x Camera Sensor Interface
+
+config VIDEO_PXA27x
+	tristate "PXA27x Quick Capture Interface driver"
+	depends on VIDEO_DEV && PXA27x && SOC_CAMERA
+	select VIDEOBUF_DMA_SG
+	---help---
+	  This is a v4l2 driver for the PXA27x Quick Capture Interface
+
+config VIDEO_SH_MOBILE_CSI2
+	tristate "SuperH Mobile MIPI CSI-2 Interface driver"
+	depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
+	---help---
+	  This is a v4l2 driver for the SuperH MIPI CSI-2 Interface
+
+config VIDEO_SH_MOBILE_CEU
+	tristate "SuperH Mobile CEU Interface driver"
+	depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
+	select VIDEOBUF2_DMA_CONTIG
+	---help---
+	  This is a v4l2 driver for the SuperH Mobile CEU Interface
+
+config VIDEO_OMAP1
+	tristate "OMAP1 Camera Interface driver"
+	depends on VIDEO_DEV && ARCH_OMAP1 && SOC_CAMERA
+	select VIDEOBUF_DMA_CONTIG
+	select VIDEOBUF_DMA_SG
+	---help---
+	  This is a v4l2 driver for the TI OMAP1 camera interface
+
+config VIDEO_OMAP2
+	tristate "OMAP2 Camera Capture Interface driver"
+	depends on VIDEO_DEV && ARCH_OMAP2
+	select VIDEOBUF_DMA_SG
+	---help---
+	  This is a v4l2 driver for the TI OMAP2 camera capture interface
+
+config VIDEO_MX2_HOSTSUPPORT
+	bool
+
+config VIDEO_MX2
+	tristate "i.MX27/i.MX25 Camera Sensor Interface driver"
+	depends on VIDEO_DEV && SOC_CAMERA && (MACH_MX27 || (ARCH_MX25 && BROKEN))
+	select VIDEOBUF2_DMA_CONTIG
+	select VIDEO_MX2_HOSTSUPPORT
+	---help---
+	  This is a v4l2 driver for the i.MX27 and the i.MX25 Camera Sensor
+	  Interface
+
+config VIDEO_ATMEL_ISI
+	tristate "ATMEL Image Sensor Interface (ISI) support"
+	depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
+	select VIDEOBUF2_DMA_CONTIG
+	---help---
+	  This module makes the ATMEL Image Sensor Interface available
+	  as a v4l2 device.
+
+source "drivers/media/platform/s5p-fimc/Kconfig"
+source "drivers/media/platform/s5p-tv/Kconfig"
+
+endif # V4L_PLATFORM_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+	bool "Memory-to-memory multimedia devices"
+	depends on VIDEO_V4L2
+	default n
+	---help---
+	  Say Y here to enable selecting drivers for V4L devices that
+	  use system memory for both source and destination buffers, as opposed
+	  to capture and output drivers, which use memory buffers for just
+	  one of those.
+
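The practical difference described above is that a mem2mem device exposes both queue directions on a single video node: an OUTPUT queue that the application feeds source frames into and a CAPTURE queue it reads processed frames back from. A minimal userspace sketch of that double-sided setup, assuming such a device registered at /dev/video1:

/* Request buffers on both queues of one mem2mem node (device path assumed). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_bufs(int fd, enum v4l2_buf_type type)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = type;
	req.memory = V4L2_MEMORY_MMAP;
	return ioctl(fd, VIDIOC_REQBUFS, &req);
}

int main(void)
{
	int fd = open("/dev/video1", O_RDWR);

	if (fd < 0)
		return 1;
	/* Source frames go in via OUTPUT, results come back via CAPTURE. */
	if (request_bufs(fd, V4L2_BUF_TYPE_VIDEO_OUTPUT) < 0 ||
	    request_bufs(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}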
+if V4L_MEM2MEM_DRIVERS
+
+config VIDEO_MEM2MEM_TESTDEV
+	tristate "Virtual test device for mem2mem framework"
+	depends on VIDEO_DEV && VIDEO_V4L2
+	select VIDEOBUF2_VMALLOC
+	select V4L2_MEM2MEM_DEV
+	default n
+	---help---
+	  This is a virtual test device for the memory-to-memory driver
+	  framework.
+
+config VIDEO_CODA
+	tristate "Chips&Media Coda multi-standard codec IP"
+	depends on VIDEO_DEV && VIDEO_V4L2
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	---help---
+	   Coda is a range of video codec IPs that supports
+	   H.264, MPEG-4, and other video formats.
+
+config VIDEO_MEM2MEM_DEINTERLACE
+	tristate "Deinterlace support"
+	depends on VIDEO_DEV && VIDEO_V4L2 && DMA_ENGINE
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	help
+	    Generic deinterlacing V4L2 driver.
+
+config VIDEO_SAMSUNG_S5P_G2D
+	tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
+	depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	default n
+	---help---
+	  This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
+	  2d graphics accelerator.
+
+config VIDEO_SAMSUNG_S5P_JPEG
+	tristate "Samsung S5P/Exynos4 JPEG codec driver (EXPERIMENTAL)"
+	depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P && EXPERIMENTAL
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	---help---
+	  This is a v4l2 driver for Samsung S5P and EXYNOS4 JPEG codec
+
+config VIDEO_SAMSUNG_S5P_MFC
+	tristate "Samsung S5P MFC 5.1 Video Codec"
+	depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+	select VIDEOBUF2_DMA_CONTIG
+	default n
+	help
+	    MFC 5.1 driver for V4L2.
+
+config VIDEO_MX2_EMMAPRP
+	tristate "MX2 eMMa-PrP support"
+	depends on VIDEO_DEV && VIDEO_V4L2 && SOC_IMX27
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	help
+	    MX2X chips have a PrP that can be used to process buffers from
+	    memory to memory. Operations include resizing and format
+	    conversion.
+
+endif # V4L_MEM2MEM_DRIVERS
+
+endif # MEDIA_CAMERA_SUPPORT
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
new file mode 100644
index 0000000..b3effdc
--- /dev/null
+++ b/drivers/media/platform/Makefile
@@ -0,0 +1,66 @@
+#
+# Makefile for the video capture/playback device drivers.
+#
+
+omap2cam-objs	:=	omap24xxcam.o omap24xxcam-dma.o
+
+obj-$(CONFIG_VIDEO_VINO) += indycam.o
+
+obj-$(CONFIG_VIDEO_VINO) += vino.o
+obj-$(CONFIG_VIDEO_TIMBERDALE)	+= timblogiw.o
+
+
+obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
+
+
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
+
+obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
+
+obj-$(CONFIG_VIDEO_OMAP3)	+= omap3isp/
+
+obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
+obj-$(CONFIG_VIDEO_VIVI) += vivi.o
+obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
+
+
+obj-$(CONFIG_VIDEO_OMAP2)		+= omap2cam.o
+obj-$(CONFIG_SOC_CAMERA)		+= soc_camera.o soc_mediabus.o
+obj-$(CONFIG_SOC_CAMERA_PLATFORM)	+= soc_camera_platform.o
+# soc-camera host drivers have to be linked after camera drivers
+obj-$(CONFIG_VIDEO_MX1)			+= mx1_camera.o
+obj-$(CONFIG_VIDEO_MX2)			+= mx2_camera.o
+obj-$(CONFIG_VIDEO_MX3)			+= mx3_camera.o
+obj-$(CONFIG_VIDEO_PXA27x)		+= pxa_camera.o
+obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2)	+= sh_mobile_csi2.o
+obj-$(CONFIG_VIDEO_SH_MOBILE_CEU)	+= sh_mobile_ceu_camera.o
+obj-$(CONFIG_VIDEO_OMAP1)		+= omap1_camera.o
+obj-$(CONFIG_VIDEO_ATMEL_ISI)		+= atmel-isi.o
+
+obj-$(CONFIG_VIDEO_MX2_EMMAPRP)		+= mx2_emmaprp.o
+obj-$(CONFIG_VIDEO_CODA) 			+= coda.o
+
+obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE)	+= m2m-deinterlace.o
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) 	+= s5p-fimc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG)	+= s5p-jpeg/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC)	+= s5p-mfc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV)	+= s5p-tv/
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D)	+= s5p-g2d/
+
+obj-$(CONFIG_BLACKFIN)                  += blackfin/
+
+obj-$(CONFIG_ARCH_DAVINCI)		+= davinci/
+
+obj-$(CONFIG_VIDEO_SH_VOU)		+= sh_vou.o
+
+obj-y	+= davinci/
+
+obj-$(CONFIG_ARCH_OMAP)	+= omap/
+
+ccflags-y += -I$(srctree)/drivers/media/dvb-core
+ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I$(srctree)/drivers/media/tuners
+ccflags-y += -I$(srctree)/drivers/media/i2c/soc_camera
diff --git a/drivers/media/platform/arv.c b/drivers/media/platform/arv.c
new file mode 100644
index 0000000..e346d32d
--- /dev/null
+++ b/drivers/media/platform/arv.c
@@ -0,0 +1,885 @@
+/*
+ * Colour AR M64278(VGA) driver for Video4Linux
+ *
+ * Copyright (C) 2003	Takeo Takahashi <takahashi.takeo@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Some code is taken from AR driver sample program for M3T-M32700UT.
+ *
+ * AR driver sample (M32R SDK):
+ *     Copyright (c) 2003 RENESAS TECHNOROGY CORPORATION
+ *     AND RENESAS SOLUTIONS CORPORATION
+ *     All Rights Reserved.
+ *
+ * 2003-09-01:	Support w3cam by Takeo Takahashi
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#if 0
+#define DEBUG(n, args...) printk(KERN_INFO args)
+#define CHECK_LOST	1
+#else
+#define DEBUG(n, args...)
+#define CHECK_LOST	0
+#endif
+
+/*
+ * USE_INT is always 0, interrupt mode is not available
+ * on linux due to lack of speed
+ */
+#define USE_INT		0	/* Don't modify */
+
+#define VERSION	"0.0.5"
+
+#define ar_inl(addr) 		inl((unsigned long)(addr))
+#define ar_outl(val, addr)	outl((unsigned long)(val), (unsigned long)(addr))
+
+extern struct cpuinfo_m32r	boot_cpu_data;
+
+/*
+ * CCD pixel size
+ *	Note that M32700UT does not support CIF mode, but QVGA is
+ *	supported by M32700UT hardware using VGA mode of AR LSI.
+ *
+ * 	Supported: VGA  (Normal mode, Interlace mode)
+ *		   QVGA (Always Interlace mode of VGA)
+ *
+ */
+#define AR_WIDTH_VGA		640
+#define AR_HEIGHT_VGA		480
+#define AR_WIDTH_QVGA		320
+#define AR_HEIGHT_QVGA		240
+#define MIN_AR_WIDTH		AR_WIDTH_QVGA
+#define MIN_AR_HEIGHT		AR_HEIGHT_QVGA
+#define MAX_AR_WIDTH		AR_WIDTH_VGA
+#define MAX_AR_HEIGHT		AR_HEIGHT_VGA
+
+/* bits & bytes per pixel */
+#define AR_BITS_PER_PIXEL	16
+#define AR_BYTES_PER_PIXEL	(AR_BITS_PER_PIXEL / 8)
+
+/* line buffer size */
+#define AR_LINE_BYTES_VGA	(AR_WIDTH_VGA * AR_BYTES_PER_PIXEL)
+#define AR_LINE_BYTES_QVGA	(AR_WIDTH_QVGA * AR_BYTES_PER_PIXEL)
+#define MAX_AR_LINE_BYTES	AR_LINE_BYTES_VGA
+
+/* frame size & type */
+#define AR_FRAME_BYTES_VGA \
+	(AR_WIDTH_VGA * AR_HEIGHT_VGA * AR_BYTES_PER_PIXEL)
+#define AR_FRAME_BYTES_QVGA \
+	(AR_WIDTH_QVGA * AR_HEIGHT_QVGA * AR_BYTES_PER_PIXEL)
+#define MAX_AR_FRAME_BYTES \
+	(MAX_AR_WIDTH * MAX_AR_HEIGHT * AR_BYTES_PER_PIXEL)
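+/*
+ * For reference, MAX_AR_FRAME_BYTES works out to 640 * 480 * 2 = 614400
+ * bytes (600 KiB) for VGA, while a QVGA frame is 320 * 240 * 2 = 153600
+ * bytes; the static yuv[] staging buffer below is sized for the former.
+ */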
+
+#define AR_MAX_FRAME		15
+
+/* capture size */
+#define AR_SIZE_VGA		0
+#define AR_SIZE_QVGA		1
+
+/* capture mode */
+#define AR_MODE_INTERLACE	0
+#define AR_MODE_NORMAL		1
+
+struct ar {
+	struct v4l2_device v4l2_dev;
+	struct video_device vdev;
+	unsigned int start_capture;	/* during capture in INT. mode. */
+#if USE_INT
+	unsigned char *line_buff;	/* DMA line buffer */
+#endif
+	unsigned char *frame[MAX_AR_HEIGHT];	/* frame data */
+	short size;			/* capture size */
+	short mode;			/* capture mode */
+	int width, height;
+	int frame_bytes, line_bytes;
+	wait_queue_head_t wait;
+	struct mutex lock;
+};
+
+static struct ar ardev;
+
+static int video_nr = -1;	/* video device number (first free) */
+static unsigned char yuv[MAX_AR_FRAME_BYTES];
+
+/* module parameters */
+/* default frequency */
+#define DEFAULT_FREQ	50	/* 50 or 75 (MHz) is available as BCLK */
+static int freq = DEFAULT_FREQ;	/* BCLK: 50 or 75 (MHz) are available */
+static int vga;			/* default mode (0: QVGA mode, other: VGA mode) */
+static int vga_interlace;	/* 0 for normal mode, else interlace mode */
+module_param(freq, int, 0);
+module_param(vga, int, 0);
+module_param(vga_interlace, int, 0);
+
+static void wait_for_vsync(void)
+{
+	while (ar_inl(ARVCR0) & ARVCR0_VDS)	/* wait for VSYNC */
+		cpu_relax();
+	while (!(ar_inl(ARVCR0) & ARVCR0_VDS))	/* wait for VSYNC */
+		cpu_relax();
+}
+
+static void wait_acknowledge(void)
+{
+	int i;
+
+	for (i = 0; i < 1000; i++)
+		cpu_relax();
+	while (ar_inl(PLDI2CSTS) & PLDI2CSTS_NOACK)
+		cpu_relax();
+}
+
+/*******************************************************************
+ * I2C functions
+ *******************************************************************/
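+/*
+ * iic() runs one short write transaction on the PLD I2C controller:
+ * the slave address in 'addr' goes out first, followed by data1 and
+ * data2 and, when n == 3, data3 as well; each byte is paced against
+ * VSYNC and acknowledged before the stop condition is issued.
+ */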
+static void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
+	 unsigned long data3)
+{
+	int i;
+
+	/* Slave Address */
+	ar_outl(addr, PLDI2CDATA);
+	wait_for_vsync();
+
+	/* Start */
+	ar_outl(1, PLDI2CCND);
+	wait_acknowledge();
+
+	/* Transfer data 1 */
+	ar_outl(data1, PLDI2CDATA);
+	wait_for_vsync();
+	ar_outl(PLDI2CSTEN_STEN, PLDI2CSTEN);
+	wait_acknowledge();
+
+	/* Transfer data 2 */
+	ar_outl(data2, PLDI2CDATA);
+	wait_for_vsync();
+	ar_outl(PLDI2CSTEN_STEN, PLDI2CSTEN);
+	wait_acknowledge();
+
+	if (n == 3) {
+		/* Transfer data 3 */
+		ar_outl(data3, PLDI2CDATA);
+		wait_for_vsync();
+		ar_outl(PLDI2CSTEN_STEN, PLDI2CSTEN);
+		wait_acknowledge();
+	}
+
+	/* Stop */
+	for (i = 0; i < 100; i++)
+		cpu_relax();
+	ar_outl(2, PLDI2CCND);
+	ar_outl(2, PLDI2CCND);
+
+	while (ar_inl(PLDI2CSTS) & PLDI2CSTS_BB)
+		cpu_relax();
+}
+
+
+static void init_iic(void)
+{
+	DEBUG(1, "init_iic:\n");
+
+	/*
+	 * ICU Setting (iic)
+	 */
+	/* I2C Setting */
+	ar_outl(0x0, PLDI2CCR);      	/* I2CCR Disable                   */
+	ar_outl(0x0300, PLDI2CMOD); 	/* I2CMOD ACK/8b-data/7b-addr/auto */
+	ar_outl(0x1, PLDI2CACK);	/* I2CACK ACK                      */
+
+	/* I2C CLK */
+	/* 50MH-100k */
+	if (freq == 75)
+		ar_outl(369, PLDI2CFREQ);	/* BCLK = 75MHz */
+	else if (freq == 50)
+		ar_outl(244, PLDI2CFREQ);	/* BCLK = 50MHz */
+	else
+		ar_outl(244, PLDI2CFREQ);	/* default: BCLK = 50MHz */
+	ar_outl(0x1, PLDI2CCR); 	/* I2CCR Enable */
+}
+
+/**************************************************************************
+ *
+ * Video4Linux Interface functions
+ *
+ **************************************************************************/
+
+static inline void disable_dma(void)
+{
+	ar_outl(0x8000, M32R_DMAEN_PORTL);	/* disable DMA0 */
+}
+
+static inline void enable_dma(void)
+{
+	ar_outl(0x8080, M32R_DMAEN_PORTL);	/* enable DMA0 */
+}
+
+static inline void clear_dma_status(void)
+{
+	ar_outl(0x8000, M32R_DMAEDET_PORTL);	/* clear status */
+}
+
+static void wait_for_vertical_sync(struct ar *ar, int exp_line)
+{
+#if CHECK_LOST
+	int tmout = 10000;	/* FIXME */
+	int l;
+
+	/*
+	 * check HCOUNT because we cannot check vertical sync.
+	 */
+	for (; tmout >= 0; tmout--) {
+		l = ar_inl(ARVHCOUNT);
+		if (l == exp_line)
+			break;
+	}
+	if (tmout < 0)
+		v4l2_err(&ar->v4l2_dev, "lost %d -> %d\n", exp_line, l);
+#else
+	while (ar_inl(ARVHCOUNT) != exp_line)
+		cpu_relax();
+#endif
+}
+
+static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+	struct ar *ar = video_drvdata(file);
+	long ret = ar->frame_bytes;		/* return read bytes */
+	unsigned long arvcr1 = 0;
+	unsigned long flags;
+	unsigned char *p;
+	int h, w;
+	unsigned char *py, *pu, *pv;
+#if !USE_INT
+	int l;
+#endif
+
+	DEBUG(1, "ar_read()\n");
+
+	if (ar->size == AR_SIZE_QVGA)
+		arvcr1 |= ARVCR1_QVGA;
+	if (ar->mode == AR_MODE_NORMAL)
+		arvcr1 |= ARVCR1_NORMAL;
+
+	mutex_lock(&ar->lock);
+
+#if USE_INT
+	local_irq_save(flags);
+	disable_dma();
+	ar_outl(0xa1871300, M32R_DMA0CR0_PORTL);
+	ar_outl(0x01000000, M32R_DMA0CR1_PORTL);
+
+	/* set AR FIFO address as source(BSEL5) */
+	ar_outl(ARDATA32, M32R_DMA0CSA_PORTL);
+	ar_outl(ARDATA32, M32R_DMA0RSA_PORTL);
+	ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL);	/* destination addr. */
+	ar_outl(ar->line_buff, M32R_DMA0RDA_PORTL); 	/* reload address */
+	ar_outl(ar->line_bytes, M32R_DMA0CBCUT_PORTL); 	/* byte count (bytes) */
+	ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); 	/* reload count (bytes) */
+
+	/*
+	 * Okay, kick AR LSI to invoke an interrupt
+	 */
+	ar->start_capture = 0;
+	ar_outl(arvcr1 | ARVCR1_HIEN, ARVCR1);
+	local_irq_restore(flags);
+	/* .... AR interrupts .... */
+	interruptible_sleep_on(&ar->wait);
+	if (signal_pending(current)) {
+		printk(KERN_ERR "arv: interrupted while getting frame data.\n");
+		ret = -EINTR;
+		goto out_up;
+	}
+#else	/* ! USE_INT */
+	/* polling */
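+	/*
+	 * Polled capture: DMA0 is set up with the AR FIFO as its source,
+	 * and for every scan line the destination is repointed at the
+	 * matching line of the frame buffer (re-ordering interlaced VGA
+	 * fields into frame order on the fly) before busy-waiting for the
+	 * transfer to complete.
+	 */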
+	ar_outl(arvcr1, ARVCR1);
+	disable_dma();
+	ar_outl(0x8000, M32R_DMAEDET_PORTL);
+	ar_outl(0xa0861300, M32R_DMA0CR0_PORTL);
+	ar_outl(0x01000000, M32R_DMA0CR1_PORTL);
+	ar_outl(ARDATA32, M32R_DMA0CSA_PORTL);
+	ar_outl(ARDATA32, M32R_DMA0RSA_PORTL);
+	ar_outl(ar->line_bytes, M32R_DMA0CBCUT_PORTL);
+	ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL);
+
+	local_irq_save(flags);
+	while (ar_inl(ARVHCOUNT) != 0)		/* wait for 0 */
+		cpu_relax();
+	if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
+		for (h = 0; h < ar->height; h++) {
+			wait_for_vertical_sync(ar, h);
+			if (h < (AR_HEIGHT_VGA/2))
+				l = h << 1;
+			else
+				l = (((h - (AR_HEIGHT_VGA/2)) << 1) + 1);
+			ar_outl(virt_to_phys(ar->frame[l]), M32R_DMA0CDA_PORTL);
+			enable_dma();
+			while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
+				cpu_relax();
+			disable_dma();
+			clear_dma_status();
+			ar_outl(0xa0861300, M32R_DMA0CR0_PORTL);
+		}
+	} else {
+		for (h = 0; h < ar->height; h++) {
+			wait_for_vertical_sync(ar, h);
+			ar_outl(virt_to_phys(ar->frame[h]), M32R_DMA0CDA_PORTL);
+			enable_dma();
+			while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
+				cpu_relax();
+			disable_dma();
+			clear_dma_status();
+			ar_outl(0xa0861300, M32R_DMA0CR0_PORTL);
+		}
+	}
+	local_irq_restore(flags);
+#endif	/* ! USE_INT */
+
+	/*
+	 * convert YUV422 to YUV422P
+	 * 	+--------------------+
+	 *	|  Y0,Y1,...	     |
+	 *	|  ..............Yn  |
+	 *	+--------------------+
+	 *	|  U0,U1,........Un  |
+	 *	+--------------------+
+	 *	|  V0,V1,........Vn  |
+	 *	+--------------------+
+	 */
+	py = yuv;
+	pu = py + (ar->frame_bytes / 2);
+	pv = pu + (ar->frame_bytes / 4);
+	for (h = 0; h < ar->height; h++) {
+		p = ar->frame[h];
+		for (w = 0; w < ar->line_bytes; w += 4) {
+			*py++ = *p++;
+			*pu++ = *p++;
+			*py++ = *p++;
+			*pv++ = *p++;
+		}
+	}
+	if (copy_to_user(buf, yuv, ar->frame_bytes)) {
+		v4l2_err(&ar->v4l2_dev, "failed while copy_to_user yuv.\n");
+		ret = -EFAULT;
+		goto out_up;
+	}
+	DEBUG(1, "ret = %d\n", ret);
+out_up:
+	mutex_unlock(&ar->lock);
+	return ret;
+}
+
+static int ar_querycap(struct file *file, void  *priv,
+					struct v4l2_capability *vcap)
+{
+	struct ar *ar = video_drvdata(file);
+
+	strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
+	strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
+	strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
+	vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+	vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	return 0;
+}
+
+static int ar_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+	if (vin->index > 0)
+		return -EINVAL;
+	strlcpy(vin->name, "Camera", sizeof(vin->name));
+	vin->type = V4L2_INPUT_TYPE_CAMERA;
+	vin->audioset = 0;
+	vin->tuner = 0;
+	vin->std = V4L2_STD_ALL;
+	vin->status = 0;
+	return 0;
+}
+
+static int ar_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+	*inp = 0;
+	return 0;
+}
+
+static int ar_s_input(struct file *file, void *fh, unsigned int inp)
+{
+	return inp ? -EINVAL : 0;
+}
+
+static int ar_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+	struct ar *ar = video_drvdata(file);
+	struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+	pix->width = ar->width;
+	pix->height = ar->height;
+	pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+	pix->field = (ar->mode == AR_MODE_NORMAL) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+	pix->bytesperline = ar->width;
+	pix->sizeimage = 2 * ar->width * ar->height;
+	/* Just a guess */
+	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+	return 0;
+}
+
+static int ar_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+	struct ar *ar = video_drvdata(file);
+	struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+	if (pix->height <= AR_HEIGHT_QVGA || pix->width <= AR_WIDTH_QVGA) {
+		pix->height = AR_HEIGHT_QVGA;
+		pix->width = AR_WIDTH_QVGA;
+		pix->field = V4L2_FIELD_INTERLACED;
+	} else {
+		pix->height = AR_HEIGHT_VGA;
+		pix->width = AR_WIDTH_VGA;
+		pix->field = vga_interlace ? V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
+	}
+	pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+	pix->bytesperline = ar->width;
+	pix->sizeimage = 2 * ar->width * ar->height;
+	/* Just a guess */
+	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+	return 0;
+}
+
+static int ar_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+	struct ar *ar = video_drvdata(file);
+	struct v4l2_pix_format *pix = &fmt->fmt.pix;
+	int ret = ar_try_fmt_vid_cap(file, fh, fmt);
+
+	if (ret)
+		return ret;
+	mutex_lock(&ar->lock);
+	ar->width = pix->width;
+	ar->height = pix->height;
+	if (ar->width == AR_WIDTH_VGA) {
+		ar->size = AR_SIZE_VGA;
+		ar->frame_bytes = AR_FRAME_BYTES_VGA;
+		ar->line_bytes = AR_LINE_BYTES_VGA;
+		if (vga_interlace)
+			ar->mode = AR_MODE_INTERLACE;
+		else
+			ar->mode = AR_MODE_NORMAL;
+	} else {
+		ar->size = AR_SIZE_QVGA;
+		ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+		ar->line_bytes = AR_LINE_BYTES_QVGA;
+		ar->mode = AR_MODE_INTERLACE;
+	}
+	/* Ok we figured out what to use from our wide choice */
+	mutex_unlock(&ar->lock);
+	return 0;
+}
+
+static int ar_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
+{
+	static struct v4l2_fmtdesc formats[] = {
+		{ 0, 0, 0,
+		  "YUV 4:2:2 Planar", V4L2_PIX_FMT_YUV422P,
+		  { 0, 0, 0, 0 }
+		},
+	};
+	enum v4l2_buf_type type = fmt->type;
+
+	if (fmt->index > 0)
+		return -EINVAL;
+
+	*fmt = formats[fmt->index];
+	fmt->type = type;
+	return 0;
+}
+
+#if USE_INT
+/*
+ * Interrupt handler
+ */
+static void ar_interrupt(int irq, void *dev)
+{
+	struct ar *ar = dev;
+	unsigned int line_count;
+	unsigned int line_number;
+	unsigned int arvcr1;
+
+	line_count = ar_inl(ARVHCOUNT);			/* line number */
+	if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
+		/* operations for interlace mode */
+		if (line_count < (AR_HEIGHT_VGA / 2)) 	/* even line */
+			line_number = (line_count << 1);
+		else 					/* odd line */
+			line_number =
+			(((line_count - (AR_HEIGHT_VGA / 2)) << 1) + 1);
+	} else {
+		line_number = line_count;
+	}
+
+	if (line_number == 0) {
+		/*
+		 * It is an interrupt for line 0.
+		 * we have to start capture.
+		 */
+		disable_dma();
+#if 0
+		ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL);	/* needless? */
+#endif
+		memcpy(ar->frame[0], ar->line_buff, ar->line_bytes);
+#if 0
+		ar_outl(0xa1861300, M32R_DMA0CR0_PORTL);
+#endif
+		enable_dma();
+		ar->start_capture = 1;			/* during capture */
+		return;
+	}
+
+	if (ar->start_capture == 1 && line_number <= (ar->height - 1)) {
+		disable_dma();
+		memcpy(ar->frame[line_number], ar->line_buff, ar->line_bytes);
+
+		/*
+		 * if captured all line of a frame, disable AR interrupt
+		 * and wake a process up.
+		 */
+		if (line_number == (ar->height - 1)) { 	/* end  of line */
+
+			ar->start_capture = 0;
+
+			/* disable AR interrupt request */
+			arvcr1 = ar_inl(ARVCR1);
+			arvcr1 &= ~ARVCR1_HIEN;		/* clear int. flag */
+			ar_outl(arvcr1, ARVCR1);	/* disable */
+			wake_up_interruptible(&ar->wait);
+		} else {
+#if 0
+			ar_outl(ar->line_buff, M32R_DMA0CDA_PORTL);
+			ar_outl(0xa1861300, M32R_DMA0CR0_PORTL);
+#endif
+			enable_dma();
+		}
+	}
+}
+#endif
+
+/*
+ * ar_initialize()
+ *	ar_initialize() initializes the AR LSI and its peripherals.
+ *
+ *	0 is returned on success.
+ *	A negative error code is returned on failure.
+ *
+ */
+static int ar_initialize(struct ar *ar)
+{
+	unsigned long cr = 0;
+	int i, found = 0;
+
+	DEBUG(1, "ar_initialize:\n");
+
+	/*
+	 * initialize AR LSI
+	 */
+	ar_outl(0, ARVCR0);		/* assert reset of AR LSI */
+	for (i = 0; i < 0x18; i++)	/* wait for over 10 cycles @ 27MHz */
+		cpu_relax();
+	ar_outl(ARVCR0_RST, ARVCR0);	/* negate reset of AR LSI (enable) */
+	for (i = 0; i < 0x40d; i++)	/* wait for over 420 cycles @ 27MHz */
+		cpu_relax();
+
+	/* AR uses INT3 of CPU as interrupt pin. */
+	ar_outl(ARINTSEL_INT3, ARINTSEL);
+
+	if (ar->size == AR_SIZE_QVGA)
+		cr |= ARVCR1_QVGA;
+	if (ar->mode == AR_MODE_NORMAL)
+		cr |= ARVCR1_NORMAL;
+	ar_outl(cr, ARVCR1);
+
+	/*
+	 * Initialize IIC so that CPU can communicate with AR LSI,
+	 * and send boot commands to AR LSI.
+	 */
+	init_iic();
+
+	for (i = 0; i < 0x100000; i++) {	/* > 0xa1d10,  56ms */
+		if ((ar_inl(ARVCR0) & ARVCR0_VDS)) {	/* VSYNC */
+			found = 1;
+			break;
+		}
+	}
+
+	if (found == 0)
+		return -ENODEV;
+
+	v4l2_info(&ar->v4l2_dev, "Initializing ");
+
+	iic(2, 0x78, 0x11, 0x01, 0x00);	/* start */
+	iic(3, 0x78, 0x12, 0x00, 0x06);
+	iic(3, 0x78, 0x12, 0x12, 0x30);
+	iic(3, 0x78, 0x12, 0x15, 0x58);
+	iic(3, 0x78, 0x12, 0x17, 0x30);
+	printk(KERN_CONT ".");
+	iic(3, 0x78, 0x12, 0x1a, 0x97);
+	iic(3, 0x78, 0x12, 0x1b, 0xff);
+	iic(3, 0x78, 0x12, 0x1c, 0xff);
+	iic(3, 0x78, 0x12, 0x26, 0x10);
+	iic(3, 0x78, 0x12, 0x27, 0x00);
+	printk(KERN_CONT ".");
+	iic(2, 0x78, 0x34, 0x02, 0x00);
+	iic(2, 0x78, 0x7a, 0x10, 0x00);
+	iic(2, 0x78, 0x80, 0x39, 0x00);
+	iic(2, 0x78, 0x81, 0xe6, 0x00);
+	iic(2, 0x78, 0x8d, 0x00, 0x00);
+	printk(KERN_CONT ".");
+	iic(2, 0x78, 0x8e, 0x0c, 0x00);
+	iic(2, 0x78, 0x8f, 0x00, 0x00);
+#if 0
+	iic(2, 0x78, 0x90, 0x00, 0x00);	/* AWB on=1 off=0 */
+#endif
+	iic(2, 0x78, 0x93, 0x01, 0x00);
+	iic(2, 0x78, 0x94, 0xcd, 0x00);
+	iic(2, 0x78, 0x95, 0x00, 0x00);
+	printk(KERN_CONT ".");
+	iic(2, 0x78, 0x96, 0xa0, 0x00);
+	iic(2, 0x78, 0x97, 0x00, 0x00);
+	iic(2, 0x78, 0x98, 0x60, 0x00);
+	iic(2, 0x78, 0x99, 0x01, 0x00);
+	iic(2, 0x78, 0x9a, 0x19, 0x00);
+	printk(KERN_CONT ".");
+	iic(2, 0x78, 0x9b, 0x02, 0x00);
+	iic(2, 0x78, 0x9c, 0xe8, 0x00);
+	iic(2, 0x78, 0x9d, 0x02, 0x00);
+	iic(2, 0x78, 0x9e, 0x2e, 0x00);
+	iic(2, 0x78, 0xb8, 0x78, 0x00);
+	iic(2, 0x78, 0xba, 0x05, 0x00);
+#if 0
+	iic(2, 0x78, 0x83, 0x8c, 0x00);	/* brightness */
+#endif
+	printk(KERN_CONT ".");
+
+	/* color correction */
+	iic(3, 0x78, 0x49, 0x00, 0x95);	/* a		*/
+	iic(3, 0x78, 0x49, 0x01, 0x96);	/* b		*/
+	iic(3, 0x78, 0x49, 0x03, 0x85);	/* c		*/
+	iic(3, 0x78, 0x49, 0x04, 0x97);	/* d		*/
+	iic(3, 0x78, 0x49, 0x02, 0x7e);	/* e(Lo)	*/
+	iic(3, 0x78, 0x49, 0x05, 0xa4);	/* f(Lo)	*/
+	iic(3, 0x78, 0x49, 0x06, 0x04);	/* e(Hi)	*/
+	iic(3, 0x78, 0x49, 0x07, 0x04);	/* e(Hi)	*/
+	iic(2, 0x78, 0x48, 0x01, 0x00);	/* on=1 off=0	*/
+
+	printk(KERN_CONT ".");
+	iic(2, 0x78, 0x11, 0x00, 0x00);	/* end */
+	printk(KERN_CONT " done\n");
+	return 0;
+}
+
+
+/****************************************************************************
+ *
+ * Video4Linux Module functions
+ *
+ ****************************************************************************/
+
+static const struct v4l2_file_operations ar_fops = {
+	.owner		= THIS_MODULE,
+	.open		= v4l2_fh_open,
+	.release	= v4l2_fh_release,
+	.read		= ar_read,
+	.unlocked_ioctl	= video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+	.vidioc_querycap    		    = ar_querycap,
+	.vidioc_g_input      		    = ar_g_input,
+	.vidioc_s_input      		    = ar_s_input,
+	.vidioc_enum_input   		    = ar_enum_input,
+	.vidioc_enum_fmt_vid_cap 	    = ar_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap 		    = ar_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap  		    = ar_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap  	    = ar_try_fmt_vid_cap,
+};
+
+#define ALIGN4(x)	((((int)(x)) & 0x3) == 0)
+
+static int __init ar_init(void)
+{
+	struct ar *ar;
+	struct v4l2_device *v4l2_dev;
+	int ret;
+	int i;
+
+	ar = &ardev;
+	v4l2_dev = &ar->v4l2_dev;
+	strlcpy(v4l2_dev->name, "arv", sizeof(v4l2_dev->name));
+	v4l2_info(v4l2_dev, "Colour AR VGA driver %s\n", VERSION);
+
+	ret = v4l2_device_register(NULL, v4l2_dev);
+	if (ret < 0) {
+		v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+		return ret;
+	}
+	ret = -EIO;
+
+#if USE_INT
+	/* allocate a DMA buffer for 1 line.  */
+	ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA);
+	if (ar->line_buff == NULL || !ALIGN4(ar->line_buff)) {
+		v4l2_err(v4l2_dev, "buffer allocation failed for DMA.\n");
+		ret = -ENOMEM;
+		goto out_end;
+	}
+#endif
+	/* allocate buffers for a frame */
+	for (i = 0; i < MAX_AR_HEIGHT; i++) {
+		ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL);
+		if (ar->frame[i] == NULL || !ALIGN4(ar->frame[i])) {
+			v4l2_err(v4l2_dev, "buffer allocation failed for frame.\n");
+			ret = -ENOMEM;
+			goto out_line_buff;
+		}
+	}
+
+	strlcpy(ar->vdev.name, "Colour AR VGA", sizeof(ar->vdev.name));
+	ar->vdev.v4l2_dev = v4l2_dev;
+	ar->vdev.fops = &ar_fops;
+	ar->vdev.ioctl_ops = &ar_ioctl_ops;
+	ar->vdev.release = video_device_release_empty;
+	set_bit(V4L2_FL_USE_FH_PRIO, &ar->vdev.flags);
+	video_set_drvdata(&ar->vdev, ar);
+
+	if (vga) {
+		ar->width 	= AR_WIDTH_VGA;
+		ar->height 	= AR_HEIGHT_VGA;
+		ar->size 	= AR_SIZE_VGA;
+		ar->frame_bytes = AR_FRAME_BYTES_VGA;
+		ar->line_bytes	= AR_LINE_BYTES_VGA;
+		if (vga_interlace)
+			ar->mode = AR_MODE_INTERLACE;
+		else
+			ar->mode = AR_MODE_NORMAL;
+	} else {
+		ar->width 	= AR_WIDTH_QVGA;
+		ar->height 	= AR_HEIGHT_QVGA;
+		ar->size 	= AR_SIZE_QVGA;
+		ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+		ar->line_bytes	= AR_LINE_BYTES_QVGA;
+		ar->mode	= AR_MODE_INTERLACE;
+	}
+	mutex_init(&ar->lock);
+	init_waitqueue_head(&ar->wait);
+
+#if USE_INT
+	if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) {
+		v4l2_err(v4l2_dev, "request_irq(%d) failed.\n", M32R_IRQ_INT3);
+		ret = -EIO;
+		goto out_irq;
+	}
+#endif
+
+	if (ar_initialize(ar) != 0) {
+		v4l2_err(v4l2_dev, "M64278 not found.\n");
+		ret = -ENODEV;
+		goto out_dev;
+	}
+
+	/*
+	 * The hardware is now initialized according to the module
+	 * parameters, so register the video device as a frame grabber
+	 * type; the device node is named videoN.
+	 */
+	if (video_register_device(&ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
+		/* return -1, -ENFILE(full) or others */
+		v4l2_err(v4l2_dev, "register video (Colour AR) failed.\n");
+		ret = -ENODEV;
+		goto out_dev;
+	}
+
+	v4l2_info(v4l2_dev, "%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
+		video_device_node_name(&ar->vdev), M32R_IRQ_INT3, freq);
+
+	return 0;
+
+out_dev:
+#if USE_INT
+	free_irq(M32R_IRQ_INT3, ar);
+
+out_irq:
+#endif
+	for (i = 0; i < MAX_AR_HEIGHT; i++)
+		kfree(ar->frame[i]);
+
+out_line_buff:
+#if USE_INT
+	kfree(ar->line_buff);
+
+out_end:
+#endif
+	v4l2_device_unregister(&ar->v4l2_dev);
+	return ret;
+}
+
+
+static int __init ar_init_module(void)
+{
+	freq = (boot_cpu_data.bus_clock / 1000000);
+	printk(KERN_INFO "arv: Bus clock %d\n", freq);
+	if (freq != 50 && freq != 75)
+		freq = DEFAULT_FREQ;
+	return ar_init();
+}
+
+static void __exit ar_cleanup_module(void)
+{
+	struct ar *ar;
+	int i;
+
+	ar = &ardev;
+	video_unregister_device(&ar->vdev);
+#if USE_INT
+	free_irq(M32R_IRQ_INT3, ar);
+#endif
+	for (i = 0; i < MAX_AR_HEIGHT; i++)
+		kfree(ar->frame[i]);
+#if USE_INT
+	kfree(ar->line_buff);
+#endif
+	v4l2_device_unregister(&ar->v4l2_dev);
+}
+
+module_init(ar_init_module);
+module_exit(ar_cleanup_module);
+
+MODULE_AUTHOR("Takeo Takahashi <takahashi.takeo@renesas.com>");
+MODULE_DESCRIPTION("Colour AR M64278(VGA) for Video4Linux");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/media/platform/atmel-isi.c b/drivers/media/platform/atmel-isi.c
new file mode 100644
index 0000000..6274a91
--- /dev/null
+++ b/drivers/media/platform/atmel-isi.c
@@ -0,0 +1,1099 @@
+/*
+ * Copyright (c) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ * Based on the bttv driver for Bt848 with respective copyright holders
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/atmel-isi.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MAX_BUFFER_NUM			32
+#define MAX_SUPPORT_WIDTH		2048
+#define MAX_SUPPORT_HEIGHT		2048
+#define VID_LIMIT_BYTES			(16 * 1024 * 1024)
+#define MIN_FRAME_RATE			15
+#define FRAME_INTERVAL_MILLI_SEC	(1000 / MIN_FRAME_RATE)
+
+/* ISI states */
+enum {
+	ISI_STATE_IDLE = 0,
+	ISI_STATE_READY,
+	ISI_STATE_WAIT_SOF,
+};
+
+/* Frame buffer descriptor */
+struct fbd {
+	/* Physical address of the frame buffer */
+	u32 fb_address;
+	/* DMA Control Register(only in HISI2) */
+	u32 dma_ctrl;
+	/* Physical address of the next fbd */
+	u32 next_fbd_address;
+};
+
+static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
+{
+	fb_desc->dma_ctrl = ctrl;
+}
+
+struct isi_dma_desc {
+	struct list_head list;
+	struct fbd *p_fbd;
+	u32 fbd_phys;
+};
+
+/* Frame buffer data */
+struct frame_buffer {
+	struct vb2_buffer vb;
+	struct isi_dma_desc *p_dma_desc;
+	struct list_head list;
+};
+
+struct atmel_isi {
+	/* Protects the access of variables shared with the ISR */
+	spinlock_t			lock;
+	void __iomem			*regs;
+
+	int				sequence;
+	/* State of the ISI module in capturing mode */
+	int				state;
+
+	/* Wait queue for waiting for SOF */
+	wait_queue_head_t		vsync_wq;
+
+	struct vb2_alloc_ctx		*alloc_ctx;
+
+	/* Allocate descriptors for dma buffer use */
+	struct fbd			*p_fb_descriptors;
+	u32				fb_descriptors_phys;
+	struct				list_head dma_desc_head;
+	struct isi_dma_desc		dma_desc[MAX_BUFFER_NUM];
+
+	struct completion		complete;
+	/* ISI peripheral clock */
+	struct clk			*pclk;
+	/* ISI_MCK, feed to camera sensor to generate pixel clock */
+	struct clk			*mck;
+	unsigned int			irq;
+
+	struct isi_platform_data	*pdata;
+	u16				width_flags;	/* max 12 bits */
+
+	struct list_head		video_buffer_list;
+	struct frame_buffer		*active;
+
+	struct soc_camera_device	*icd;
+	struct soc_camera_host		soc_host;
+};
+
+static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
+{
+	writel(val, isi->regs + reg);
+}
+static u32 isi_readl(struct atmel_isi *isi, u32 reg)
+{
+	return readl(isi->regs + reg);
+}
+
+static int configure_geometry(struct atmel_isi *isi, u32 width,
+			u32 height, enum v4l2_mbus_pixelcode code)
+{
+	u32 cfg2, cr;
+
+	switch (code) {
+	/* YUV, including grey */
+	case V4L2_MBUS_FMT_Y8_1X8:
+		cr = ISI_CFG2_GRAYSCALE;
+		break;
+	case V4L2_MBUS_FMT_UYVY8_2X8:
+		cr = ISI_CFG2_YCC_SWAP_MODE_3;
+		break;
+	case V4L2_MBUS_FMT_VYUY8_2X8:
+		cr = ISI_CFG2_YCC_SWAP_MODE_2;
+		break;
+	case V4L2_MBUS_FMT_YUYV8_2X8:
+		cr = ISI_CFG2_YCC_SWAP_MODE_1;
+		break;
+	case V4L2_MBUS_FMT_YVYU8_2X8:
+		cr = ISI_CFG2_YCC_SWAP_DEFAULT;
+		break;
+	/* RGB, TODO */
+	default:
+		return -EINVAL;
+	}
+
+	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+	cfg2 = isi_readl(isi, ISI_CFG2);
+	cfg2 |= cr;
+	/* Set width */
+	cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
+	cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
+			ISI_CFG2_IM_HSIZE_MASK;
+	/* Set height */
+	cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK);
+	cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
+			& ISI_CFG2_IM_VSIZE_MASK;
+	isi_writel(isi, ISI_CFG2, cfg2);
+
+	return 0;
+}
+
+static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
+{
+	if (isi->active) {
+		struct vb2_buffer *vb = &isi->active->vb;
+		struct frame_buffer *buf = isi->active;
+
+		list_del_init(&buf->list);
+		do_gettimeofday(&vb->v4l2_buf.timestamp);
+		vb->v4l2_buf.sequence = isi->sequence++;
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	}
+
+	if (list_empty(&isi->video_buffer_list)) {
+		isi->active = NULL;
+	} else {
+		/* start next dma frame. */
+		isi->active = list_entry(isi->video_buffer_list.next,
+					struct frame_buffer, list);
+		isi_writel(isi, ISI_DMA_C_DSCR,
+			isi->active->p_dma_desc->fbd_phys);
+		isi_writel(isi, ISI_DMA_C_CTRL,
+			ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+		isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+	}
+	return IRQ_HANDLED;
+}
+
+/* ISI interrupt service routine */
+static irqreturn_t isi_interrupt(int irq, void *dev_id)
+{
+	struct atmel_isi *isi = dev_id;
+	u32 status, mask, pending;
+	irqreturn_t ret = IRQ_NONE;
+
+	spin_lock(&isi->lock);
+
+	status = isi_readl(isi, ISI_STATUS);
+	mask = isi_readl(isi, ISI_INTMASK);
+	pending = status & mask;
+
+	if (pending & ISI_CTRL_SRST) {
+		complete(&isi->complete);
+		isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
+		ret = IRQ_HANDLED;
+	} else if (pending & ISI_CTRL_DIS) {
+		complete(&isi->complete);
+		isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
+		ret = IRQ_HANDLED;
+	} else {
+		if ((pending & ISI_SR_VSYNC) &&
+				(isi->state == ISI_STATE_IDLE)) {
+			isi->state = ISI_STATE_READY;
+			wake_up_interruptible(&isi->vsync_wq);
+			ret = IRQ_HANDLED;
+		}
+		if (likely(pending & ISI_SR_CXFR_DONE))
+			ret = atmel_isi_handle_streaming(isi);
+	}
+
+	spin_unlock(&isi->lock);
+	return ret;
+}
+
+#define	WAIT_ISI_RESET		1
+#define	WAIT_ISI_DISABLE	0
+static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
+{
+	unsigned long timeout;
+	/*
+	 * The reset or disable will only succeed if we have a
+	 * pixel clock from the camera.
+	 */
+	init_completion(&isi->complete);
+
+	if (wait_reset) {
+		isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
+		isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
+	} else {
+		isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
+		isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+	}
+
+	timeout = wait_for_completion_timeout(&isi->complete,
+			msecs_to_jiffies(100));
+	if (timeout == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------
+	Videobuf operations
+   ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	unsigned long size;
+	int ret;
+
+	/* Reset ISI */
+	ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+	if (ret < 0) {
+		dev_err(icd->parent, "Reset ISI timed out\n");
+		return ret;
+	}
+	/* Disable all interrupts */
+	isi_writel(isi, ISI_INTDIS, ~0UL);
+
+	size = icd->sizeimage;
+
+	if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
+		*nbuffers = MAX_BUFFER_NUM;
+
+	if (size * *nbuffers > VID_LIMIT_BYTES)
+		*nbuffers = VID_LIMIT_BYTES / size;
+
+	*nplanes = 1;
+	sizes[0] = size;
+	alloc_ctxs[0] = isi->alloc_ctx;
+
+	isi->sequence = 0;
+	isi->active = NULL;
+
+	dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__,
+		*nbuffers, size);
+
+	return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+	buf->p_dma_desc = NULL;
+	INIT_LIST_HEAD(&buf->list);
+
+	return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	unsigned long size;
+	struct isi_dma_desc *desc;
+
+	size = icd->sizeimage;
+
+	if (vb2_plane_size(vb, 0) < size) {
+		dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
+				__func__, vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(&buf->vb, 0, size);
+
+	if (!buf->p_dma_desc) {
+		if (list_empty(&isi->dma_desc_head)) {
+			dev_err(icd->parent, "Not enough dma descriptors.\n");
+			return -EINVAL;
+		} else {
+			/* Get an available descriptor */
+			desc = list_entry(isi->dma_desc_head.next,
+						struct isi_dma_desc, list);
+			/* Delete the descriptor since now it is used */
+			list_del_init(&desc->list);
+
+			/* Initialize the dma descriptor */
+			desc->p_fbd->fb_address =
+					vb2_dma_contig_plane_dma_addr(vb, 0);
+			desc->p_fbd->next_fbd_address = 0;
+			set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
+
+			buf->p_dma_desc = desc;
+		}
+	}
+	return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+	/* The descriptor is available again, so return it to the head of the list */
+	if (buf->p_dma_desc)
+		list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
+}
+
+static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
+{
+	u32 ctrl, cfg1;
+
+	cfg1 = isi_readl(isi, ISI_CFG1);
+	/* Enable irq: cxfr for the codec path, pxfr for the preview path */
+	isi_writel(isi, ISI_INTEN,
+			ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+	/* Check if already in a frame */
+	if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
+		dev_err(isi->icd->parent, "Already in frame handling.\n");
+		return;
+	}
+
+	isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys);
+	isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+	isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+
+	/* Enable linked list */
+	cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
+
+	/* Enable codec path and ISI */
+	ctrl = ISI_CTRL_CDC | ISI_CTRL_EN;
+	isi_writel(isi, ISI_CTRL, ctrl);
+	isi_writel(isi, ISI_CFG1, cfg1);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&isi->lock, flags);
+	list_add_tail(&buf->list, &isi->video_buffer_list);
+
+	if (isi->active == NULL) {
+		isi->active = buf;
+		if (vb2_is_streaming(vb->vb2_queue))
+			start_dma(isi, buf);
+	}
+	spin_unlock_irqrestore(&isi->lock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+
+	u32 sr = 0;
+	int ret;
+
+	spin_lock_irq(&isi->lock);
+	isi->state = ISI_STATE_IDLE;
+	/* Clear any pending SOF interrupt */
+	sr = isi_readl(isi, ISI_STATUS);
+	/* Enable VSYNC interrupt for SOF */
+	isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
+	isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
+	spin_unlock_irq(&isi->lock);
+
+	dev_dbg(icd->parent, "Waiting for SOF\n");
+	ret = wait_event_interruptible(isi->vsync_wq,
+				       isi->state != ISI_STATE_IDLE);
+	if (ret)
+		goto err;
+
+	if (isi->state != ISI_STATE_READY) {
+		ret = -EIO;
+		goto err;
+	}
+
+	spin_lock_irq(&isi->lock);
+	isi->state = ISI_STATE_WAIT_SOF;
+	isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+	if (count)
+		start_dma(isi, isi->active);
+	spin_unlock_irq(&isi->lock);
+
+	return 0;
+err:
+	isi->active = NULL;
+	isi->sequence = 0;
+	INIT_LIST_HEAD(&isi->video_buffer_list);
+	return ret;
+}
+
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	struct frame_buffer *buf, *node;
+	int ret = 0;
+	unsigned long timeout;
+
+	spin_lock_irq(&isi->lock);
+	isi->active = NULL;
+	/* Release all active buffers */
+	list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+		list_del_init(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+	spin_unlock_irq(&isi->lock);
+
+	timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
+	/* Wait until the end of the current frame. */
+	while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+			time_before(jiffies, timeout))
+		msleep(1);
+
+	if (time_after(jiffies, timeout)) {
+		dev_err(icd->parent,
+			"Timeout waiting for finishing codec request\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Disable interrupts */
+	isi_writel(isi, ISI_INTDIS,
+			ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+	/* Disable the ISI and wait until it has stopped */
+	ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
+	if (ret < 0)
+		dev_err(icd->parent, "Disable ISI timed out\n");
+
+	return ret;
+}
+
+static struct vb2_ops isi_video_qops = {
+	.queue_setup		= queue_setup,
+	.buf_init		= buffer_init,
+	.buf_prepare		= buffer_prepare,
+	.buf_cleanup		= buffer_cleanup,
+	.buf_queue		= buffer_queue,
+	.start_streaming	= start_streaming,
+	.stop_streaming		= stop_streaming,
+	.wait_prepare		= soc_camera_unlock,
+	.wait_finish		= soc_camera_lock,
+};
+
+/* ------------------------------------------------------------------
+	SOC camera operations for the device
+   ------------------------------------------------------------------*/
+static int isi_camera_init_videobuf(struct vb2_queue *q,
+				     struct soc_camera_device *icd)
+{
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP;
+	q->drv_priv = icd;
+	q->buf_struct_size = sizeof(struct frame_buffer);
+	q->ops = &isi_video_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+
+	return vb2_queue_init(q);
+}
+
+static int isi_camera_set_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %x not found\n",
+			 pix->pixelformat);
+		return -EINVAL;
+	}
+
+	dev_dbg(icd->parent, "Plan to set format %dx%d\n",
+			pix->width, pix->height);
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
+	ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
+	if (ret < 0)
+		return ret;
+
+	pix->width		= mf.width;
+	pix->height		= mf.height;
+	pix->field		= mf.field;
+	pix->colorspace		= mf.colorspace;
+	icd->current_fmt	= xlate;
+
+	dev_dbg(icd->parent, "Finally set format %dx%d\n",
+		pix->width, pix->height);
+
+	return ret;
+}
+
+static int isi_camera_try_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	u32 pixfmt = pix->pixelformat;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (pixfmt && !xlate) {
+		dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	/* limit to Atmel ISI hardware capabilities */
+	if (pix->height > MAX_SUPPORT_HEIGHT)
+		pix->height = MAX_SUPPORT_HEIGHT;
+	if (pix->width > MAX_SUPPORT_WIDTH)
+		pix->width = MAX_SUPPORT_WIDTH;
+
+	/* limit to sensor capabilities */
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->colorspace	= mf.colorspace;
+
+	switch (mf.field) {
+	case V4L2_FIELD_ANY:
+		pix->field = V4L2_FIELD_NONE;
+		break;
+	case V4L2_FIELD_NONE:
+		break;
+	default:
+		dev_err(icd->parent, "Field type %d unsupported.\n",
+			mf.field);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
+	{
+		.fourcc			= V4L2_PIX_FMT_YUYV,
+		.name			= "Packed YUV422 16 bit",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+};
+
+/* This will be corrected as we get more formats */
+static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+	return	fmt->packing == SOC_MBUS_PACKING_NONE ||
+		(fmt->bits_per_sample == 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+		(fmt->bits_per_sample > 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+#define ISI_BUS_PARAM (V4L2_MBUS_MASTER |	\
+		V4L2_MBUS_HSYNC_ACTIVE_HIGH |	\
+		V4L2_MBUS_HSYNC_ACTIVE_LOW |	\
+		V4L2_MBUS_VSYNC_ACTIVE_HIGH |	\
+		V4L2_MBUS_VSYNC_ACTIVE_LOW |	\
+		V4L2_MBUS_PCLK_SAMPLE_RISING |	\
+		V4L2_MBUS_PCLK_SAMPLE_FALLING |	\
+		V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+static int isi_camera_try_bus_param(struct soc_camera_device *icd,
+				    unsigned char buswidth)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long common_flags;
+	int ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  ISI_BUS_PARAM);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%x\n",
+				 cfg.flags, ISI_BUS_PARAM);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	}
+
+	if ((1 << (buswidth - 1)) & isi->width_flags)
+		return 0;
+	return -EINVAL;
+}
+
+
+static int isi_camera_get_formats(struct soc_camera_device *icd,
+				  unsigned int idx,
+				  struct soc_camera_format_xlate *xlate)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	int formats = 0, ret;
+	/* sensor format */
+	enum v4l2_mbus_pixelcode code;
+	/* soc camera host format */
+	const struct soc_mbus_pixelfmt *fmt;
+
+	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+	if (ret < 0)
+		/* No more formats */
+		return 0;
+
+	fmt = soc_mbus_get_fmtdesc(code);
+	if (!fmt) {
+		dev_err(icd->parent,
+			"Invalid format code #%u: %d\n", idx, code);
+		return 0;
+	}
+
+	/* This also checks support for the requested bits-per-sample */
+	ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample);
+	if (ret < 0) {
+		dev_err(icd->parent,
+			"Fail to try the bus parameters.\n");
+		return 0;
+	}
+
+	switch (code) {
+	case V4L2_MBUS_FMT_UYVY8_2X8:
+	case V4L2_MBUS_FMT_VYUY8_2X8:
+	case V4L2_MBUS_FMT_YUYV8_2X8:
+	case V4L2_MBUS_FMT_YVYU8_2X8:
+		formats++;
+		if (xlate) {
+			xlate->host_fmt	= &isi_camera_formats[0];
+			xlate->code	= code;
+			xlate++;
+			dev_dbg(icd->parent, "Providing format %s using code %d\n",
+				isi_camera_formats[0].name, code);
+		}
+		break;
+	default:
+		if (!isi_camera_packing_supported(fmt))
+			return 0;
+		if (xlate)
+			dev_dbg(icd->parent,
+				"Providing format %s in pass-through mode\n",
+				fmt->name);
+	}
+
+	/* Generic pass-through */
+	formats++;
+	if (xlate) {
+		xlate->host_fmt	= fmt;
+		xlate->code	= code;
+		xlate++;
+	}
+
+	return formats;
+}
+
+/* Called with .video_lock held */
+static int isi_camera_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	int ret;
+
+	if (isi->icd)
+		return -EBUSY;
+
+	ret = clk_enable(isi->pclk);
+	if (ret)
+		return ret;
+
+	ret = clk_enable(isi->mck);
+	if (ret) {
+		clk_disable(isi->pclk);
+		return ret;
+	}
+
+	isi->icd = icd;
+	dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
+		 icd->devnum);
+	return 0;
+}
+/* Called with .video_lock held */
+static void isi_camera_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+
+	BUG_ON(icd != isi->icd);
+
+	clk_disable(isi->mck);
+	clk_disable(isi->pclk);
+	isi->icd = NULL;
+
+	dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n",
+		 icd->devnum);
+}
+
+static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+
+	return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int isi_camera_querycap(struct soc_camera_host *ici,
+			       struct v4l2_capability *cap)
+{
+	strcpy(cap->driver, "atmel-isi");
+	strcpy(cap->card, "Atmel Image Sensor Interface");
+	cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
+				V4L2_CAP_STREAMING);
+	return 0;
+}
+
+static int isi_camera_set_bus_param(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct atmel_isi *isi = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long common_flags;
+	int ret;
+	u32 cfg1 = 0;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  ISI_BUS_PARAM);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%x\n",
+				 cfg.flags, ISI_BUS_PARAM);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = ISI_BUS_PARAM;
+	}
+	dev_dbg(icd->parent, "Flags cam: 0x%x host: 0x%x common: 0x%lx\n",
+		cfg.flags, ISI_BUS_PARAM, common_flags);
+
+	/* Make choices, based on platform preferences */
+	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+		if (isi->pdata->hsync_act_low)
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+		if (isi->pdata->vsync_act_low)
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+	    (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+		if (isi->pdata->pclk_act_falling)
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+		else
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	/* set bus param for ISI */
+	if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+		cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
+	if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+		cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
+	if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+		cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+
+	if (isi->pdata->has_emb_sync)
+		cfg1 |= ISI_CFG1_EMB_SYNC;
+	if (isi->pdata->full_mode)
+		cfg1 |= ISI_CFG1_FULL_MODE;
+
+	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+	isi_writel(isi, ISI_CFG1, cfg1);
+
+	return 0;
+}
+
+static struct soc_camera_host_ops isi_soc_camera_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= isi_camera_add_device,
+	.remove		= isi_camera_remove_device,
+	.set_fmt	= isi_camera_set_fmt,
+	.try_fmt	= isi_camera_try_fmt,
+	.get_formats	= isi_camera_get_formats,
+	.init_videobuf2	= isi_camera_init_videobuf,
+	.poll		= isi_camera_poll,
+	.querycap	= isi_camera_querycap,
+	.set_bus_param	= isi_camera_set_bus_param,
+};
+
+/* -----------------------------------------------------------------------*/
+static int __devexit atmel_isi_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct atmel_isi *isi = container_of(soc_host,
+					struct atmel_isi, soc_host);
+
+	free_irq(isi->irq, isi);
+	soc_camera_host_unregister(soc_host);
+	vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct fbd) * MAX_BUFFER_NUM,
+			isi->p_fb_descriptors,
+			isi->fb_descriptors_phys);
+
+	iounmap(isi->regs);
+	clk_unprepare(isi->mck);
+	clk_put(isi->mck);
+	clk_unprepare(isi->pclk);
+	clk_put(isi->pclk);
+	kfree(isi);
+
+	return 0;
+}
+
+static int __devinit atmel_isi_probe(struct platform_device *pdev)
+{
+	int irq;
+	struct atmel_isi *isi;
+	struct clk *pclk;
+	struct resource *regs;
+	int ret, i;
+	struct device *dev = &pdev->dev;
+	struct soc_camera_host *soc_host;
+	struct isi_platform_data *pdata;
+
+	pdata = dev->platform_data;
+	if (!pdata || !pdata->data_width_flags || !pdata->mck_hz) {
+		dev_err(&pdev->dev,
+			"No config available for Atmel ISI\n");
+		return -EINVAL;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	pclk = clk_get(&pdev->dev, "isi_clk");
+	if (IS_ERR(pclk))
+		return PTR_ERR(pclk);
+
+	ret = clk_prepare(pclk);
+	if (ret)
+		goto err_clk_prepare_pclk;
+
+	isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+	if (!isi) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Can't allocate interface!\n");
+		goto err_alloc_isi;
+	}
+
+	isi->pclk = pclk;
+	isi->pdata = pdata;
+	isi->active = NULL;
+	spin_lock_init(&isi->lock);
+	init_waitqueue_head(&isi->vsync_wq);
+	INIT_LIST_HEAD(&isi->video_buffer_list);
+	INIT_LIST_HEAD(&isi->dma_desc_head);
+
+	/* Get ISI_MCK, provided by programmable clock or external clock */
+	isi->mck = clk_get(dev, "isi_mck");
+	if (IS_ERR(isi->mck)) {
+		dev_err(dev, "Failed to get isi_mck\n");
+		ret = PTR_ERR(isi->mck);
+		goto err_clk_get;
+	}
+
+	ret = clk_prepare(isi->mck);
+	if (ret)
+		goto err_clk_prepare_mck;
+
+	/* Set the ISI_MCK frequency; it should be faster than the pixel clock */
+	ret = clk_set_rate(isi->mck, pdata->mck_hz);
+	if (ret < 0)
+		goto err_set_mck_rate;
+
+	isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
+				sizeof(struct fbd) * MAX_BUFFER_NUM,
+				&isi->fb_descriptors_phys,
+				GFP_KERNEL);
+	if (!isi->p_fb_descriptors) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Can't allocate descriptors!\n");
+		goto err_alloc_descriptors;
+	}
+
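+	/*
+	 * Split the coherent block into one frame buffer descriptor per
+	 * possible video buffer and put them all on the free descriptor list.
+	 */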
+	for (i = 0; i < MAX_BUFFER_NUM; i++) {
+		isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
+		isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
+					i * sizeof(struct fbd);
+		list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
+	}
+
+	isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(isi->alloc_ctx)) {
+		ret = PTR_ERR(isi->alloc_ctx);
+		goto err_alloc_ctx;
+	}
+
+	isi->regs = ioremap(regs->start, resource_size(regs));
+	if (!isi->regs) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
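+	/* Record the supported data bus widths: bit 7 = 8 bit, bit 9 = 10 bit */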
+	if (pdata->data_width_flags & ISI_DATAWIDTH_8)
+		isi->width_flags = 1 << 7;
+	if (pdata->data_width_flags & ISI_DATAWIDTH_10)
+		isi->width_flags |= 1 << 9;
+
+	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto err_req_irq;
+	}
+
+	ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+		goto err_req_irq;
+	}
+	isi->irq = irq;
+
+	soc_host		= &isi->soc_host;
+	soc_host->drv_name	= "isi-camera";
+	soc_host->ops		= &isi_soc_camera_host_ops;
+	soc_host->priv		= isi;
+	soc_host->v4l2_dev.dev	= &pdev->dev;
+	soc_host->nr		= pdev->id;
+
+	ret = soc_camera_host_register(soc_host);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register soc camera host\n");
+		goto err_register_soc_camera_host;
+	}
+	return 0;
+
+err_register_soc_camera_host:
+	free_irq(isi->irq, isi);
+err_req_irq:
+	iounmap(isi->regs);
+err_ioremap:
+	vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+err_alloc_ctx:
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct fbd) * MAX_BUFFER_NUM,
+			isi->p_fb_descriptors,
+			isi->fb_descriptors_phys);
+err_alloc_descriptors:
+err_set_mck_rate:
+	clk_unprepare(isi->mck);
+err_clk_prepare_mck:
+	clk_put(isi->mck);
+err_clk_get:
+	kfree(isi);
+err_alloc_isi:
+	clk_unprepare(pclk);
+err_clk_prepare_pclk:
+	clk_put(pclk);
+
+	return ret;
+}
+
+static struct platform_driver atmel_isi_driver = {
+	.probe		= atmel_isi_probe,
+	.remove		= __devexit_p(atmel_isi_remove),
+	.driver		= {
+		.name = "atmel_isi",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init atmel_isi_init_module(void)
+{
+	return platform_driver_probe(&atmel_isi_driver, &atmel_isi_probe);
+}
+
+static void __exit atmel_isi_exit(void)
+{
+	platform_driver_unregister(&atmel_isi_driver);
+}
+module_init(atmel_isi_init_module);
+module_exit(atmel_isi_exit);
+
+MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
+MODULE_DESCRIPTION("V4L2 driver for the Atmel Image Sensor Interface (ISI)");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/blackfin/Kconfig b/drivers/media/platform/blackfin/Kconfig
new file mode 100644
index 0000000..ecd5323
--- /dev/null
+++ b/drivers/media/platform/blackfin/Kconfig
@@ -0,0 +1,10 @@
+config VIDEO_BLACKFIN_CAPTURE
+	tristate "Blackfin Video Capture Driver"
+	depends on VIDEO_V4L2 && BLACKFIN && I2C
+	select VIDEOBUF2_DMA_CONTIG
+	help
+	  V4L2 bridge driver for Blackfin video capture device.
+	  Choose PPI or EPPI as its interface.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bfin_video_capture.
diff --git a/drivers/media/platform/blackfin/Makefile b/drivers/media/platform/blackfin/Makefile
new file mode 100644
index 0000000..aa3a0a2
--- /dev/null
+++ b/drivers/media/platform/blackfin/Makefile
@@ -0,0 +1,2 @@
+bfin_video_capture-objs := bfin_capture.o ppi.o
+obj-$(CONFIG_VIDEO_BLACKFIN_CAPTURE) += bfin_video_capture.o
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
new file mode 100644
index 0000000..1677623
--- /dev/null
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -0,0 +1,1068 @@
+/*
+ * Analog Devices video capture driver
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <asm/dma.h>
+
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+#define CAPTURE_DRV_NAME        "bfin_capture"
+#define BCAP_MIN_NUM_BUF        2
+
+struct bcap_format {
+	char *desc;
+	u32 pixelformat;
+	enum v4l2_mbus_pixelcode mbus_code;
+	int bpp; /* bits per pixel */
+};
+
+struct bcap_buffer {
+	struct vb2_buffer vb;
+	struct list_head list;
+};
+
+struct bcap_device {
+	/* capture device instance */
+	struct v4l2_device v4l2_dev;
+	/* v4l2 control handler */
+	struct v4l2_ctrl_handler ctrl_handler;
+	/* device node data */
+	struct video_device *video_dev;
+	/* sub device instance */
+	struct v4l2_subdev *sd;
+	/* capture config */
+	struct bfin_capture_config *cfg;
+	/* ppi interface */
+	struct ppi_if *ppi;
+	/* current input */
+	unsigned int cur_input;
+	/* current selected standard */
+	v4l2_std_id std;
+	/* used to store pixel format */
+	struct v4l2_pix_format fmt;
+	/* bits per pixel */
+	int bpp;
+	/* used to store sensor supported format */
+	struct bcap_format *sensor_formats;
+	/* number of entries in the sensor_formats array */
+	int num_sensor_formats;
+	/* pointing to current video buffer */
+	struct bcap_buffer *cur_frm;
+	/* pointing to next video buffer */
+	struct bcap_buffer *next_frm;
+	/* buffer queue used in videobuf2 */
+	struct vb2_queue buffer_queue;
+	/* allocator-specific contexts for each plane */
+	struct vb2_alloc_ctx *alloc_ctx;
+	/* queue of filled frames */
+	struct list_head dma_queue;
+	/* used in videobuf2 callback */
+	spinlock_t lock;
+	/* used to access capture device */
+	struct mutex mutex;
+	/* used to wait ppi to complete one transfer */
+	struct completion comp;
+	/* prepare to stop */
+	bool stop;
+};
+
+struct bcap_fh {
+	struct v4l2_fh fh;
+	/* indicates whether this file handle is doing IO */
+	bool io_allowed;
+};
+
+static const struct bcap_format bcap_formats[] = {
+	{
+		.desc        = "YCbCr 4:2:2 Interleaved UYVY",
+		.pixelformat = V4L2_PIX_FMT_UYVY,
+		.mbus_code   = V4L2_MBUS_FMT_UYVY8_2X8,
+		.bpp         = 16,
+	},
+	{
+		.desc        = "YCbCr 4:2:2 Interleaved YUYV",
+		.pixelformat = V4L2_PIX_FMT_YUYV,
+		.mbus_code   = V4L2_MBUS_FMT_YUYV8_2X8,
+		.bpp         = 16,
+	},
+	{
+		.desc        = "RGB 565",
+		.pixelformat = V4L2_PIX_FMT_RGB565,
+		.mbus_code   = V4L2_MBUS_FMT_RGB565_2X8_LE,
+		.bpp         = 16,
+	},
+	{
+		.desc        = "RGB 444",
+		.pixelformat = V4L2_PIX_FMT_RGB444,
+		.mbus_code   = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+		.bpp         = 16,
+	},
+
+};
+#define BCAP_MAX_FMTS ARRAY_SIZE(bcap_formats)
+
+static irqreturn_t bcap_isr(int irq, void *dev_id);
+
+static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb)
+{
+	return container_of(vb, struct bcap_buffer, vb);
+}
+
+static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
+{
+	enum v4l2_mbus_pixelcode code;
+	struct bcap_format *sf;
+	unsigned int num_formats = 0;
+	int i, j;
+
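+	/* count how many media bus formats the subdevice can enumerate */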
+	while (!v4l2_subdev_call(bcap_dev->sd, video,
+				enum_mbus_fmt, num_formats, &code))
+		num_formats++;
+	if (!num_formats)
+		return -ENXIO;
+
+	sf = kzalloc(num_formats * sizeof(*sf), GFP_KERNEL);
+	if (!sf)
+		return -ENOMEM;
+
+	for (i = 0; i < num_formats; i++) {
+		v4l2_subdev_call(bcap_dev->sd, video,
+				enum_mbus_fmt, i, &code);
+		for (j = 0; j < BCAP_MAX_FMTS; j++)
+			if (code == bcap_formats[j].mbus_code)
+				break;
+		if (j == BCAP_MAX_FMTS) {
+			/* the sensor provides a format our bridge cannot handle, reject it */
+			kfree(sf);
+			return -EINVAL;
+		}
+		sf[i] = bcap_formats[j];
+	}
+	bcap_dev->sensor_formats = sf;
+	bcap_dev->num_sensor_formats = num_formats;
+	return 0;
+}
+
+static void bcap_free_sensor_formats(struct bcap_device *bcap_dev)
+{
+	bcap_dev->num_sensor_formats = 0;
+	kfree(bcap_dev->sensor_formats);
+	bcap_dev->sensor_formats = NULL;
+}
+
+static int bcap_open(struct file *file)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct video_device *vfd = bcap_dev->video_dev;
+	struct bcap_fh *bcap_fh;
+
+	if (!bcap_dev->sd) {
+		v4l2_err(&bcap_dev->v4l2_dev, "No sub device registered\n");
+		return -ENODEV;
+	}
+
+	bcap_fh = kzalloc(sizeof(*bcap_fh), GFP_KERNEL);
+	if (!bcap_fh) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+			 "unable to allocate memory for file handle object\n");
+		return -ENOMEM;
+	}
+
+	v4l2_fh_init(&bcap_fh->fh, vfd);
+
+	/* store pointer to v4l2_fh in private_data member of file */
+	file->private_data = &bcap_fh->fh;
+	v4l2_fh_add(&bcap_fh->fh);
+	bcap_fh->io_allowed = false;
+	return 0;
+}
+
+static int bcap_release(struct file *file)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct v4l2_fh *fh = file->private_data;
+	struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+	/* if this instance is doing IO */
+	if (bcap_fh->io_allowed)
+		vb2_queue_release(&bcap_dev->buffer_queue);
+
+	file->private_data = NULL;
+	v4l2_fh_del(&bcap_fh->fh);
+	v4l2_fh_exit(&bcap_fh->fh);
+	kfree(bcap_fh);
+	return 0;
+}
+
+static int bcap_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&bcap_dev->mutex))
+		return -ERESTARTSYS;
+	ret = vb2_mmap(&bcap_dev->buffer_queue, vma);
+	mutex_unlock(&bcap_dev->mutex);
+	return ret;
+}
+
+#ifndef CONFIG_MMU
+static unsigned long bcap_get_unmapped_area(struct file *file,
+					    unsigned long addr,
+					    unsigned long len,
+					    unsigned long pgoff,
+					    unsigned long flags)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	return vb2_get_unmapped_area(&bcap_dev->buffer_queue,
+				     addr,
+				     len,
+				     pgoff,
+				     flags);
+}
+#endif
+
+static unsigned int bcap_poll(struct file *file, poll_table *wait)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	unsigned int res;
+
+	mutex_lock(&bcap_dev->mutex);
+	res = vb2_poll(&bcap_dev->buffer_queue, file, wait);
+	mutex_unlock(&bcap_dev->mutex);
+	return res;
+}
+
+static int bcap_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+
+	if (*nbuffers < BCAP_MIN_NUM_BUF)
+		*nbuffers = BCAP_MIN_NUM_BUF;
+
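+	/* one contiguous plane per buffer, sized for the currently set format */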
+	*nplanes = 1;
+	sizes[0] = bcap_dev->fmt.sizeimage;
+	alloc_ctxs[0] = bcap_dev->alloc_ctx;
+
+	return 0;
+}
+
+static int bcap_buffer_init(struct vb2_buffer *vb)
+{
+	struct bcap_buffer *buf = to_bcap_vb(vb);
+
+	INIT_LIST_HEAD(&buf->list);
+	return 0;
+}
+
+static int bcap_buffer_prepare(struct vb2_buffer *vb)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct bcap_buffer *buf = to_bcap_vb(vb);
+	unsigned long size;
+
+	size = bcap_dev->fmt.sizeimage;
+	if (vb2_plane_size(vb, 0) < size) {
+		v4l2_err(&bcap_dev->v4l2_dev, "buffer too small (%lu < %lu)\n",
+				vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+	vb2_set_plane_payload(&buf->vb, 0, size);
+
+	return 0;
+}
+
+static void bcap_buffer_queue(struct vb2_buffer *vb)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct bcap_buffer *buf = to_bcap_vb(vb);
+	unsigned long flags;
+
+	spin_lock_irqsave(&bcap_dev->lock, flags);
+	list_add_tail(&buf->list, &bcap_dev->dma_queue);
+	spin_unlock_irqrestore(&bcap_dev->lock, flags);
+}
+
+static void bcap_buffer_cleanup(struct vb2_buffer *vb)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct bcap_buffer *buf = to_bcap_vb(vb);
+	unsigned long flags;
+
+	spin_lock_irqsave(&bcap_dev->lock, flags);
+	list_del_init(&buf->list);
+	spin_unlock_irqrestore(&bcap_dev->lock, flags);
+}
+
+static void bcap_lock(struct vb2_queue *vq)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+	mutex_lock(&bcap_dev->mutex);
+}
+
+static void bcap_unlock(struct vb2_queue *vq)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+	mutex_unlock(&bcap_dev->mutex);
+}
+
+static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+	struct ppi_if *ppi = bcap_dev->ppi;
+	struct ppi_params params;
+	int ret;
+
+	/* enable streamon on the sub device */
+	ret = v4l2_subdev_call(bcap_dev->sd, video, s_stream, 1);
+	if (ret && (ret != -ENOIOCTLCMD)) {
+		v4l2_err(&bcap_dev->v4l2_dev, "stream on failed in subdev\n");
+		return ret;
+	}
+
+	/* set ppi params */
+	params.width = bcap_dev->fmt.width;
+	params.height = bcap_dev->fmt.height;
+	params.bpp = bcap_dev->bpp;
+	params.ppi_control = bcap_dev->cfg->ppi_control;
+	params.int_mask = bcap_dev->cfg->int_mask;
+	params.blank_clocks = bcap_dev->cfg->blank_clocks;
+	ret = ppi->ops->set_params(ppi, &params);
+	if (ret < 0) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Error in setting ppi params\n");
+		return ret;
+	}
+
+	/* attach ppi DMA irq handler */
+	ret = ppi->ops->attach_irq(ppi, bcap_isr);
+	if (ret < 0) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Error in attaching interrupt handler\n");
+		return ret;
+	}
+
+	INIT_COMPLETION(bcap_dev->comp);
+	bcap_dev->stop = false;
+	return 0;
+}
+
+static int bcap_stop_streaming(struct vb2_queue *vq)
+{
+	struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
+	struct ppi_if *ppi = bcap_dev->ppi;
+	int ret;
+
+	if (!vb2_is_streaming(vq))
+		return 0;
+
+	bcap_dev->stop = true;
+	wait_for_completion(&bcap_dev->comp);
+	ppi->ops->stop(ppi);
+	ppi->ops->detach_irq(ppi);
+	ret = v4l2_subdev_call(bcap_dev->sd, video, s_stream, 0);
+	if (ret && (ret != -ENOIOCTLCMD))
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"stream off failed in subdev\n");
+
+	/* release all active buffers */
+	while (!list_empty(&bcap_dev->dma_queue)) {
+		bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+						struct bcap_buffer, list);
+		list_del(&bcap_dev->next_frm->list);
+		vb2_buffer_done(&bcap_dev->next_frm->vb, VB2_BUF_STATE_ERROR);
+	}
+	return 0;
+}
+
+static struct vb2_ops bcap_video_qops = {
+	.queue_setup            = bcap_queue_setup,
+	.buf_init               = bcap_buffer_init,
+	.buf_prepare            = bcap_buffer_prepare,
+	.buf_cleanup            = bcap_buffer_cleanup,
+	.buf_queue              = bcap_buffer_queue,
+	.wait_prepare           = bcap_unlock,
+	.wait_finish            = bcap_lock,
+	.start_streaming        = bcap_start_streaming,
+	.stop_streaming         = bcap_stop_streaming,
+};
+
+static int bcap_reqbufs(struct file *file, void *priv,
+			struct v4l2_requestbuffers *req_buf)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct vb2_queue *vq = &bcap_dev->buffer_queue;
+	struct v4l2_fh *fh = file->private_data;
+	struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+	if (vb2_is_busy(vq))
+		return -EBUSY;
+
+	bcap_fh->io_allowed = true;
+
+	return vb2_reqbufs(vq, req_buf);
+}
+
+static int bcap_querybuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	return vb2_querybuf(&bcap_dev->buffer_queue, buf);
+}
+
+static int bcap_qbuf(struct file *file, void *priv,
+			struct v4l2_buffer *buf)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct v4l2_fh *fh = file->private_data;
+	struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+	if (!bcap_fh->io_allowed)
+		return -EBUSY;
+
+	return vb2_qbuf(&bcap_dev->buffer_queue, buf);
+}
+
+static int bcap_dqbuf(struct file *file, void *priv,
+			struct v4l2_buffer *buf)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct v4l2_fh *fh = file->private_data;
+	struct bcap_fh *bcap_fh = container_of(fh, struct bcap_fh, fh);
+
+	if (!bcap_fh->io_allowed)
+		return -EBUSY;
+
+	return vb2_dqbuf(&bcap_dev->buffer_queue,
+				buf, file->f_flags & O_NONBLOCK);
+}
+
+static irqreturn_t bcap_isr(int irq, void *dev_id)
+{
+	struct ppi_if *ppi = dev_id;
+	struct bcap_device *bcap_dev = ppi->priv;
+	struct timeval timevalue;
+	struct vb2_buffer *vb = &bcap_dev->cur_frm->vb;
+	dma_addr_t addr;
+
+	spin_lock(&bcap_dev->lock);
+
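+	/* once the DMA has moved on to next_frm, the frame in cur_frm is complete */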
+	if (bcap_dev->cur_frm != bcap_dev->next_frm) {
+		do_gettimeofday(&timevalue);
+		vb->v4l2_buf.timestamp = timevalue;
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+		bcap_dev->cur_frm = bcap_dev->next_frm;
+	}
+
+	ppi->ops->stop(ppi);
+
+	if (bcap_dev->stop) {
+		complete(&bcap_dev->comp);
+	} else {
+		if (!list_empty(&bcap_dev->dma_queue)) {
+			bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+						struct bcap_buffer, list);
+			list_del(&bcap_dev->next_frm->list);
+			addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->next_frm->vb, 0);
+			ppi->ops->update_addr(ppi, (unsigned long)addr);
+		}
+		ppi->ops->start(ppi);
+	}
+
+	spin_unlock(&bcap_dev->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int bcap_streamon(struct file *file, void *priv,
+				enum v4l2_buf_type buf_type)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct bcap_fh *fh = file->private_data;
+	struct ppi_if *ppi = bcap_dev->ppi;
+	dma_addr_t addr;
+	int ret;
+
+	if (!fh->io_allowed)
+		return -EBUSY;
+
+	/* call streamon to start streaming in videobuf */
+	ret = vb2_streamon(&bcap_dev->buffer_queue, buf_type);
+	if (ret)
+		return ret;
+
+	/* if dma queue is empty, return error */
+	if (list_empty(&bcap_dev->dma_queue)) {
+		v4l2_err(&bcap_dev->v4l2_dev, "dma queue is empty\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* get the next frame from the dma queue */
+	bcap_dev->next_frm = list_entry(bcap_dev->dma_queue.next,
+					struct bcap_buffer, list);
+	bcap_dev->cur_frm = bcap_dev->next_frm;
+	/* remove buffer from the dma queue */
+	list_del(&bcap_dev->cur_frm->list);
+	addr = vb2_dma_contig_plane_dma_addr(&bcap_dev->cur_frm->vb, 0);
+	/* update DMA address */
+	ppi->ops->update_addr(ppi, (unsigned long)addr);
+	/* enable ppi */
+	ppi->ops->start(ppi);
+
+	return 0;
+err:
+	vb2_streamoff(&bcap_dev->buffer_queue, buf_type);
+	return ret;
+}
+
+static int bcap_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type buf_type)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct bcap_fh *fh = file->private_data;
+
+	if (!fh->io_allowed)
+		return -EBUSY;
+
+	return vb2_streamoff(&bcap_dev->buffer_queue, buf_type);
+}
+
+static int bcap_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	return v4l2_subdev_call(bcap_dev->sd, video, querystd, std);
+}
+
+static int bcap_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	*std = bcap_dev->std;
+	return 0;
+}
+
+static int bcap_s_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	int ret;
+
+	if (vb2_is_busy(&bcap_dev->buffer_queue))
+		return -EBUSY;
+
+	ret = v4l2_subdev_call(bcap_dev->sd, core, s_std, *std);
+	if (ret < 0)
+		return ret;
+
+	bcap_dev->std = *std;
+	return 0;
+}
+
+static int bcap_enum_input(struct file *file, void *priv,
+				struct v4l2_input *input)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct bfin_capture_config *config = bcap_dev->cfg;
+	int ret;
+	u32 status;
+
+	if (input->index >= config->num_inputs)
+		return -EINVAL;
+
+	*input = config->inputs[input->index];
+	/* get input status */
+	ret = v4l2_subdev_call(bcap_dev->sd, video, g_input_status, &status);
+	if (!ret)
+		input->status = status;
+	return 0;
+}
+
+static int bcap_g_input(struct file *file, void *priv, unsigned int *index)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	*index = bcap_dev->cur_input;
+	return 0;
+}
+
+static int bcap_s_input(struct file *file, void *priv, unsigned int index)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct bfin_capture_config *config = bcap_dev->cfg;
+	struct bcap_route *route;
+	int ret;
+
+	if (vb2_is_busy(&bcap_dev->buffer_queue))
+		return -EBUSY;
+
+	if (index >= config->num_inputs)
+		return -EINVAL;
+
+	route = &config->routes[index];
+	ret = v4l2_subdev_call(bcap_dev->sd, video, s_routing,
+				route->input, route->output, 0);
+	if ((ret < 0) && (ret != -ENOIOCTLCMD)) {
+		v4l2_err(&bcap_dev->v4l2_dev, "Failed to set input\n");
+		return ret;
+	}
+	bcap_dev->cur_input = index;
+	return 0;
+}
+
+static int bcap_try_format(struct bcap_device *bcap,
+				struct v4l2_pix_format *pixfmt,
+				enum v4l2_mbus_pixelcode *mbus_code,
+				int *bpp)
+{
+	struct bcap_format *sf = bcap->sensor_formats;
+	struct bcap_format *fmt = NULL;
+	struct v4l2_mbus_framefmt mbus_fmt;
+	int ret, i;
+
+	for (i = 0; i < bcap->num_sensor_formats; i++) {
+		fmt = &sf[i];
+		if (pixfmt->pixelformat == fmt->pixelformat)
+			break;
+	}
+	if (i == bcap->num_sensor_formats)
+		fmt = &sf[0];
+
+	if (mbus_code)
+		*mbus_code = fmt->mbus_code;
+	if (bpp)
+		*bpp = fmt->bpp;
+	v4l2_fill_mbus_format(&mbus_fmt, pixfmt, fmt->mbus_code);
+	ret = v4l2_subdev_call(bcap->sd, video,
+				try_mbus_fmt, &mbus_fmt);
+	if (ret < 0)
+		return ret;
+	v4l2_fill_pix_format(pixfmt, &mbus_fmt);
+	pixfmt->bytesperline = pixfmt->width * fmt->bpp / 8;
+	pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+	return 0;
+}
+
+static int bcap_enum_fmt_vid_cap(struct file *file, void  *priv,
+					struct v4l2_fmtdesc *fmt)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct bcap_format *sf = bcap_dev->sensor_formats;
+
+	if (fmt->index >= bcap_dev->num_sensor_formats)
+		return -EINVAL;
+
+	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	strlcpy(fmt->description,
+		sf[fmt->index].desc,
+		sizeof(fmt->description));
+	fmt->pixelformat = sf[fmt->index].pixelformat;
+	return 0;
+}
+
+static int bcap_try_fmt_vid_cap(struct file *file, void *priv,
+					struct v4l2_format *fmt)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+	return bcap_try_format(bcap_dev, pixfmt, NULL, NULL);
+}
+
+static int bcap_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	fmt->fmt.pix = bcap_dev->fmt;
+	return 0;
+}
+
+static int bcap_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	struct v4l2_mbus_framefmt mbus_fmt;
+	enum v4l2_mbus_pixelcode mbus_code;
+	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+	int ret, bpp;
+
+	if (vb2_is_busy(&bcap_dev->buffer_queue))
+		return -EBUSY;
+
+	/* see if format works */
+	ret = bcap_try_format(bcap_dev, pixfmt, &mbus_code, &bpp);
+	if (ret < 0)
+		return ret;
+
+	v4l2_fill_mbus_format(&mbus_fmt, pixfmt, mbus_code);
+	ret = v4l2_subdev_call(bcap_dev->sd, video, s_mbus_fmt, &mbus_fmt);
+	if (ret < 0)
+		return ret;
+	bcap_dev->fmt = *pixfmt;
+	bcap_dev->bpp = bpp;
+	return 0;
+}
+
+static int bcap_querycap(struct file *file, void  *priv,
+				struct v4l2_capability *cap)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->bus_info, "Blackfin Platform", sizeof(cap->bus_info));
+	strlcpy(cap->card, bcap_dev->cfg->card_name, sizeof(cap->card));
+	return 0;
+}
+
+static int bcap_g_parm(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	return v4l2_subdev_call(bcap_dev->sd, video, g_parm, a);
+}
+
+static int bcap_s_parm(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	return v4l2_subdev_call(bcap_dev->sd, video, s_parm, a);
+}
+
+static int bcap_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+		return -EINVAL;
+
+	return v4l2_subdev_call(bcap_dev->sd, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int bcap_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	return v4l2_subdev_call(bcap_dev->sd, core,
+			g_register, reg);
+}
+
+static int bcap_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+
+	return v4l2_subdev_call(bcap_dev->sd, core,
+			s_register, reg);
+}
+#endif
+
+static int bcap_log_status(struct file *file, void *priv)
+{
+	struct bcap_device *bcap_dev = video_drvdata(file);
+	/* status for sub devices */
+	v4l2_device_call_all(&bcap_dev->v4l2_dev, 0, core, log_status);
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
+	.vidioc_querycap         = bcap_querycap,
+	.vidioc_g_fmt_vid_cap    = bcap_g_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_cap = bcap_enum_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap    = bcap_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap  = bcap_try_fmt_vid_cap,
+	.vidioc_enum_input       = bcap_enum_input,
+	.vidioc_g_input          = bcap_g_input,
+	.vidioc_s_input          = bcap_s_input,
+	.vidioc_querystd         = bcap_querystd,
+	.vidioc_s_std            = bcap_s_std,
+	.vidioc_g_std            = bcap_g_std,
+	.vidioc_reqbufs          = bcap_reqbufs,
+	.vidioc_querybuf         = bcap_querybuf,
+	.vidioc_qbuf             = bcap_qbuf,
+	.vidioc_dqbuf            = bcap_dqbuf,
+	.vidioc_streamon         = bcap_streamon,
+	.vidioc_streamoff        = bcap_streamoff,
+	.vidioc_g_parm           = bcap_g_parm,
+	.vidioc_s_parm           = bcap_s_parm,
+	.vidioc_g_chip_ident     = bcap_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register       = bcap_dbg_g_register,
+	.vidioc_s_register       = bcap_dbg_s_register,
+#endif
+	.vidioc_log_status       = bcap_log_status,
+};
+
+static struct v4l2_file_operations bcap_fops = {
+	.owner = THIS_MODULE,
+	.open = bcap_open,
+	.release = bcap_release,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap = bcap_mmap,
+#ifndef CONFIG_MMU
+	.get_unmapped_area = bcap_get_unmapped_area,
+#endif
+	.poll = bcap_poll
+};
+
+static int __devinit bcap_probe(struct platform_device *pdev)
+{
+	struct bcap_device *bcap_dev;
+	struct video_device *vfd;
+	struct i2c_adapter *i2c_adap;
+	struct bfin_capture_config *config;
+	struct vb2_queue *q;
+	int ret;
+
+	config = pdev->dev.platform_data;
+	if (!config) {
+		v4l2_err(pdev->dev.driver, "Unable to get board config\n");
+		return -ENODEV;
+	}
+
+	bcap_dev = kzalloc(sizeof(*bcap_dev), GFP_KERNEL);
+	if (!bcap_dev) {
+		v4l2_err(pdev->dev.driver, "Unable to alloc bcap_dev\n");
+		return -ENOMEM;
+	}
+
+	bcap_dev->cfg = config;
+
+	bcap_dev->ppi = ppi_create_instance(config->ppi_info);
+	if (!bcap_dev->ppi) {
+		v4l2_err(pdev->dev.driver, "Unable to create ppi\n");
+		ret = -ENODEV;
+		goto err_free_dev;
+	}
+	bcap_dev->ppi->priv = bcap_dev;
+
+	bcap_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(bcap_dev->alloc_ctx)) {
+		ret = PTR_ERR(bcap_dev->alloc_ctx);
+		goto err_free_ppi;
+	}
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		ret = -ENOMEM;
+		v4l2_err(pdev->dev.driver, "Unable to alloc video device\n");
+		goto err_cleanup_ctx;
+	}
+
+	/* initialize field of video device */
+	vfd->release            = video_device_release;
+	vfd->fops               = &bcap_fops;
+	vfd->ioctl_ops          = &bcap_ioctl_ops;
+	vfd->tvnorms            = 0;
+	vfd->v4l2_dev           = &bcap_dev->v4l2_dev;
+	set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+	strncpy(vfd->name, CAPTURE_DRV_NAME, sizeof(vfd->name));
+	bcap_dev->video_dev     = vfd;
+
+	ret = v4l2_device_register(&pdev->dev, &bcap_dev->v4l2_dev);
+	if (ret) {
+		v4l2_err(pdev->dev.driver,
+				"Unable to register v4l2 device\n");
+		goto err_release_vdev;
+	}
+	v4l2_info(&bcap_dev->v4l2_dev, "v4l2 device registered\n");
+
+	bcap_dev->v4l2_dev.ctrl_handler = &bcap_dev->ctrl_handler;
+	ret = v4l2_ctrl_handler_init(&bcap_dev->ctrl_handler, 0);
+	if (ret) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Unable to init control handler\n");
+		goto err_unreg_v4l2;
+	}
+
+	spin_lock_init(&bcap_dev->lock);
+	/* initialize queue */
+	q = &bcap_dev->buffer_queue;
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP;
+	q->drv_priv = bcap_dev;
+	q->buf_struct_size = sizeof(struct bcap_buffer);
+	q->ops = &bcap_video_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+
+	vb2_queue_init(q);
+
+	mutex_init(&bcap_dev->mutex);
+	init_completion(&bcap_dev->comp);
+
+	/* init video dma queues */
+	INIT_LIST_HEAD(&bcap_dev->dma_queue);
+
+	vfd->lock = &bcap_dev->mutex;
+
+	/* register video device */
+	ret = video_register_device(bcap_dev->video_dev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Unable to register video device\n");
+		goto err_free_handler;
+	}
+	video_set_drvdata(bcap_dev->video_dev, bcap_dev);
+	v4l2_info(&bcap_dev->v4l2_dev, "video device registered as: %s\n",
+			video_device_node_name(vfd));
+
+	/* load up the subdevice */
+	i2c_adap = i2c_get_adapter(config->i2c_adapter_id);
+	if (!i2c_adap) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Unable to find i2c adapter\n");
+		ret = -ENODEV;
+		goto err_unreg_vdev;
+	}
+	bcap_dev->sd = v4l2_i2c_new_subdev_board(&bcap_dev->v4l2_dev,
+						 i2c_adap,
+						 &config->board_info,
+						 NULL);
+	if (bcap_dev->sd) {
+		int i;
+		/* update tvnorms from the sub devices */
+		for (i = 0; i < config->num_inputs; i++)
+			vfd->tvnorms |= config->inputs[i].std;
+	} else {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Unable to register sub device\n");
+		ret = -ENODEV;
+		goto err_unreg_vdev;
+	}
+
+	v4l2_info(&bcap_dev->v4l2_dev, "v4l2 sub device registered\n");
+
+	/* now we can probe the default state */
+	if (vfd->tvnorms) {
+		v4l2_std_id std;
+		ret = v4l2_subdev_call(bcap_dev->sd, core, g_std, &std);
+		if (ret) {
+			v4l2_err(&bcap_dev->v4l2_dev,
+					"Unable to get std\n");
+			goto err_unreg_vdev;
+		}
+		bcap_dev->std = std;
+	}
+	ret = bcap_init_sensor_formats(bcap_dev);
+	if (ret) {
+		v4l2_err(&bcap_dev->v4l2_dev,
+				"Unable to create sensor formats table\n");
+		goto err_unreg_vdev;
+	}
+	return 0;
+err_unreg_vdev:
+	video_unregister_device(bcap_dev->video_dev);
+	bcap_dev->video_dev = NULL;
+err_free_handler:
+	v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
+err_unreg_v4l2:
+	v4l2_device_unregister(&bcap_dev->v4l2_dev);
+err_release_vdev:
+	if (bcap_dev->video_dev)
+		video_device_release(bcap_dev->video_dev);
+err_cleanup_ctx:
+	vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
+err_free_ppi:
+	ppi_delete_instance(bcap_dev->ppi);
+err_free_dev:
+	kfree(bcap_dev);
+	return ret;
+}
+
+static int __devexit bcap_remove(struct platform_device *pdev)
+{
+	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+	struct bcap_device *bcap_dev = container_of(v4l2_dev,
+						struct bcap_device, v4l2_dev);
+
+	bcap_free_sensor_formats(bcap_dev);
+	video_unregister_device(bcap_dev->video_dev);
+	v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
+	v4l2_device_unregister(v4l2_dev);
+	vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
+	ppi_delete_instance(bcap_dev->ppi);
+	kfree(bcap_dev);
+	return 0;
+}
+
+static struct platform_driver bcap_driver = {
+	.driver = {
+		.name  = CAPTURE_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = bcap_probe,
+	.remove = __devexit_p(bcap_remove),
+};
+
+static __init int bcap_init(void)
+{
+	return platform_driver_register(&bcap_driver);
+}
+
+static __exit void bcap_exit(void)
+{
+	platform_driver_unregister(&bcap_driver);
+}
+
+module_init(bcap_init);
+module_exit(bcap_exit);
+
+MODULE_DESCRIPTION("Analog Devices blackfin video capture driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
new file mode 100644
index 0000000..d295921
--- /dev/null
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -0,0 +1,271 @@
+/*
+ * ppi.c Analog Devices Parallel Peripheral Interface driver
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/slab.h>
+
+#include <asm/bfin_ppi.h>
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#include <media/blackfin/ppi.h>
+
+static int ppi_attach_irq(struct ppi_if *ppi, irq_handler_t handler);
+static void ppi_detach_irq(struct ppi_if *ppi);
+static int ppi_start(struct ppi_if *ppi);
+static int ppi_stop(struct ppi_if *ppi);
+static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params);
+static void ppi_update_addr(struct ppi_if *ppi, unsigned long addr);
+
+static const struct ppi_ops ppi_ops = {
+	.attach_irq = ppi_attach_irq,
+	.detach_irq = ppi_detach_irq,
+	.start = ppi_start,
+	.stop = ppi_stop,
+	.set_params = ppi_set_params,
+	.update_addr = ppi_update_addr,
+};
+
+static irqreturn_t ppi_irq_err(int irq, void *dev_id)
+{
+	struct ppi_if *ppi = dev_id;
+	const struct ppi_info *info = ppi->info;
+
+	switch (info->type) {
+	case PPI_TYPE_PPI:
+	{
+		struct bfin_ppi_regs *reg = info->base;
+		unsigned short status;
+
+		/*
+		 * The status register on BF561 is cleared when read;
+		 * on the other parts the bits are write-1-to-clear.
+		 */
+		status = bfin_read16(&reg->status);
+		bfin_write16(&reg->status, 0xff00);
+		break;
+	}
+	case PPI_TYPE_EPPI:
+	{
+		struct bfin_eppi_regs *reg = info->base;
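+		/* acknowledge all pending EPPI status bits */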
+		bfin_write16(&reg->status, 0xffff);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ppi_attach_irq(struct ppi_if *ppi, irq_handler_t handler)
+{
+	const struct ppi_info *info = ppi->info;
+	int ret;
+
+	ret = request_dma(info->dma_ch, "PPI_DMA");
+
+	if (ret) {
+		pr_err("Unable to allocate DMA channel for PPI\n");
+		return ret;
+	}
+	set_dma_callback(info->dma_ch, handler, ppi);
+
+	if (ppi->err_int) {
+		ret = request_irq(info->irq_err, ppi_irq_err, 0, "PPI ERROR", ppi);
+		if (ret) {
+			pr_err("Unable to allocate IRQ for PPI\n");
+			free_dma(info->dma_ch);
+		}
+	}
+	return ret;
+}
+
+static void ppi_detach_irq(struct ppi_if *ppi)
+{
+	const struct ppi_info *info = ppi->info;
+
+	if (ppi->err_int)
+		free_irq(info->irq_err, ppi);
+	free_dma(info->dma_ch);
+}
+
+static int ppi_start(struct ppi_if *ppi)
+{
+	const struct ppi_info *info = ppi->info;
+
+	/* enable DMA */
+	enable_dma(info->dma_ch);
+
+	/* enable PPI */
+	ppi->ppi_control |= PORT_EN;
+	switch (info->type) {
+	case PPI_TYPE_PPI:
+	{
+		struct bfin_ppi_regs *reg = info->base;
+		bfin_write16(&reg->control, ppi->ppi_control);
+		break;
+	}
+	case PPI_TYPE_EPPI:
+	{
+		struct bfin_eppi_regs *reg = info->base;
+		bfin_write32(&reg->control, ppi->ppi_control);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
+	SSYNC();
+	return 0;
+}
+
+static int ppi_stop(struct ppi_if *ppi)
+{
+	const struct ppi_info *info = ppi->info;
+
+	/* disable PPI */
+	ppi->ppi_control &= ~PORT_EN;
+	switch (info->type) {
+	case PPI_TYPE_PPI:
+	{
+		struct bfin_ppi_regs *reg = info->base;
+		bfin_write16(&reg->control, ppi->ppi_control);
+		break;
+	}
+	case PPI_TYPE_EPPI:
+	{
+		struct bfin_eppi_regs *reg = info->base;
+		bfin_write32(&reg->control, ppi->ppi_control);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
+	/* disable DMA */
+	clear_dma_irqstat(info->dma_ch);
+	disable_dma(info->dma_ch);
+
+	SSYNC();
+	return 0;
+}
+
+static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
+{
+	const struct ppi_info *info = ppi->info;
+	int dma32 = 0;
+	int dma_config, bytes_per_line, lines_per_frame;
+
+	bytes_per_line = params->width * params->bpp / 8;
+	lines_per_frame = params->height;
+	if (params->int_mask == 0xFFFFFFFF)
+		ppi->err_int = false;
+	else
+		ppi->err_int = true;
+
+	dma_config = (DMA_FLOW_STOP | WNR | RESTART | DMA2D | DI_EN);
+	ppi->ppi_control = params->ppi_control & ~PORT_EN;
+	switch (info->type) {
+	case PPI_TYPE_PPI:
+	{
+		struct bfin_ppi_regs *reg = info->base;
+
+		if (params->ppi_control & DMA32)
+			dma32 = 1;
+
+		bfin_write16(&reg->control, ppi->ppi_control);
+		bfin_write16(&reg->count, bytes_per_line - 1);
+		bfin_write16(&reg->frame, lines_per_frame);
+		break;
+	}
+	case PPI_TYPE_EPPI:
+	{
+		struct bfin_eppi_regs *reg = info->base;
+
+		if ((params->ppi_control & PACK_EN)
+			|| (params->ppi_control & 0x38000) > DLEN_16)
+			dma32 = 1;
+
+		bfin_write32(&reg->control, ppi->ppi_control);
+		bfin_write16(&reg->line, bytes_per_line + params->blank_clocks);
+		bfin_write16(&reg->frame, lines_per_frame);
+		bfin_write16(&reg->hdelay, 0);
+		bfin_write16(&reg->vdelay, 0);
+		bfin_write16(&reg->hcount, bytes_per_line);
+		bfin_write16(&reg->vcount, lines_per_frame);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
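+	/*
+	 * Pick a 16- or 32-bit DMA word size and program the 2D DMA so that
+	 * the X dimension covers one line and Y counts the lines of a frame.
+	 */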
+	if (dma32) {
+		dma_config |= WDSIZE_32;
+		set_dma_x_count(info->dma_ch, bytes_per_line >> 2);
+		set_dma_x_modify(info->dma_ch, 4);
+		set_dma_y_modify(info->dma_ch, 4);
+	} else {
+		dma_config |= WDSIZE_16;
+		set_dma_x_count(info->dma_ch, bytes_per_line >> 1);
+		set_dma_x_modify(info->dma_ch, 2);
+		set_dma_y_modify(info->dma_ch, 2);
+	}
+	set_dma_y_count(info->dma_ch, lines_per_frame);
+	set_dma_config(info->dma_ch, dma_config);
+
+	SSYNC();
+	return 0;
+}
+
+static void ppi_update_addr(struct ppi_if *ppi, unsigned long addr)
+{
+	set_dma_start_addr(ppi->info->dma_ch, addr);
+}
+
+struct ppi_if *ppi_create_instance(const struct ppi_info *info)
+{
+	struct ppi_if *ppi;
+
+	if (!info || !info->pin_req)
+		return NULL;
+
+	if (peripheral_request_list(info->pin_req, KBUILD_MODNAME)) {
+		pr_err("request peripheral failed\n");
+		return NULL;
+	}
+
+	ppi = kzalloc(sizeof(*ppi), GFP_KERNEL);
+	if (!ppi) {
+		peripheral_free_list(info->pin_req);
+		pr_err("unable to allocate memory for ppi handle\n");
+		return NULL;
+	}
+	ppi->ops = &ppi_ops;
+	ppi->info = info;
+
+	pr_info("ppi probe success\n");
+	return ppi;
+}
+
+void ppi_delete_instance(struct ppi_if *ppi)
+{
+	peripheral_free_list(ppi->info->pin_req);
+	kfree(ppi);
+}
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
new file mode 100644
index 0000000..0d6e0a0
--- /dev/null
+++ b/drivers/media/platform/coda.c
@@ -0,0 +1,1849 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ *    Javier Martin, <javier.martin@vista-silicon.com>
+ *    Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "coda.h"
+
+#define CODA_NAME		"coda"
+
+#define CODA_MAX_INSTANCES	4
+
+#define CODA_FMO_BUF_SIZE	32
+#define CODADX6_WORK_BUF_SIZE	(288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
+#define CODA7_WORK_BUF_SIZE	(512 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
+#define CODA_PARA_BUF_SIZE	(10 * 1024)
+#define CODA_ISRAM_SIZE	(2048 * 2)
+
+#define CODA_OUTPUT_BUFS	4
+#define CODA_CAPTURE_BUFS	2
+
+#define MAX_W		720
+#define MAX_H		576
+#define CODA_MAX_FRAME_SIZE	0x90000
+#define FMO_SLICE_SAVE_BUF_SIZE         (32)
+#define CODA_DEFAULT_GAMMA		4096
+
+#define MIN_W 176
+#define MIN_H 144
+
+#define S_ALIGN		1 /* multiple of 2 */
+#define W_ALIGN		1 /* multiple of 2 */
+#define H_ALIGN		1 /* multiple of 2 */
+
+#define fh_to_ctx(__fh)	container_of(__fh, struct coda_ctx, fh)
+
+static int coda_debug;
+module_param(coda_debug, int, 0);
+MODULE_PARM_DESC(coda_debug, "Debug level (0-1)");
+
+enum {
+	V4L2_M2M_SRC = 0,
+	V4L2_M2M_DST = 1,
+};
+
+enum coda_fmt_type {
+	CODA_FMT_ENC,
+	CODA_FMT_RAW,
+};
+
+enum coda_inst_type {
+	CODA_INST_ENCODER,
+	CODA_INST_DECODER,
+};
+
+enum coda_product {
+	CODA_DX6 = 0xf001,
+};
+
+struct coda_fmt {
+	char *name;
+	u32 fourcc;
+	enum coda_fmt_type type;
+};
+
+struct coda_devtype {
+	char			*firmware;
+	enum coda_product	product;
+	struct coda_fmt		*formats;
+	unsigned int		num_formats;
+	size_t			workbuf_size;
+};
+
+/* Per-queue, driver-specific private data */
+struct coda_q_data {
+	unsigned int		width;
+	unsigned int		height;
+	unsigned int		sizeimage;
+	struct coda_fmt	*fmt;
+};
+
+struct coda_aux_buf {
+	void			*vaddr;
+	dma_addr_t		paddr;
+	u32			size;
+};
+
+struct coda_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	vfd;
+	struct platform_device	*plat_dev;
+	struct coda_devtype	*devtype;
+
+	void __iomem		*regs_base;
+	struct clk		*clk_per;
+	struct clk		*clk_ahb;
+
+	struct coda_aux_buf	codebuf;
+	struct coda_aux_buf	workbuf;
+
+	spinlock_t		irqlock;
+	struct mutex		dev_mutex;
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct vb2_alloc_ctx	*alloc_ctx;
+	int			instances;
+};
+
+struct coda_params {
+	u8			h264_intra_qp;
+	u8			h264_inter_qp;
+	u8			mpeg4_intra_qp;
+	u8			mpeg4_inter_qp;
+	u8			gop_size;
+	int			codec_mode;
+	enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+	u32			framerate;
+	u16			bitrate;
+	u32			slice_max_mb;
+};
+
+struct coda_ctx {
+	struct coda_dev			*dev;
+	int				aborting;
+	int				rawstreamon;
+	int				compstreamon;
+	u32				isequence;
+	struct coda_q_data		q_data[2];
+	enum coda_inst_type		inst_type;
+	enum v4l2_colorspace		colorspace;
+	struct coda_params		params;
+	struct v4l2_m2m_ctx		*m2m_ctx;
+	struct v4l2_ctrl_handler	ctrls;
+	struct v4l2_fh			fh;
+	struct vb2_buffer		*reference;
+	int				gopcounter;
+	char				vpu_header[3][64];
+	int				vpu_header_size[3];
+	struct coda_aux_buf		parabuf;
+	int				idx;
+};
+
+static inline void coda_write(struct coda_dev *dev, u32 data, u32 reg)
+{
+	v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+		 "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+	writel(data, dev->regs_base + reg);
+}
+
+static inline unsigned int coda_read(struct coda_dev *dev, u32 reg)
+{
+	u32 data;
+	data = readl(dev->regs_base + reg);
+	v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+		 "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+	return data;
+}
+
+static inline unsigned long coda_isbusy(struct coda_dev *dev)
+{
+	return coda_read(dev, CODA_REG_BIT_BUSY);
+}
+
+static inline int coda_is_initialized(struct coda_dev *dev)
+{
+	return (coda_read(dev, CODA_REG_BIT_CUR_PC) != 0);
+}
+
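+/* Busy-wait until the coda clears its busy flag, giving up after one second */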
+static int coda_wait_timeout(struct coda_dev *dev)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+	while (coda_isbusy(dev)) {
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+	}
+	return 0;
+}
+
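+/*
+ * Set the busy flag and ask the coda to run @cmd for this context's
+ * instance; completion is detected by the busy flag being cleared.
+ */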
+static void coda_command_async(struct coda_ctx *ctx, int cmd)
+{
+	struct coda_dev *dev = ctx->dev;
+	coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+
+	coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
+	coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
+	coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
+}
+
+static int coda_command_sync(struct coda_ctx *ctx, int cmd)
+{
+	struct coda_dev *dev = ctx->dev;
+
+	coda_command_async(ctx, cmd);
+	return coda_wait_timeout(dev);
+}
+
+static struct coda_q_data *get_q_data(struct coda_ctx *ctx,
+					 enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &(ctx->q_data[V4L2_M2M_SRC]);
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &(ctx->q_data[V4L2_M2M_DST]);
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+/*
+ * Add one array of supported formats for each version of Coda:
+ *  i.MX27 -> codadx6
+ *  i.MX51 -> coda7
+ *  i.MX6  -> coda960
+ */
+static struct coda_fmt codadx6_formats[] = {
+	{
+		.name = "YUV 4:2:0 Planar",
+		.fourcc = V4L2_PIX_FMT_YUV420,
+		.type = CODA_FMT_RAW,
+	},
+	{
+		.name = "H264 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.type = CODA_FMT_ENC,
+	},
+	{
+		.name = "MPEG4 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.type = CODA_FMT_ENC,
+	},
+};
+
+static struct coda_fmt *find_format(struct coda_dev *dev, struct v4l2_format *f)
+{
+	struct coda_fmt *formats = dev->devtype->formats;
+	int num_formats = dev->devtype->num_formats;
+	unsigned int k;
+
+	for (k = 0; k < num_formats; k++) {
+		if (formats[k].fourcc == f->fmt.pix.pixelformat)
+			break;
+	}
+
+	if (k == num_formats)
+		return NULL;
+
+	return &formats[k];
+}
+
+/*
+ * V4L2 ioctl() operations.
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, CODA_NAME, sizeof(cap->card));
+	strlcpy(cap->bus_info, CODA_NAME, sizeof(cap->bus_info));
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+				| V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	return 0;
+}
+
+static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
+			enum coda_fmt_type type)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+	struct coda_dev *dev = ctx->dev;
+	struct coda_fmt *formats = dev->devtype->formats;
+	struct coda_fmt *fmt;
+	int num_formats = dev->devtype->num_formats;
+	int i, num = 0;
+
+	for (i = 0; i < num_formats; i++) {
+		if (formats[i].type == type) {
+			if (num == f->index)
+				break;
+			++num;
+		}
+	}
+
+	if (i < num_formats) {
+		fmt = &formats[i];
+		strlcpy(f->description, fmt->name, sizeof(f->description));
+		f->pixelformat = fmt->fourcc;
+		return 0;
+	}
+
+	/* Format not found */
+	return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(priv, f, CODA_FMT_ENC);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(priv, f, CODA_FMT_RAW);
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct coda_q_data *q_data;
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+
+	f->fmt.pix.field	= V4L2_FIELD_NONE;
+	f->fmt.pix.pixelformat	= q_data->fmt->fourcc;
+	f->fmt.pix.width	= q_data->width;
+	f->fmt.pix.height	= q_data->height;
+	if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
+		f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 2);
+	else /* encoded formats h.264/mpeg4 */
+		f->fmt.pix.bytesperline = 0;
+
+	f->fmt.pix.sizeimage	= q_data->sizeimage;
+	f->fmt.pix.colorspace	= ctx->colorspace;
+
+	return 0;
+}
+
+static int vidioc_try_fmt(struct coda_dev *dev, struct v4l2_format *f)
+{
+	enum v4l2_field field;
+
+	field = f->fmt.pix.field;
+	if (field == V4L2_FIELD_ANY)
+		field = V4L2_FIELD_NONE;
+	else if (V4L2_FIELD_NONE != field)
+		return -EINVAL;
+
+	/* V4L2 specification suggests the driver corrects the format struct
+	 * if any of the dimensions is unsupported */
+	f->fmt.pix.field = field;
+
+	if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
+		v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+				      W_ALIGN, &f->fmt.pix.height,
+				      MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+		f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 2);
+		f->fmt.pix.sizeimage = f->fmt.pix.height *
+					f->fmt.pix.bytesperline;
+	} else { /* encoded formats h.264/mpeg4 */
+		f->fmt.pix.bytesperline = 0;
+		f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
+	}
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	int ret;
+	struct coda_fmt *fmt;
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	fmt = find_format(ctx->dev, f);
+	/*
+	 * Since decoding support is not implemented yet do not allow
+	 * CODA_FMT_RAW formats in the capture interface.
+	 */
+	if (!fmt || !(fmt->type == CODA_FMT_ENC))
+		f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+
+	f->fmt.pix.colorspace = ctx->colorspace;
+
+	ret = vidioc_try_fmt(ctx->dev, f);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+	struct coda_fmt *fmt;
+	int ret;
+
+	fmt = find_format(ctx->dev, f);
+	/*
+	 * Since decoding support is not implemented yet do not allow
+	 * CODA_FMT_ENC formats in the output interface.
+	 */
+	if (!fmt || !(fmt->type == CODA_FMT_RAW))
+		f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+	if (!f->fmt.pix.colorspace)
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+	ret = vidioc_try_fmt(ctx->dev, f);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
+{
+	struct coda_q_data *q_data;
+	struct vb2_queue *vq;
+	int ret;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = vidioc_try_fmt(ctx->dev, f);
+	if (ret)
+		return ret;
+
+	q_data->fmt = find_format(ctx->dev, f);
+	q_data->width = f->fmt.pix.width;
+	q_data->height = f->fmt.pix.height;
+	if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420) {
+		q_data->sizeimage = q_data->width * q_data->height * 3 / 2;
+	} else { /* encoded format h.264/mpeg-4 */
+		q_data->sizeimage = CODA_MAX_FRAME_SIZE;
+	}
+
+	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+		"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+		f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+	return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = vidioc_try_fmt_vid_cap(file, priv, f);
+	if (ret)
+		return ret;
+
+	return vidioc_s_fmt(fh_to_ctx(priv), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+	int ret;
+
+	ret = vidioc_try_fmt_vid_out(file, priv, f);
+	if (ret)
+		return ret;
+
+	ret = vidioc_s_fmt(ctx, f);
+	if (ret)
+		return ret;
+
+	ctx->colorspace = f->fmt.pix.colorspace;
+
+	return 0;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops coda_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= vidioc_g_fmt,
+	.vidioc_try_fmt_vid_cap	= vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= vidioc_s_fmt_vid_cap,
+
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out	= vidioc_g_fmt,
+	.vidioc_try_fmt_vid_out	= vidioc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out	= vidioc_s_fmt_vid_out,
+
+	.vidioc_reqbufs		= vidioc_reqbufs,
+	.vidioc_querybuf	= vidioc_querybuf,
+
+	.vidioc_qbuf		= vidioc_qbuf,
+	.vidioc_dqbuf		= vidioc_dqbuf,
+
+	.vidioc_streamon	= vidioc_streamon,
+	.vidioc_streamoff	= vidioc_streamoff,
+};
+
+/*
+ * Mem-to-mem operations.
+ */
+static void coda_device_run(void *m2m_priv)
+{
+	struct coda_ctx *ctx = m2m_priv;
+	struct coda_q_data *q_data_src, *q_data_dst;
+	struct vb2_buffer *src_buf, *dst_buf;
+	struct coda_dev *dev = ctx->dev;
+	int force_ipicture;
+	int quant_param = 0;
+	u32 picture_y, picture_cb, picture_cr;
+	u32 pic_stream_buffer_addr, pic_stream_buffer_size;
+	u32 dst_fourcc;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	dst_fourcc = q_data_dst->fmt->fourcc;
+
+	src_buf->v4l2_buf.sequence = ctx->isequence;
+	dst_buf->v4l2_buf.sequence = ctx->isequence;
+	ctx->isequence++;
+
+	/*
+	 * Workaround coda firmware BUG that only marks the first
+	 * frame as IDR. This is a problem for some decoders that can't
+	 * recover when a frame is lost.
+	 */
+	if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) {
+		src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+		src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+	} else {
+		src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+		src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+	}
+
+	/*
+	 * Copy headers at the beginning of the first frame for H.264 only.
+	 * In MPEG4 they are already copied by the coda.
+	 */
+	if (src_buf->v4l2_buf.sequence == 0) {
+		pic_stream_buffer_addr =
+			vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+			ctx->vpu_header_size[0] +
+			ctx->vpu_header_size[1] +
+			ctx->vpu_header_size[2];
+		pic_stream_buffer_size = CODA_MAX_FRAME_SIZE -
+			ctx->vpu_header_size[0] -
+			ctx->vpu_header_size[1] -
+			ctx->vpu_header_size[2];
+		memcpy(vb2_plane_vaddr(dst_buf, 0),
+		       &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
+		memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0],
+		       &ctx->vpu_header[1][0], ctx->vpu_header_size[1]);
+		memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] +
+			ctx->vpu_header_size[1], &ctx->vpu_header[2][0],
+			ctx->vpu_header_size[2]);
+	} else {
+		pic_stream_buffer_addr =
+			vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+		pic_stream_buffer_size = CODA_MAX_FRAME_SIZE;
+	}
+
+	if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+		force_ipicture = 1;
+		switch (dst_fourcc) {
+		case V4L2_PIX_FMT_H264:
+			quant_param = ctx->params.h264_intra_qp;
+			break;
+		case V4L2_PIX_FMT_MPEG4:
+			quant_param = ctx->params.mpeg4_intra_qp;
+			break;
+		default:
+			v4l2_warn(&ctx->dev->v4l2_dev,
+				"cannot set intra qp, fmt not supported\n");
+			break;
+		}
+	} else {
+		force_ipicture = 0;
+		switch (dst_fourcc) {
+		case V4L2_PIX_FMT_H264:
+			quant_param = ctx->params.h264_inter_qp;
+			break;
+		case V4L2_PIX_FMT_MPEG4:
+			quant_param = ctx->params.mpeg4_inter_qp;
+			break;
+		default:
+			v4l2_warn(&ctx->dev->v4l2_dev,
+				"cannot set inter qp, fmt not supported\n");
+			break;
+		}
+	}
+
+	/* submit */
+	coda_write(dev, 0, CODA_CMD_ENC_PIC_ROT_MODE);
+	coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
+
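+	/*
+	 * The source frame is contiguous planar YUV 4:2:0: the Cb plane
+	 * follows the full-size Y plane and the Cr plane follows the
+	 * quarter-size Cb plane.
+	 */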
+	picture_y = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	picture_cb = picture_y + q_data_src->width * q_data_src->height;
+	picture_cr = picture_cb + q_data_src->width / 2 *
+			q_data_src->height / 2;
+
+	coda_write(dev, picture_y, CODA_CMD_ENC_PIC_SRC_ADDR_Y);
+	coda_write(dev, picture_cb, CODA_CMD_ENC_PIC_SRC_ADDR_CB);
+	coda_write(dev, picture_cr, CODA_CMD_ENC_PIC_SRC_ADDR_CR);
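+	/* Bit 1 of the picture option word requests a forced I-picture */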
+	coda_write(dev, (force_ipicture << 1) & 0x2,
+		   CODA_CMD_ENC_PIC_OPTION);
+
+	coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
+	coda_write(dev, pic_stream_buffer_size / 1024,
+		   CODA_CMD_ENC_PIC_BB_SIZE);
+	coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+}
+
+static int coda_job_ready(void *m2m_priv)
+{
+	struct coda_ctx *ctx = m2m_priv;
+
+	/*
+	 * For both P frames and keyframes, one source picture
+	 * and one destination buffer are needed.
+	 */
+	if (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) ||
+		!v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "not ready: not enough video buffers.\n");
+		return 0;
+	}
+
+	/* For P frames a reference picture is needed too */
+	if ((ctx->gopcounter != (ctx->params.gop_size - 1)) &&
+	   !ctx->reference) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "not ready: reference picture not available.\n");
+		return 0;
+	}
+
+	if (coda_isbusy(ctx->dev)) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "not ready: coda is still busy.\n");
+		return 0;
+	}
+
+	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			"job ready\n");
+	return 1;
+}
+
+static void coda_job_abort(void *priv)
+{
+	struct coda_ctx *ctx = priv;
+	struct coda_dev *dev = ctx->dev;
+
+	ctx->aborting = 1;
+
+	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+		 "Aborting task\n");
+
+	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void coda_lock(void *m2m_priv)
+{
+	struct coda_ctx *ctx = m2m_priv;
+	struct coda_dev *pcdev = ctx->dev;
+	mutex_lock(&pcdev->dev_mutex);
+}
+
+static void coda_unlock(void *m2m_priv)
+{
+	struct coda_ctx *ctx = m2m_priv;
+	struct coda_dev *pcdev = ctx->dev;
+	mutex_unlock(&pcdev->dev_mutex);
+}
+
+static struct v4l2_m2m_ops coda_m2m_ops = {
+	.device_run	= coda_device_run,
+	.job_ready	= coda_job_ready,
+	.job_abort	= coda_job_abort,
+	.lock		= coda_lock,
+	.unlock		= coda_unlock,
+};
+
+static void set_default_params(struct coda_ctx *ctx)
+{
+	struct coda_dev *dev = ctx->dev;
+
+	ctx->params.codec_mode = CODA_MODE_INVALID;
+	ctx->colorspace = V4L2_COLORSPACE_REC709;
+	ctx->params.framerate = 30;
+	ctx->reference = NULL;
+	ctx->aborting = 0;
+
+	/* Default formats for output and input queues */
+	ctx->q_data[V4L2_M2M_SRC].fmt = &dev->devtype->formats[0];
+	ctx->q_data[V4L2_M2M_DST].fmt = &dev->devtype->formats[1];
+	ctx->q_data[V4L2_M2M_SRC].width = MAX_W;
+	ctx->q_data[V4L2_M2M_SRC].height = MAX_H;
+	ctx->q_data[V4L2_M2M_SRC].sizeimage = (MAX_W * MAX_H * 3) / 2;
+	ctx->q_data[V4L2_M2M_DST].width = MAX_W;
+	ctx->q_data[V4L2_M2M_DST].height = MAX_H;
+	ctx->q_data[V4L2_M2M_DST].sizeimage = CODA_MAX_FRAME_SIZE;
+}
+
+/*
+ * Queue operations
+ */
+static int coda_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(vq);
+	unsigned int size;
+
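+	/*
+	 * Output (raw) buffers hold one YUV 4:2:0 frame, capture buffers
+	 * hold one encoded frame of worst-case size.
+	 */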
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		*nbuffers = CODA_OUTPUT_BUFS;
+		if (fmt)
+			size = fmt->fmt.pix.width *
+				fmt->fmt.pix.height * 3 / 2;
+		else
+			size = MAX_W * MAX_H * 3 / 2;
+	} else {
+		*nbuffers = CODA_CAPTURE_BUFS;
+		size = CODA_MAX_FRAME_SIZE;
+	}
+
+	*nplanes = 1;
+	sizes[0] = size;
+
+	alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+		 "get %d buffer(s) of size %d each.\n", *nbuffers, size);
+
+	return 0;
+}
+
+static int coda_buf_prepare(struct vb2_buffer *vb)
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct coda_q_data *q_data;
+
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+	if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+		v4l2_warn(&ctx->dev->v4l2_dev,
+			  "%s data will not fit into plane (%lu < %lu)\n",
+			  __func__, vb2_plane_size(vb, 0),
+			  (unsigned long)q_data->sizeimage);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+	return 0;
+}
+
+static void coda_buf_queue(struct vb2_buffer *vb)
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void coda_wait_prepare(struct vb2_queue *q)
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(q);
+	coda_unlock(ctx);
+}
+
+static void coda_wait_finish(struct vb2_queue *q)
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(q);
+	coda_lock(ctx);
+}
+
+static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(q);
+	struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
+	u32 bitstream_buf, bitstream_size;
+	struct coda_dev *dev = ctx->dev;
+	struct coda_q_data *q_data_src, *q_data_dst;
+	u32 dst_fourcc;
+	struct vb2_buffer *buf;
+	struct vb2_queue *src_vq;
+	u32 value;
+	int i = 0;
+
+	if (count < 1)
+		return -EINVAL;
+
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		ctx->rawstreamon = 1;
+	else
+		ctx->compstreamon = 1;
+
+	/* Don't start the coda unless both queues are on */
+	if (!(ctx->rawstreamon && ctx->compstreamon))
+		return 0;
+
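+	/*
+	 * gopcounter counts down the frames left in the current GOP and
+	 * decides whether a reference picture has to be kept around.
+	 */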
+	ctx->gopcounter = ctx->params.gop_size - 1;
+
+	q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
+	q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	bitstream_size = q_data_dst->sizeimage;
+	dst_fourcc = q_data_dst->fmt->fourcc;
+
+	/* Find out whether coda must encode or decode */
+	if (q_data_src->fmt->type == CODA_FMT_RAW &&
+	    q_data_dst->fmt->type == CODA_FMT_ENC) {
+		ctx->inst_type = CODA_INST_ENCODER;
+	} else if (q_data_src->fmt->type == CODA_FMT_ENC &&
+		   q_data_dst->fmt->type == CODA_FMT_RAW) {
+		ctx->inst_type = CODA_INST_DECODER;
+		v4l2_err(v4l2_dev, "decoding not supported.\n");
+		return -EINVAL;
+	} else {
+		v4l2_err(v4l2_dev, "couldn't tell instance type.\n");
+		return -EINVAL;
+	}
+
+	if (!coda_is_initialized(dev)) {
+		v4l2_err(v4l2_dev, "coda is not initialized.\n");
+		return -EFAULT;
+	}
+	coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+	coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->idx));
+	coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->idx));
+	switch (dev->devtype->product) {
+	case CODA_DX6:
+		coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
+			CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+		break;
+	default:
+		coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
+			CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+	}
+
+	/* Configure the coda */
+	coda_write(dev, 0xffff4c00, CODA_REG_BIT_SEARCH_RAM_BASE_ADDR);
+
+	/* Could set rotation here if needed */
+	switch (dev->devtype->product) {
+	case CODA_DX6:
+		value = (q_data_src->width & CODADX6_PICWIDTH_MASK) << CODADX6_PICWIDTH_OFFSET;
+		break;
+	default:
+		value = (q_data_src->width & CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
+	}
+	value |= (q_data_src->height & CODA_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
+	coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
+	coda_write(dev, ctx->params.framerate,
+		   CODA_CMD_ENC_SEQ_SRC_F_RATE);
+
+	switch (dst_fourcc) {
+	case V4L2_PIX_FMT_MPEG4:
+		if (dev->devtype->product == CODA_DX6)
+			ctx->params.codec_mode = CODADX6_MODE_ENCODE_MP4;
+		else
+			ctx->params.codec_mode = CODA7_MODE_ENCODE_MP4;
+
+		coda_write(dev, CODA_STD_MPEG4, CODA_CMD_ENC_SEQ_COD_STD);
+		coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
+		break;
+	case V4L2_PIX_FMT_H264:
+		if (dev->devtype->product == CODA_DX6)
+			ctx->params.codec_mode = CODADX6_MODE_ENCODE_H264;
+		else
+			ctx->params.codec_mode = CODA7_MODE_ENCODE_H264;
+
+		coda_write(dev, CODA_STD_H264, CODA_CMD_ENC_SEQ_COD_STD);
+		coda_write(dev, 0, CODA_CMD_ENC_SEQ_264_PARA);
+		break;
+	default:
+		v4l2_err(v4l2_dev,
+			 "dst format (0x%08x) invalid.\n", dst_fourcc);
+		return -EINVAL;
+	}
+
+	value  = (ctx->params.slice_max_mb & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET;
+	value |= (1 & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET;
+	if (ctx->params.slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB)
+		value |=  1 & CODA_SLICING_MODE_MASK;
+	coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
+	value  =  ctx->params.gop_size & CODA_GOP_SIZE_MASK;
+	coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
+
+	if (ctx->params.bitrate) {
+		/* Rate control enabled */
+		value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK) << CODA_RATECONTROL_BITRATE_OFFSET;
+		value |=  1 & CODA_RATECONTROL_ENABLE_MASK;
+	} else {
+		value = 0;
+	}
+	coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
+
+	coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
+	coda_write(dev, 0, CODA_CMD_ENC_SEQ_INTRA_REFRESH);
+
+	coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
+	coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
+
+	/* set default gamma */
+	value = (CODA_DEFAULT_GAMMA & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET;
+	coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_GAMMA);
+
+	value  = (CODA_DEFAULT_GAMMA > 0) << CODA_OPTION_GAMMA_OFFSET;
+	value |= (0 & CODA_OPTION_SLICEREPORT_MASK) << CODA_OPTION_SLICEREPORT_OFFSET;
+	coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
+
+	if (dst_fourcc == V4L2_PIX_FMT_H264) {
+		value  = (FMO_SLICE_SAVE_BUF_SIZE << 7);
+		value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET;
+		value |=  0 & CODA_FMOPARAM_SLICENUM_MASK;
+		coda_write(dev, value, CODA_CMD_ENC_SEQ_FMO);
+	}
+
+	if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
+		v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0)
+		return -EFAULT;
+
+	/*
+	 * Walk the src buffer list and let the codec know the
+	 * addresses of the pictures.
+	 */
+	src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	for (i = 0; i < src_vq->num_buffers; i++) {
+		u32 *p;
+
+		buf = src_vq->bufs[i];
+		p = ctx->parabuf.vaddr;
+
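+		/*
+		 * Each frame buffer takes three words in the parameter
+		 * buffer: the Y, Cb and Cr plane addresses.
+		 */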
+		p[i * 3] = vb2_dma_contig_plane_dma_addr(buf, 0);
+		p[i * 3 + 1] = p[i * 3] + q_data_src->width *
+				q_data_src->height;
+		p[i * 3 + 2] = p[i * 3 + 1] + q_data_src->width / 2 *
+				q_data_src->height / 2;
+	}
+
+	coda_write(dev, src_vq->num_buffers, CODA_CMD_SET_FRAME_BUF_NUM);
+	coda_write(dev, q_data_src->width, CODA_CMD_SET_FRAME_BUF_STRIDE);
+	if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
+		v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Save stream headers */
+	buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	switch (dst_fourcc) {
+	case V4L2_PIX_FMT_H264:
+		/*
+		 * Get SPS in the first frame and copy it to an
+		 * intermediate buffer.
+		 */
+		coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0), CODA_CMD_ENC_HEADER_BB_START);
+		coda_write(dev, bitstream_size, CODA_CMD_ENC_HEADER_BB_SIZE);
+		coda_write(dev, CODA_HEADER_H264_SPS, CODA_CMD_ENC_HEADER_CODE);
+		if (coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER)) {
+			v4l2_err(v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
+			return -ETIMEDOUT;
+		}
+		ctx->vpu_header_size[0] = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+				coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+		memcpy(&ctx->vpu_header[0][0], vb2_plane_vaddr(buf, 0),
+		       ctx->vpu_header_size[0]);
+
+		/*
+		 * Get PPS in the first frame and copy it to an
+		 * intermediate buffer.
+		 */
+		coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0), CODA_CMD_ENC_HEADER_BB_START);
+		coda_write(dev, bitstream_size, CODA_CMD_ENC_HEADER_BB_SIZE);
+		coda_write(dev, CODA_HEADER_H264_PPS, CODA_CMD_ENC_HEADER_CODE);
+		if (coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER)) {
+			v4l2_err(v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
+			return -ETIMEDOUT;
+		}
+		ctx->vpu_header_size[1] = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+				coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+		memcpy(&ctx->vpu_header[1][0], vb2_plane_vaddr(buf, 0),
+		       ctx->vpu_header_size[1]);
+		ctx->vpu_header_size[2] = 0;
+		break;
+	case V4L2_PIX_FMT_MPEG4:
+		/*
+		 * Get VOS in the first frame and copy it to an
+		 * intermediate buffer
+		 */
+		coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0), CODA_CMD_ENC_HEADER_BB_START);
+		coda_write(dev, bitstream_size, CODA_CMD_ENC_HEADER_BB_SIZE);
+		coda_write(dev, CODA_HEADER_MP4V_VOS, CODA_CMD_ENC_HEADER_CODE);
+		if (coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER)) {
+			v4l2_err(v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
+			return -ETIMEDOUT;
+		}
+		ctx->vpu_header_size[0] = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+				coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+		memcpy(&ctx->vpu_header[0][0], vb2_plane_vaddr(buf, 0),
+		       ctx->vpu_header_size[0]);
+
+		coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0), CODA_CMD_ENC_HEADER_BB_START);
+		coda_write(dev, bitstream_size, CODA_CMD_ENC_HEADER_BB_SIZE);
+		coda_write(dev, CODA_HEADER_MP4V_VIS, CODA_CMD_ENC_HEADER_CODE);
+		if (coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER)) {
+			v4l2_err(v4l2_dev, "CODA_COMMAND_ENCODE_HEADER failed\n");
+			return -ETIMEDOUT;
+		}
+		ctx->vpu_header_size[1] = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+				coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+		memcpy(&ctx->vpu_header[1][0], vb2_plane_vaddr(buf, 0),
+		       ctx->vpu_header_size[1]);
+
+		coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0), CODA_CMD_ENC_HEADER_BB_START);
+		coda_write(dev, bitstream_size, CODA_CMD_ENC_HEADER_BB_SIZE);
+		coda_write(dev, CODA_HEADER_MP4V_VOL, CODA_CMD_ENC_HEADER_CODE);
+		if (coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER)) {
+			v4l2_err(v4l2_dev, "CODA_COMMAND_ENCODE_HEADER failed\n");
+			return -ETIMEDOUT;
+		}
+		ctx->vpu_header_size[2] = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+				coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+		memcpy(&ctx->vpu_header[2][0], vb2_plane_vaddr(buf, 0),
+		       ctx->vpu_header_size[2]);
+		break;
+	default:
+		/* No other formats need stream headers at the moment */
+		break;
+	}
+
+	return 0;
+}
+
+static int coda_stop_streaming(struct vb2_queue *q)
+{
+	struct coda_ctx *ctx = vb2_get_drv_priv(q);
+
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "%s: output\n", __func__);
+		ctx->rawstreamon = 0;
+	} else {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "%s: capture\n", __func__);
+		ctx->compstreamon = 0;
+	}
+
+	if (!ctx->rawstreamon && !ctx->compstreamon) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "%s: sent command 'SEQ_END' to coda\n", __func__);
+		if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+			v4l2_err(&ctx->dev->v4l2_dev,
+				 "CODA_COMMAND_SEQ_END failed\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static struct vb2_ops coda_qops = {
+	.queue_setup		= coda_queue_setup,
+	.buf_prepare		= coda_buf_prepare,
+	.buf_queue		= coda_buf_queue,
+	.wait_prepare		= coda_wait_prepare,
+	.wait_finish		= coda_wait_finish,
+	.start_streaming	= coda_start_streaming,
+	.stop_streaming		= coda_stop_streaming,
+};
+
+static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct coda_ctx *ctx =
+			container_of(ctrl->handler, struct coda_ctx, ctrls);
+
+	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+		 "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDEO_BITRATE:
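+		/* the V4L2 control is in bit/s, params.bitrate is kept in kbit/s */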
+		ctx->params.bitrate = ctrl->val / 1000;
+		break;
+	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+		ctx->params.gop_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+		ctx->params.h264_intra_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+		ctx->params.h264_inter_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+		ctx->params.mpeg4_intra_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+		ctx->params.mpeg4_inter_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+		ctx->params.slice_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+		ctx->params.slice_max_mb = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+		break;
+	default:
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			"Invalid control, id=%d, val=%d\n",
+			ctrl->id, ctrl->val);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct v4l2_ctrl_ops coda_ctrl_ops = {
+	.s_ctrl = coda_s_ctrl,
+};
+
+static int coda_ctrls_setup(struct coda_ctx *ctx)
+{
+	v4l2_ctrl_handler_init(&ctx->ctrls, 9);
+
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 25);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 25);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
+	v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+		V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB, 0,
+		V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB);
+	v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
+	v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+		V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+		V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+		(1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE),
+		V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+
+	if (ctx->ctrls.error) {
+		v4l2_err(&ctx->dev->v4l2_dev, "control initialization error (%d)",
+			ctx->ctrls.error);
+		return -EINVAL;
+	}
+
+	return v4l2_ctrl_handler_setup(&ctx->ctrls);
+}
+
+static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct coda_ctx *ctx = priv;
+	int ret;
+
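+	/*
+	 * The output (source) queue carries the raw frames to encode, the
+	 * capture (destination) queue returns the encoded bitstream.
+	 */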
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &coda_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &coda_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+	return vb2_queue_init(dst_vq);
+}
+
+static int coda_open(struct file *file)
+{
+	struct coda_dev *dev = video_drvdata(file);
+	struct coda_ctx *ctx = NULL;
+	int ret = 0;
+
+	if (dev->instances >= CODA_MAX_INSTANCES)
+		return -EBUSY;
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+	ctx->dev = dev;
+
+	set_default_params(ctx);
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+					 &coda_queue_init);
+	if (IS_ERR(ctx->m2m_ctx)) {
+		ret = PTR_ERR(ctx->m2m_ctx);
+
+		v4l2_err(&dev->v4l2_dev, "%s return error (%d)\n",
+			 __func__, ret);
+		goto err;
+	}
+	ret = coda_ctrls_setup(ctx);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
+		goto err;
+	}
+
+	ctx->fh.ctrl_handler = &ctx->ctrls;
+
+	ctx->parabuf.vaddr = dma_alloc_coherent(&dev->plat_dev->dev,
+			CODA_PARA_BUF_SIZE, &ctx->parabuf.paddr, GFP_KERNEL);
+	if (!ctx->parabuf.vaddr) {
+		v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+		ret = -ENOMEM;
+		goto err;
+	}
+
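+	/* the instance counter is serialized with the device mutex */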
+	coda_lock(ctx);
+	ctx->idx = dev->instances++;
+	coda_unlock(ctx);
+
+	clk_prepare_enable(dev->clk_per);
+	clk_prepare_enable(dev->clk_ahb);
+
+	v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
+		 ctx->idx, ctx);
+
+	return 0;
+
+err:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+	return ret;
+}
+
+static int coda_release(struct file *file)
+{
+	struct coda_dev *dev = video_drvdata(file);
+	struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+
+	v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
+		 ctx);
+
+	coda_lock(ctx);
+	dev->instances--;
+	coda_unlock(ctx);
+
+	dma_free_coherent(&dev->plat_dev->dev, CODA_PARA_BUF_SIZE,
+		ctx->parabuf.vaddr, ctx->parabuf.paddr);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	v4l2_ctrl_handler_free(&ctx->ctrls);
+	clk_disable_unprepare(dev->clk_per);
+	clk_disable_unprepare(dev->clk_ahb);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+
+	return 0;
+}
+
+static unsigned int coda_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+	int ret;
+
+	coda_lock(ctx);
+	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	coda_unlock(ctx);
+	return ret;
+}
+
+static int coda_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+
+	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations coda_fops = {
+	.owner		= THIS_MODULE,
+	.open		= coda_open,
+	.release	= coda_release,
+	.poll		= coda_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= coda_mmap,
+};
+
+static irqreturn_t coda_irq_handler(int irq, void *data)
+{
+	struct vb2_buffer *src_buf, *dst_buf, *tmp_buf;
+	struct coda_dev *dev = data;
+	u32 wr_ptr, start_ptr;
+	struct coda_ctx *ctx;
+
+	/* read status register to acknowledge the IRQ */
+	coda_read(dev, CODA_REG_BIT_INT_STATUS);
+	coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
+		      CODA_REG_BIT_INT_CLEAR);
+
+	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+	if (ctx == NULL) {
+		v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
+		return IRQ_HANDLED;
+	}
+
+	if (ctx->aborting) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "task has been aborted\n");
+		return IRQ_HANDLED;
+	}
+
+	if (coda_isbusy(ctx->dev)) {
+		v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+			 "coda is still busy!!!!\n");
+		return IRQ_NONE;
+	}
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+	/* Get results from the coda */
+	coda_read(dev, CODA_RET_ENC_PIC_TYPE);
+	start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
+	wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx));
+	/* Calculate bytesused field */
+	if (dst_buf->v4l2_buf.sequence == 0) {
+		dst_buf->v4l2_planes[0].bytesused = (wr_ptr - start_ptr) +
+						ctx->vpu_header_size[0] +
+						ctx->vpu_header_size[1] +
+						ctx->vpu_header_size[2];
+	} else {
+		dst_buf->v4l2_planes[0].bytesused = (wr_ptr - start_ptr);
+	}
+
+	v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
+		 wr_ptr - start_ptr);
+
+	coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
+	coda_read(dev, CODA_RET_ENC_PIC_FLAG);
+
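+	/*
+	 * Mirror the frame type chosen in coda_device_run onto the
+	 * destination buffer.
+	 */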
+	if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+		dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+		dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+	} else {
+		dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+		dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+	}
+
+	/* Free previous reference picture if available */
+	if (ctx->reference) {
+		v4l2_m2m_buf_done(ctx->reference, VB2_BUF_STATE_DONE);
+		ctx->reference = NULL;
+	}
+
+	/*
+	 * For the last frame of the gop we don't need to save
+	 * a reference picture.
+	 */
+	v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+	tmp_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+	if (ctx->gopcounter == 0)
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+	else
+		ctx->reference = tmp_buf;
+
+	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+	ctx->gopcounter--;
+	if (ctx->gopcounter < 0)
+		ctx->gopcounter = ctx->params.gop_size - 1;
+
+	v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+		"job finished: encoding frame (%d) (%s)\n",
+		dst_buf->v4l2_buf.sequence,
+		(dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+		"KEYFRAME" : "PFRAME");
+
+	v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
+
+	return IRQ_HANDLED;
+}
+
+static u32 coda_supported_firmwares[] = {
+	CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
+};
+
+static bool coda_firmware_supported(u32 vernum)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
+		if (vernum == coda_supported_firmwares[i])
+			return true;
+	return false;
+}
+
+static char *coda_product_name(int product)
+{
+	static char buf[9];
+
+	switch (product) {
+	case CODA_DX6:
+		return "CodaDx6";
+	default:
+		snprintf(buf, sizeof(buf), "(0x%04x)", product);
+		return buf;
+	}
+}
+
+static int coda_hw_init(struct coda_dev *dev, const struct firmware *fw)
+{
+	u16 product, major, minor, release;
+	u32 data;
+	u16 *p;
+	int i;
+
+	clk_prepare_enable(dev->clk_per);
+	clk_prepare_enable(dev->clk_ahb);
+
+	/* Copy the whole firmware image to the code buffer */
+	memcpy(dev->codebuf.vaddr, fw->data, fw->size);
+	/*
+	 * Copy the first CODA_ISRAM_SIZE bytes into the internal SRAM.
+	 * This memory seems to be big-endian here, which is weird, since
+	 * the internal ARM processor of the coda is little endian.
+	 * Data in this SRAM survives a reboot.
+	 */
+	p = (u16 *)fw->data;
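+	/* the i ^ 1 index swaps each pair of 16-bit halfwords accordingly */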
+	for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++)  {
+		data = CODA_DOWN_ADDRESS_SET(i) |
+			CODA_DOWN_DATA_SET(p[i ^ 1]);
+		coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+	}
+	release_firmware(fw);
+
+	/* Tell the BIT where to find everything it needs */
+	coda_write(dev, dev->workbuf.paddr,
+		      CODA_REG_BIT_WORK_BUF_ADDR);
+	coda_write(dev, dev->codebuf.paddr,
+		      CODA_REG_BIT_CODE_BUF_ADDR);
+	coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
+
+	/* Set default values */
+	switch (dev->devtype->product) {
+	case CODA_DX6:
+		coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL);
+		break;
+	default:
+		coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL);
+	}
+	coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
+	coda_write(dev, CODA_INT_INTERRUPT_ENABLE,
+		      CODA_REG_BIT_INT_ENABLE);
+
+	/* Reset VPU and start processor */
+	data = coda_read(dev, CODA_REG_BIT_CODE_RESET);
+	data |= CODA_REG_RESET_ENABLE;
+	coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+	udelay(10);
+	data &= ~CODA_REG_RESET_ENABLE;
+	coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+	coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
+
+	/* Load firmware */
+	coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
+	coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+	coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
+	coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
+	coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
+	if (coda_wait_timeout(dev)) {
+		clk_disable_unprepare(dev->clk_per);
+		clk_disable_unprepare(dev->clk_ahb);
+		v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
+		return -EIO;
+	}
+
+	/* Check we are compatible with the loaded firmware */
+	data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
+	product = CODA_FIRMWARE_PRODUCT(data);
+	major = CODA_FIRMWARE_MAJOR(data);
+	minor = CODA_FIRMWARE_MINOR(data);
+	release = CODA_FIRMWARE_RELEASE(data);
+
+	clk_disable_unprepare(dev->clk_per);
+	clk_disable_unprepare(dev->clk_ahb);
+
+	if (product != dev->devtype->product) {
+		v4l2_err(&dev->v4l2_dev, "Wrong firmware. Hw: %s, Fw: %s,"
+			 " Version: %u.%u.%u\n",
+			 coda_product_name(dev->devtype->product),
+			 coda_product_name(product), major, minor, release);
+		return -EINVAL;
+	}
+
+	v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
+		  coda_product_name(product));
+
+	if (coda_firmware_supported(data)) {
+		v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
+			  major, minor, release);
+	} else {
+		v4l2_warn(&dev->v4l2_dev, "Unsupported firmware version: "
+			  "%u.%u.%u\n", major, minor, release);
+	}
+
+	return 0;
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context)
+{
+	struct coda_dev *dev = context;
+	struct platform_device *pdev = dev->plat_dev;
+	int ret;
+
+	if (!fw) {
+		v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
+		return;
+	}
+
+	/* allocate auxiliary per-device code buffer for the BIT processor */
+	dev->codebuf.size = fw->size;
+	dev->codebuf.vaddr = dma_alloc_coherent(&pdev->dev, fw->size,
+						    &dev->codebuf.paddr,
+						    GFP_KERNEL);
+	if (!dev->codebuf.vaddr) {
+		dev_err(&pdev->dev, "failed to allocate code buffer\n");
+		return;
+	}
+
+	ret = coda_hw_init(dev, fw);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "HW initialization failed\n");
+		return;
+	}
+
+	dev->vfd.fops	= &coda_fops;
+	dev->vfd.ioctl_ops	= &coda_ioctl_ops;
+	dev->vfd.release	= video_device_release_empty;
+	dev->vfd.lock	= &dev->dev_mutex;
+	dev->vfd.v4l2_dev	= &dev->v4l2_dev;
+	snprintf(dev->vfd.name, sizeof(dev->vfd.name), "%s", CODA_NAME);
+	video_set_drvdata(&dev->vfd, dev);
+
+	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(dev->alloc_ctx)) {
+		v4l2_err(&dev->v4l2_dev, "Failed to alloc vb2 context\n");
+		return;
+	}
+
+	dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
+	if (IS_ERR(dev->m2m_dev)) {
+		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+		goto rel_ctx;
+	}
+
+	ret = video_register_device(&dev->vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+		goto rel_m2m;
+	}
+	v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video%d\n",
+		  dev->vfd.num);
+
+	return;
+
+rel_m2m:
+	v4l2_m2m_release(dev->m2m_dev);
+rel_ctx:
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+}
+
+static int coda_firmware_request(struct coda_dev *dev)
+{
+	char *fw = dev->devtype->firmware;
+
+	dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+		coda_product_name(dev->devtype->product));
+
+	return request_firmware_nowait(THIS_MODULE, true,
+		fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback);
+}
+
+enum coda_platform {
+	CODA_IMX27,
+};
+
+static struct coda_devtype coda_devdata[] = {
+	[CODA_IMX27] = {
+		.firmware    = "v4l-codadx6-imx27.bin",
+		.product     = CODA_DX6,
+		.formats     = codadx6_formats,
+		.num_formats = ARRAY_SIZE(codadx6_formats),
+	},
+};
+
+static struct platform_device_id coda_platform_ids[] = {
+	{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, coda_platform_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id coda_dt_ids[] = {
+	{ .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, coda_dt_ids);
+#endif
+
+static int __devinit coda_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id =
+			of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
+	const struct platform_device_id *pdev_id;
+	struct coda_dev *dev;
+	struct resource *res;
+	int ret, irq;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof *dev, GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "Not enough memory for %s\n",
+			CODA_NAME);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&dev->irqlock);
+
+	dev->plat_dev = pdev;
+	dev->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(dev->clk_per)) {
+		dev_err(&pdev->dev, "Could not get per clock\n");
+		return PTR_ERR(dev->clk_per);
+	}
+
+	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(dev->clk_ahb)) {
+		dev_err(&pdev->dev, "Could not get ahb clock\n");
+		return PTR_ERR(dev->clk_ahb);
+	}
+
+	/* Get the memory region for the registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory region resource\n");
+		return -ENOENT;
+	}
+
+	if (devm_request_mem_region(&pdev->dev, res->start,
+			resource_size(res), CODA_NAME) == NULL) {
+		dev_err(&pdev->dev, "failed to request memory region\n");
+		return -ENOENT;
+	}
+	dev->regs_base = devm_ioremap(&pdev->dev, res->start,
+				      resource_size(res));
+	if (!dev->regs_base) {
+		dev_err(&pdev->dev, "failed to ioremap address region\n");
+		return -ENOENT;
+	}
+
+	/* IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq resource\n");
+		return -ENOENT;
+	}
+
+	if (devm_request_irq(&pdev->dev, irq, coda_irq_handler,
+		0, CODA_NAME, dev) < 0) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		return -ENOENT;
+	}
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret)
+		return ret;
+
+	mutex_init(&dev->dev_mutex);
+
+	pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+
+	if (pdev_id) {
+		dev->devtype = &coda_devdata[pdev_id->driver_data];
+	} else {
+		v4l2_device_unregister(&dev->v4l2_dev);
+		return -EINVAL;
+	}
+
+	/* allocate auxiliary per-device buffers for the BIT processor */
+	switch (dev->devtype->product) {
+	case CODA_DX6:
+		dev->workbuf.size = CODADX6_WORK_BUF_SIZE;
+		break;
+	default:
+		dev->workbuf.size = CODA7_WORK_BUF_SIZE;
+	}
+	dev->workbuf.vaddr = dma_alloc_coherent(&pdev->dev, dev->workbuf.size,
+						    &dev->workbuf.paddr,
+						    GFP_KERNEL);
+	if (!dev->workbuf.vaddr) {
+		dev_err(&pdev->dev, "failed to allocate work buffer\n");
+		v4l2_device_unregister(&dev->v4l2_dev);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	return coda_firmware_request(dev);
+}
+
+static int coda_remove(struct platform_device *pdev)
+{
+	struct coda_dev *dev = platform_get_drvdata(pdev);
+
+	video_unregister_device(&dev->vfd);
+	if (dev->m2m_dev)
+		v4l2_m2m_release(dev->m2m_dev);
+	if (dev->alloc_ctx)
+		vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	if (dev->codebuf.vaddr)
+		dma_free_coherent(&pdev->dev, dev->codebuf.size,
+				  dev->codebuf.vaddr, dev->codebuf.paddr);
+	if (dev->workbuf.vaddr)
+		dma_free_coherent(&pdev->dev, dev->workbuf.size,
+				  dev->workbuf.vaddr, dev->workbuf.paddr);
+	return 0;
+}
+
+static struct platform_driver coda_driver = {
+	.probe	= coda_probe,
+	.remove	= __devexit_p(coda_remove),
+	.driver	= {
+		.name	= CODA_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(coda_dt_ids),
+	},
+	.id_table = coda_platform_ids,
+};
+
+module_platform_driver(coda_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
diff --git a/drivers/media/platform/coda.h b/drivers/media/platform/coda.h
new file mode 100644
index 0000000..3fbb315
--- /dev/null
+++ b/drivers/media/platform/coda.h
@@ -0,0 +1,216 @@
+/*
+ * linux/drivers/media/platform/coda.h
+ *
+ * Copyright (C) 2012 Vista Silicon SL
+ *    Javier Martin <javier.martin@vista-silicon.com>
+ *    Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _REGS_CODA_H_
+#define _REGS_CODA_H_
+
+/* HW registers */
+#define CODA_REG_BIT_CODE_RUN			0x000
+#define		CODA_REG_RUN_ENABLE		(1 << 0)
+#define CODA_REG_BIT_CODE_DOWN			0x004
+#define		CODA_DOWN_ADDRESS_SET(x)	(((x) & 0xffff) << 16)
+#define		CODA_DOWN_DATA_SET(x)		((x) & 0xffff)
+#define CODA_REG_BIT_HOST_IN_REQ		0x008
+#define CODA_REG_BIT_INT_CLEAR			0x00c
+#define		CODA_REG_BIT_INT_CLEAR_SET	0x1
+#define CODA_REG_BIT_INT_STATUS		0x010
+#define CODA_REG_BIT_CODE_RESET		0x014
+#define		CODA_REG_RESET_ENABLE		(1 << 0)
+#define CODA_REG_BIT_CUR_PC			0x018
+
+/* Static SW registers */
+#define CODA_REG_BIT_CODE_BUF_ADDR		0x100
+#define CODA_REG_BIT_WORK_BUF_ADDR		0x104
+#define CODA_REG_BIT_PARA_BUF_ADDR		0x108
+#define CODA_REG_BIT_STREAM_CTRL		0x10c
+#define		CODA7_STREAM_BUF_PIC_RESET	(1 << 4)
+#define		CODADX6_STREAM_BUF_PIC_RESET	(1 << 3)
+#define		CODA7_STREAM_BUF_PIC_FLUSH	(1 << 3)
+#define		CODADX6_STREAM_BUF_PIC_FLUSH	(1 << 2)
+#define		CODA7_STREAM_BUF_DYNALLOC_EN	(1 << 5)
+#define		CODADX6_STREAM_BUF_DYNALLOC_EN	(1 << 4)
+#define 	CODA_STREAM_CHKDIS_OFFSET	(1 << 1)
+#define		CODA_STREAM_ENDIAN_SELECT	(1 << 0)
+#define CODA_REG_BIT_FRAME_MEM_CTRL		0x110
+#define		CODA_IMAGE_ENDIAN_SELECT	(1 << 0)
+#define CODA_REG_BIT_RD_PTR(x)			(0x120 + 8 * (x))
+#define CODA_REG_BIT_WR_PTR(x)			(0x124 + 8 * (x))
+#define CODA_REG_BIT_SEARCH_RAM_BASE_ADDR	0x140
+#define CODA_REG_BIT_BUSY			0x160
+#define		CODA_REG_BIT_BUSY_FLAG		1
+#define CODA_REG_BIT_RUN_COMMAND		0x164
+#define		CODA_COMMAND_SEQ_INIT		1
+#define		CODA_COMMAND_SEQ_END		2
+#define		CODA_COMMAND_PIC_RUN		3
+#define		CODA_COMMAND_SET_FRAME_BUF	4
+#define		CODA_COMMAND_ENCODE_HEADER	5
+#define		CODA_COMMAND_ENC_PARA_SET	6
+#define		CODA_COMMAND_DEC_PARA_SET	7
+#define		CODA_COMMAND_DEC_BUF_FLUSH	8
+#define		CODA_COMMAND_RC_CHANGE_PARAMETER 9
+#define		CODA_COMMAND_FIRMWARE_GET	0xf
+#define CODA_REG_BIT_RUN_INDEX			0x168
+#define		CODA_INDEX_SET(x)		((x) & 0x3)
+#define CODA_REG_BIT_RUN_COD_STD		0x16c
+#define		CODADX6_MODE_DECODE_MP4		0
+#define		CODADX6_MODE_ENCODE_MP4		1
+#define		CODADX6_MODE_DECODE_H264	2
+#define		CODADX6_MODE_ENCODE_H264	3
+#define		CODA7_MODE_DECODE_H264		0
+#define		CODA7_MODE_DECODE_VC1		1
+#define		CODA7_MODE_DECODE_MP2		2
+#define		CODA7_MODE_DECODE_MP4		3
+#define		CODA7_MODE_DECODE_DV3		3
+#define		CODA7_MODE_DECODE_RV		4
+#define		CODA7_MODE_DECODE_MJPG		5
+#define		CODA7_MODE_ENCODE_H264		8
+#define		CODA7_MODE_ENCODE_MP4		11
+#define		CODA7_MODE_ENCODE_MJPG		13
+#define 	CODA_MODE_INVALID		0xffff
+#define CODA_REG_BIT_INT_ENABLE		0x170
+#define		CODA_INT_INTERRUPT_ENABLE	(1 << 3)
+
+/*
+ * Commands' mailbox:
+ * registers with offsets in the range 0x180-0x1d0
+ * have different meaning depending on the command being
+ * issued.
+ */
+
+/* Encoder Sequence Initialization */
+#define CODA_CMD_ENC_SEQ_BB_START				0x180
+#define CODA_CMD_ENC_SEQ_BB_SIZE				0x184
+#define CODA_CMD_ENC_SEQ_OPTION				0x188
+#define		CODA_OPTION_GAMMA_OFFSET			7
+#define		CODA_OPTION_GAMMA_MASK				0x01
+#define		CODA_OPTION_LIMITQP_OFFSET			6
+#define		CODA_OPTION_LIMITQP_MASK			0x01
+#define		CODA_OPTION_RCINTRAQP_OFFSET			5
+#define		CODA_OPTION_RCINTRAQP_MASK			0x01
+#define		CODA_OPTION_FMO_OFFSET				4
+#define		CODA_OPTION_FMO_MASK				0x01
+#define		CODA_OPTION_SLICEREPORT_OFFSET			1
+#define		CODA_OPTION_SLICEREPORT_MASK			0x01
+#define CODA_CMD_ENC_SEQ_COD_STD				0x18c
+#define		CODA_STD_MPEG4					0
+#define		CODA_STD_H263					1
+#define		CODA_STD_H264					2
+#define		CODA_STD_MJPG					3
+#define CODA_CMD_ENC_SEQ_SRC_SIZE				0x190
+#define		CODA7_PICWIDTH_OFFSET				16
+#define		CODA7_PICWIDTH_MASK				0xffff
+#define		CODADX6_PICWIDTH_OFFSET				10
+#define		CODADX6_PICWIDTH_MASK				0x3ff
+#define		CODA_PICHEIGHT_OFFSET				0
+#define		CODA_PICHEIGHT_MASK				0x3ff
+#define CODA_CMD_ENC_SEQ_SRC_F_RATE				0x194
+#define CODA_CMD_ENC_SEQ_MP4_PARA				0x198
+#define		CODA_MP4PARAM_VERID_OFFSET			6
+#define		CODA_MP4PARAM_VERID_MASK			0x01
+#define		CODA_MP4PARAM_INTRADCVLCTHR_OFFSET		2
+#define		CODA_MP4PARAM_INTRADCVLCTHR_MASK		0x07
+#define		CODA_MP4PARAM_REVERSIBLEVLCENABLE_OFFSET	1
+#define		CODA_MP4PARAM_REVERSIBLEVLCENABLE_MASK		0x01
+#define		CODA_MP4PARAM_DATAPARTITIONENABLE_OFFSET	0
+#define		CODA_MP4PARAM_DATAPARTITIONENABLE_MASK		0x01
+#define CODA_CMD_ENC_SEQ_263_PARA				0x19c
+#define		CODA_263PARAM_ANNEXJENABLE_OFFSET		2
+#define		CODA_263PARAM_ANNEXJENABLE_MASK		0x01
+#define		CODA_263PARAM_ANNEXKENABLE_OFFSET		1
+#define		CODA_263PARAM_ANNEXKENABLE_MASK		0x01
+#define		CODA_263PARAM_ANNEXTENABLE_OFFSET		0
+#define		CODA_263PARAM_ANNEXTENABLE_MASK		0x01
+#define CODA_CMD_ENC_SEQ_264_PARA				0x1a0
+#define		CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET	12
+#define		CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK	0x0f
+#define		CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET	8
+#define		CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK	0x0f
+#define		CODA_264PARAM_DISABLEDEBLK_OFFSET		6
+#define		CODA_264PARAM_DISABLEDEBLK_MASK		0x01
+#define		CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET	5
+#define		CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK	0x01
+#define		CODA_264PARAM_CHROMAQPOFFSET_OFFSET		0
+#define		CODA_264PARAM_CHROMAQPOFFSET_MASK		0x1f
+#define CODA_CMD_ENC_SEQ_SLICE_MODE				0x1a4
+#define		CODA_SLICING_SIZE_OFFSET			2
+#define		CODA_SLICING_SIZE_MASK				0x3fffffff
+#define		CODA_SLICING_UNIT_OFFSET			1
+#define		CODA_SLICING_UNIT_MASK				0x01
+#define		CODA_SLICING_MODE_OFFSET			0
+#define		CODA_SLICING_MODE_MASK				0x01
+#define CODA_CMD_ENC_SEQ_GOP_SIZE				0x1a8
+#define		CODA_GOP_SIZE_OFFSET				0
+#define		CODA_GOP_SIZE_MASK				0x3f
+#define CODA_CMD_ENC_SEQ_RC_PARA				0x1ac
+#define		CODA_RATECONTROL_AUTOSKIP_OFFSET		31
+#define		CODA_RATECONTROL_AUTOSKIP_MASK			0x01
+#define		CODA_RATECONTROL_INITIALDELAY_OFFSET		16
+#define		CODA_RATECONTROL_INITIALDELAY_MASK		0x7f
+#define		CODA_RATECONTROL_BITRATE_OFFSET		1
+#define		CODA_RATECONTROL_BITRATE_MASK			0x7f
+#define		CODA_RATECONTROL_ENABLE_OFFSET			0
+#define		CODA_RATECONTROL_ENABLE_MASK			0x01
+#define CODA_CMD_ENC_SEQ_RC_BUF_SIZE				0x1b0
+#define CODA_CMD_ENC_SEQ_INTRA_REFRESH				0x1b4
+#define CODA_CMD_ENC_SEQ_FMO					0x1b8
+#define		CODA_FMOPARAM_TYPE_OFFSET			4
+#define		CODA_FMOPARAM_TYPE_MASK				1
+#define		CODA_FMOPARAM_SLICENUM_OFFSET			0
+#define		CODA_FMOPARAM_SLICENUM_MASK			0x0f
+#define CODA_CMD_ENC_SEQ_RC_QP_MAX				0x1c8
+#define		CODA_QPMAX_OFFSET				0
+#define		CODA_QPMAX_MASK					0x3f
+#define CODA_CMD_ENC_SEQ_RC_GAMMA				0x1cc
+#define		CODA_GAMMA_OFFSET				0
+#define		CODA_GAMMA_MASK					0xffff
+#define CODA_RET_ENC_SEQ_SUCCESS				0x1c0
+
+/* Encoder Picture Run */
+#define CODA_CMD_ENC_PIC_SRC_ADDR_Y	0x180
+#define CODA_CMD_ENC_PIC_SRC_ADDR_CB	0x184
+#define CODA_CMD_ENC_PIC_SRC_ADDR_CR	0x188
+#define CODA_CMD_ENC_PIC_QS		0x18c
+#define CODA_CMD_ENC_PIC_ROT_MODE	0x190
+#define CODA_CMD_ENC_PIC_OPTION	0x194
+#define CODA_CMD_ENC_PIC_BB_START	0x198
+#define CODA_CMD_ENC_PIC_BB_SIZE	0x19c
+#define CODA_RET_ENC_PIC_TYPE		0x1c4
+#define CODA_RET_ENC_PIC_SLICE_NUM	0x1cc
+#define CODA_RET_ENC_PIC_FLAG		0x1d0
+
+/* Set Frame Buffer */
+#define CODA_CMD_SET_FRAME_BUF_NUM	0x180
+#define CODA_CMD_SET_FRAME_BUF_STRIDE	0x184
+
+/* Encoder Header */
+#define CODA_CMD_ENC_HEADER_CODE	0x180
+#define		CODA_GAMMA_OFFSET	0
+#define		CODA_HEADER_H264_SPS	0
+#define		CODA_HEADER_H264_PPS	1
+#define		CODA_HEADER_MP4V_VOL	0
+#define		CODA_HEADER_MP4V_VOS	1
+#define		CODA_HEADER_MP4V_VIS	2
+#define CODA_CMD_ENC_HEADER_BB_START	0x184
+#define CODA_CMD_ENC_HEADER_BB_SIZE	0x188
+
+/* Get Version */
+#define CODA_CMD_FIRMWARE_VERNUM		0x1c0
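+/* version word layout: [31:16] product, [15:12] major, [11:8] minor, [7:0] release */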
+#define		CODA_FIRMWARE_PRODUCT(x)	(((x) >> 16) & 0xffff)
+#define		CODA_FIRMWARE_MAJOR(x)		(((x) >> 12) & 0x0f)
+#define		CODA_FIRMWARE_MINOR(x)		(((x) >> 8) & 0x0f)
+#define		CODA_FIRMWARE_RELEASE(x)	((x) & 0xff)
+#define		CODA_FIRMWARE_VERNUM(product, major, minor, release)	\
+			((product) << 16 | ((major) << 12) |		\
+			((minor) << 8) | (release))
+
+#endif
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
new file mode 100644
index 0000000..52c5ca6
--- /dev/null
+++ b/drivers/media/platform/davinci/Kconfig
@@ -0,0 +1,121 @@
+config VIDEO_DAVINCI_VPIF_DISPLAY
+	tristate "DM646x/DA850/OMAPL138 EVM Video Display"
+	depends on VIDEO_DEV && (MACH_DAVINCI_DM6467_EVM || MACH_DAVINCI_DA850_EVM)
+	select VIDEOBUF2_DMA_CONTIG
+	select VIDEO_DAVINCI_VPIF
+	select VIDEO_ADV7343 if VIDEO_HELPER_CHIPS_AUTO
+	select VIDEO_THS7303 if VIDEO_HELPER_CHIPS_AUTO
+	help
+	  Enables the DaVinci VPIF module used for display devices.
+	  This module is common to the following DM6467/DA850/OMAPL138
+	  based display devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vpif_display.
+
+config VIDEO_DAVINCI_VPIF_CAPTURE
+	tristate "DM646x/DA850/OMAPL138 EVM Video Capture"
+	depends on VIDEO_DEV && (MACH_DAVINCI_DM6467_EVM || MACH_DAVINCI_DA850_EVM)
+	select VIDEOBUF2_DMA_CONTIG
+	select VIDEO_DAVINCI_VPIF
+	help
+	  Enables the DaVinci VPIF module used for capture devices.
+	  This module is common to the following DM6467/DA850/OMAPL138
+	  based capture devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vpif_capture.
+
+config VIDEO_DAVINCI_VPIF
+	tristate "DaVinci VPIF Driver"
+	depends on VIDEO_DAVINCI_VPIF_DISPLAY || VIDEO_DAVINCI_VPIF_CAPTURE
+	help
+	  Support for DaVinci VPIF Driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vpif.
+
+config VIDEO_VPSS_SYSTEM
+	tristate "VPSS System module driver"
+	depends on ARCH_DAVINCI
+	help
+	  Support for the VPSS system module used by DaVinci video drivers.
+
+config VIDEO_VPFE_CAPTURE
+	tristate "VPFE Video Capture Driver"
+	depends on VIDEO_V4L2 && (ARCH_DAVINCI || ARCH_OMAP3)
+	depends on I2C
+	select VIDEOBUF_DMA_CONTIG
+	help
+	  Support for the DMx/AMx VPFE based frame grabber. This is the
+	  common V4L2 module for the following DMx/AMx SoCs from Texas
+	  Instruments: DM6446, DM365, DM355 and AM3517/05.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vpfe-capture.
+
+config VIDEO_DM6446_CCDC
+	tristate "DM6446 CCDC HW module"
+	depends on VIDEO_VPFE_CAPTURE
+	select VIDEO_VPSS_SYSTEM
+	default y
+	help
+	   Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
+	   with decoder modules such as TVP5146 over BT656 or
+	   sensor module such as MT9T001 over a raw interface. This
+	   module configures the interface and CCDC/ISIF to do
+	   video frame capture from slave decoders.
+
+	   To compile this driver as a module, choose M here: the
+	   module will be called vpfe.
+
+config VIDEO_DM355_CCDC
+	tristate "DM355 CCDC HW module"
+	depends on ARCH_DAVINCI_DM355 && VIDEO_VPFE_CAPTURE
+	select VIDEO_VPSS_SYSTEM
+	default y
+	help
+	   Enables DM355 CCD hw module. DM355 CCDC hw interfaces
+	   with decoder modules such as TVP5146 over BT656 or
+	   sensor module such as MT9T001 over a raw interface. This
+	   module configures the interface and CCDC/ISIF to do
+	   video frame capture from slave decoders.
+
+	   To compile this driver as a module, choose M here: the
+	   module will be called vpfe.
+
+config VIDEO_ISIF
+	tristate "ISIF HW module"
+	depends on ARCH_DAVINCI_DM365 && VIDEO_VPFE_CAPTURE
+	select VIDEO_VPSS_SYSTEM
+	default y
+	help
+	   Enables ISIF hw module. This is the hardware module for
+	   configuring ISIF in VPFE to capture Raw Bayer RGB data from
+	   an image sensor or YUV data from a YUV source.
+
+	   To compile this driver as a module, choose M here: the
+	   module will be called vpfe.
+
+config VIDEO_DM644X_VPBE
+	tristate "DM644X VPBE HW module"
+	depends on ARCH_DAVINCI_DM644x
+	select VIDEO_VPSS_SYSTEM
+	select VIDEOBUF_DMA_CONTIG
+	help
+	    Enables VPBE modules used for display on a DM644x
+	    SoC.
+
+	    To compile this driver as a module, choose M here: the
+	    module will be called vpbe.
+
+
+config VIDEO_VPBE_DISPLAY
+	tristate "VPBE V4L2 Display driver"
+	depends on ARCH_DAVINCI_DM644x
+	select VIDEO_DM644X_VPBE
+	help
+	    Enables VPBE V4L2 Display driver on a DM644x device
+
+	    To compile this driver as a module, choose M here: the
+	    module will be called vpbe_display.
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/davinci/Makefile
new file mode 100644
index 0000000..74ed92d
--- /dev/null
+++ b/drivers/media/platform/davinci/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the davinci video device drivers.
+#
+
+# VPIF
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF) += vpif.o
+
+#VPIF Display driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif_display.o
+#VPIF Capture driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif_capture.o
+
+# Capture: DM6446 and DM355
+obj-$(CONFIG_VIDEO_VPSS_SYSTEM) += vpss.o
+obj-$(CONFIG_VIDEO_VPFE_CAPTURE) += vpfe_capture.o
+obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o
+obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o
+obj-$(CONFIG_VIDEO_ISIF) += isif.o
+obj-$(CONFIG_VIDEO_DM644X_VPBE) += vpbe.o vpbe_osd.o vpbe_venc.o
+obj-$(CONFIG_VIDEO_VPBE_DISPLAY) += vpbe_display.o
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
new file mode 100644
index 0000000..86b9b35
--- /dev/null
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ccdc device API
+ */
+#ifndef _CCDC_HW_DEVICE_H
+#define _CCDC_HW_DEVICE_H
+
+#ifdef __KERNEL__
+#include <linux/videodev2.h>
+#include <linux/device.h>
+#include <media/davinci/vpfe_types.h>
+#include <media/davinci/ccdc_types.h>
+
+/*
+ * ccdc hw operations
+ */
+struct ccdc_hw_ops {
+	/* Pointer to initialize function to initialize ccdc device */
+	int (*open) (struct device *dev);
+	/* Pointer to deinitialize function */
+	int (*close) (struct device *dev);
+	/* set ccdc base address */
+	void (*set_ccdc_base)(void *base, int size);
+	/* Pointer to function to enable or disable ccdc */
+	void (*enable) (int en);
+	/* reset sbl. only for 6446 */
+	void (*reset) (void);
+	/* enable output to sdram */
+	void (*enable_out_to_sdram) (int en);
+	/* Pointer to function to set hw parameters */
+	int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
+	/* get interface parameters */
+	int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
+	/*
+	 * Pointer to function to set parameters. Used
+	 * for implementing VPFE_S_CCDC_PARAMS
+	 */
+	int (*set_params) (void *params);
+	/*
+	 * Pointer to function to get parameter. Used
+	 * for implementing VPFE_G_CCDC_PARAMS
+	 */
+	int (*get_params) (void *params);
+	/* Pointer to function to configure ccdc */
+	int (*configure) (void);
+
+	/* Pointer to function to set buffer type */
+	int (*set_buftype) (enum ccdc_buftype buf_type);
+	/* Pointer to function to get buffer type */
+	enum ccdc_buftype (*get_buftype) (void);
+	/* Pointer to function to set frame format */
+	int (*set_frame_format) (enum ccdc_frmfmt frm_fmt);
+	/* Pointer to function to get frame format */
+	enum ccdc_frmfmt (*get_frame_format) (void);
+	/* enumerate hw pix formats */
+	int (*enum_pix)(u32 *hw_pix, int i);
+	/* Pointer to function to get pixel format */
+	u32 (*get_pixel_format) (void);
+	/* Pointer to function to set pixel format */
+	int (*set_pixel_format) (u32 pixfmt);
+	/* Pointer to function to set image window */
+	int (*set_image_window) (struct v4l2_rect *win);
+	/* Pointer to function to get image window */
+	void (*get_image_window) (struct v4l2_rect *win);
+	/* Pointer to function to get line length */
+	unsigned int (*get_line_length) (void);
+
+	/* Query CCDC control IDs */
+	int (*queryctrl)(struct v4l2_queryctrl *qctrl);
+	/* Set CCDC control */
+	int (*set_control)(struct v4l2_control *ctrl);
+	/* Get CCDC control */
+	int (*get_control)(struct v4l2_control *ctrl);
+
+	/* Pointer to function to set frame buffer address */
+	void (*setfbaddr) (unsigned long addr);
+	/* Pointer to function to get field id */
+	int (*getfid) (void);
+};
+
+struct ccdc_hw_device {
+	/* ccdc device name */
+	char name[32];
+	/* module owner */
+	struct module *owner;
+	/* hw ops */
+	struct ccdc_hw_ops hw_ops;
+};
+
+/* Used by CCDC module to register & unregister with vpfe capture driver */
+int vpfe_register_ccdc_device(struct ccdc_hw_device *dev);
+void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev);
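+
+/*
+ * Typical usage (a rough sketch based on the DM355/DM644x CCDC modules in
+ * this patch): a CCDC hardware module fills in a static struct
+ * ccdc_hw_device with its name, THIS_MODULE as owner and its hw_ops
+ * callbacks, calls vpfe_register_ccdc_device() from its platform driver
+ * probe() before mapping its registers, and calls
+ * vpfe_unregister_ccdc_device() from remove() and from the probe() error
+ * path.
+ */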
+
+#endif
+#endif
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
new file mode 100644
index 0000000..ce0e413
--- /dev/null
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * CCDC hardware module for DM355
+ * ------------------------------
+ *
+ * This module configures the DM355 CCD controller (CCDC) of the VPFE to
+ * capture raw YUV or Bayer RGB data from a decoder. The CCDC has several
+ * sub-modules, such as Defect Pixel Correction and Color Space Conversion,
+ * to pre-process the Bayer RGB data before writing it to SDRAM. This
+ * module also allows an application to configure individual
+ * sub-module parameters through the VPFE_CMD_S_CCDC_RAW_PARAMS ioctl.
+ * To do so, the application includes the dm355_ccdc.h and vpfe_capture.h
+ * header files. The set_params() API is called by the vpfe_capture driver
+ * to configure module parameters.
+ *
+ * TODO: 1) Raw Bayer parameter settings and Bayer capture
+ *	 2) Split the module parameter structure into module-specific
+ *	    ioctl structs
+ *	 3) Add support for lens shading correction
+ *	 4) Investigate whether enums used for user space type definitions
+ *	    should be replaced by #defines or integers
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/dm355_ccdc.h>
+#include <media/davinci/vpss.h>
+
+#include "dm355_ccdc_regs.h"
+#include "ccdc_hw_device.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCDC Driver for DM355");
+MODULE_AUTHOR("Texas Instruments");
+
+static struct ccdc_oper_config {
+	struct device *dev;
+	/* CCDC interface type */
+	enum vpfe_hw_if_type if_type;
+	/* Raw Bayer configuration */
+	struct ccdc_params_raw bayer;
+	/* YCbCr configuration */
+	struct ccdc_params_ycbcr ycbcr;
+	/* Master clock */
+	struct clk *mclk;
+	/* slave clock */
+	struct clk *sclk;
+	/* ccdc base address */
+	void __iomem *base_addr;
+} ccdc_cfg = {
+	/* Raw configurations */
+	.bayer = {
+		.pix_fmt = CCDC_PIXFMT_RAW,
+		.frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+		.win = CCDC_WIN_VGA,
+		.fid_pol = VPFE_PINPOL_POSITIVE,
+		.vd_pol = VPFE_PINPOL_POSITIVE,
+		.hd_pol = VPFE_PINPOL_POSITIVE,
+		.gain = {
+			.r_ye = 256,
+			.gb_g = 256,
+			.gr_cy = 256,
+			.b_mg = 256
+		},
+		.config_params = {
+			.datasft = 2,
+			.mfilt1 = CCDC_NO_MEDIAN_FILTER1,
+			.mfilt2 = CCDC_NO_MEDIAN_FILTER2,
+			.alaw = {
+				.gama_wd = 2,
+			},
+			.blk_clamp = {
+				.sample_pixel = 1,
+				.dc_sub = 25
+			},
+			.col_pat_field0 = {
+				.olop = CCDC_GREEN_BLUE,
+				.olep = CCDC_BLUE,
+				.elop = CCDC_RED,
+				.elep = CCDC_GREEN_RED
+			},
+			.col_pat_field1 = {
+				.olop = CCDC_GREEN_BLUE,
+				.olep = CCDC_BLUE,
+				.elop = CCDC_RED,
+				.elep = CCDC_GREEN_RED
+			},
+		},
+	},
+	/* YCbCr configuration */
+	.ycbcr = {
+		.win = CCDC_WIN_PAL,
+		.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+		.frm_fmt = CCDC_FRMFMT_INTERLACED,
+		.fid_pol = VPFE_PINPOL_POSITIVE,
+		.vd_pol = VPFE_PINPOL_POSITIVE,
+		.hd_pol = VPFE_PINPOL_POSITIVE,
+		.bt656_enable = 1,
+		.pix_order = CCDC_PIXORDER_CBYCRY,
+		.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
+	},
+};
+
+
+/* Raw Bayer formats */
+static u32 ccdc_raw_bayer_pix_formats[] =
+		{V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static u32 ccdc_raw_yuv_pix_formats[] =
+		{V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+	return __raw_readl(ccdc_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+	__raw_writel(val, ccdc_cfg.base_addr + offset);
+}
+
+static void ccdc_enable(int en)
+{
+	unsigned int temp;
+	temp = regr(SYNCEN);
+	temp &= (~CCDC_SYNCEN_VDHDEN_MASK);
+	temp |= (en & CCDC_SYNCEN_VDHDEN_MASK);
+	regw(temp, SYNCEN);
+}
+
+static void ccdc_enable_output_to_sdram(int en)
+{
+	unsigned int temp;
+	temp = regr(SYNCEN);
+	temp &= (~(CCDC_SYNCEN_WEN_MASK));
+	temp |= ((en << CCDC_SYNCEN_WEN_SHIFT) & CCDC_SYNCEN_WEN_MASK);
+	regw(temp, SYNCEN);
+}
+
+static void ccdc_config_gain_offset(void)
+{
+	/* configure gain */
+	regw(ccdc_cfg.bayer.gain.r_ye, RYEGAIN);
+	regw(ccdc_cfg.bayer.gain.gr_cy, GRCYGAIN);
+	regw(ccdc_cfg.bayer.gain.gb_g, GBGGAIN);
+	regw(ccdc_cfg.bayer.gain.b_mg, BMGGAIN);
+	/* configure offset */
+	regw(ccdc_cfg.bayer.ccdc_offset, OFFSET);
+}
+
+/*
+ * ccdc_restore_defaults()
+ * This function restores power-on defaults in the ccdc registers
+ */
+static int ccdc_restore_defaults(void)
+{
+	int i;
+
+	dev_dbg(ccdc_cfg.dev, "\nstarting ccdc_restore_defaults...");
+	/* set all registers to zero */
+	for (i = 0; i <= CCDC_REG_LAST; i += 4)
+		regw(0, i);
+
+	/* now override the values with power on defaults in registers */
+	regw(MODESET_DEFAULT, MODESET);
+	/* no culling support */
+	regw(CULH_DEFAULT, CULH);
+	regw(CULV_DEFAULT, CULV);
+	/* Set default Gain and Offset */
+	ccdc_cfg.bayer.gain.r_ye = GAIN_DEFAULT;
+	ccdc_cfg.bayer.gain.gb_g = GAIN_DEFAULT;
+	ccdc_cfg.bayer.gain.gr_cy = GAIN_DEFAULT;
+	ccdc_cfg.bayer.gain.b_mg = GAIN_DEFAULT;
+	ccdc_config_gain_offset();
+	regw(OUTCLIP_DEFAULT, OUTCLIP);
+	regw(LSCCFG2_DEFAULT, LSCCFG2);
+	/* select ccdc input */
+	if (vpss_select_ccdc_source(VPSS_CCDCIN)) {
+		dev_dbg(ccdc_cfg.dev, "\ncouldn't select ccdc input source");
+		return -EFAULT;
+	}
+	/* select ccdc clock */
+	if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0) {
+		dev_dbg(ccdc_cfg.dev, "\ncouldn't enable ccdc clock");
+		return -EFAULT;
+	}
+	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_restore_defaults...");
+	return 0;
+}
+
+static int ccdc_open(struct device *device)
+{
+	return ccdc_restore_defaults();
+}
+
+static int ccdc_close(struct device *device)
+{
+	/* disable clock */
+	vpss_enable_clock(VPSS_CCDC_CLOCK, 0);
+	/* do nothing for now */
+	return 0;
+}
+/*
+ * ccdc_setwin()
+ * This function configures the capture window size in
+ * the CCDC registers.
+ */
+static void ccdc_setwin(struct v4l2_rect *image_win,
+			enum ccdc_frmfmt frm_fmt, int ppc)
+{
+	int horz_start, horz_nr_pixels;
+	int vert_start, vert_nr_lines;
+	int mid_img = 0;
+
+	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
+
+	/*
+	 * ppc - per pixel count. Indicates how many pixels per cell are
+	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
+	 * so 2; for raw capture it is 1.
+	 */
+	horz_start = image_win->left << (ppc - 1);
+	horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
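+	/*
+	 * Worked example (hypothetical numbers): for a 720-pixel wide
+	 * YCbCr window at left = 0, ppc = 2 gives horz_start = 0 and
+	 * horz_nr_pixels = (720 << 1) - 1 = 1439; raw capture (ppc = 1)
+	 * of the same window gives horz_nr_pixels = 719.
+	 */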
+
+	/* Writing the horizontal info into the registers */
+	regw(horz_start, SPH);
+	regw(horz_nr_pixels, NPH);
+	vert_start = image_win->top;
+
+	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		vert_nr_lines = (image_win->height >> 1) - 1;
+		vert_start >>= 1;
+		/* Since first line doesn't have any data */
+		vert_start += 1;
+		/* configure VDINT0 and VDINT1 */
+		regw(vert_start, VDINT0);
+	} else {
+		/* Since first line doesn't have any data */
+		vert_start += 1;
+		vert_nr_lines = image_win->height - 1;
+		/* configure VDINT0 and VDINT1 */
+		mid_img = vert_start + (image_win->height / 2);
+		regw(vert_start, VDINT0);
+		regw(mid_img, VDINT1);
+	}
+	regw(vert_start & CCDC_START_VER_ONE_MASK, SLV0);
+	regw(vert_start & CCDC_START_VER_TWO_MASK, SLV1);
+	regw(vert_nr_lines & CCDC_NUM_LINES_VER, NLV);
+	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
+}
+
+static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
+{
+	if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT ||
+	    ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) {
+		dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n");
+		return -EINVAL;
+	}
+
+	if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 ||
+	    ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) {
+		dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n");
+		return -EINVAL;
+	}
+
+	if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 ||
+	    ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) {
+		dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n");
+		return -EINVAL;
+	}
+
+	if ((ccdcparam->med_filt_thres < 0) ||
+	   (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) {
+		dev_dbg(ccdc_cfg.dev,
+			"Invalid value of median filter threshold\n");
+		return -EINVAL;
+	}
+
+	if (ccdcparam->data_sz < CCDC_DATA_16BITS ||
+	    ccdcparam->data_sz > CCDC_DATA_8BITS) {
+		dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n");
+		return -EINVAL;
+	}
+
+	if (ccdcparam->alaw.enable) {
+		if (ccdcparam->alaw.gama_wd < CCDC_GAMMA_BITS_13_4 ||
+		    ccdcparam->alaw.gama_wd > CCDC_GAMMA_BITS_09_0) {
+			dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n");
+			return -EINVAL;
+		}
+	}
+
+	if (ccdcparam->blk_clamp.b_clamp_enable) {
+		if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS ||
+		    ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) {
+			dev_dbg(ccdc_cfg.dev,
+				"Invalid value of sample pixel\n");
+			return -EINVAL;
+		}
+		if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES ||
+		    ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) {
+			dev_dbg(ccdc_cfg.dev,
+				"Invalid value of sample lines\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parameter operations */
+static int ccdc_set_params(void __user *params)
+{
+	struct ccdc_config_params_raw ccdc_raw_params;
+	int x;
+
+	/* only raw module parameters can be set through the IOCTL */
+	if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
+		return -EINVAL;
+
+	x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
+	if (x) {
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc"
+			"params, %d\n", x);
+		return -EFAULT;
+	}
+
+	if (!validate_ccdc_param(&ccdc_raw_params)) {
+		memcpy(&ccdc_cfg.bayer.config_params,
+			&ccdc_raw_params,
+			sizeof(ccdc_raw_params));
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/* This function will configure CCDC for YCbCr video capture */
+static void ccdc_config_ycbcr(void)
+{
+	struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
+	u32 temp;
+
+	/* first set the CCDC power on defaults values in all registers */
+	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
+	ccdc_restore_defaults();
+
+	/* configure pixel format & video frame format */
+	temp = (((params->pix_fmt & CCDC_INPUT_MODE_MASK) <<
+		CCDC_INPUT_MODE_SHIFT) |
+		((params->frm_fmt & CCDC_FRM_FMT_MASK) <<
+		CCDC_FRM_FMT_SHIFT));
+
+	/* setup BT.656 sync mode */
+	if (params->bt656_enable) {
+		regw(CCDC_REC656IF_BT656_EN, REC656IF);
+		/*
+		 * configure the FID, VD, HD pin polarity fld,hd pol positive,
+		 * vd negative, 8-bit pack mode
+		 */
+		temp |= CCDC_VD_POL_NEGATIVE;
+	} else {		/* y/c external sync mode */
+		temp |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
+			CCDC_FID_POL_SHIFT) |
+			((params->hd_pol & CCDC_HD_POL_MASK) <<
+			CCDC_HD_POL_SHIFT) |
+			((params->vd_pol & CCDC_VD_POL_MASK) <<
+			CCDC_VD_POL_SHIFT));
+	}
+
+	/* pack the data to 8-bit */
+	temp |= CCDC_DATA_PACK_ENABLE;
+
+	regw(temp, MODESET);
+
+	/* configure video window */
+	ccdc_setwin(&params->win, params->frm_fmt, 2);
+
+	/* configure the order of y cb cr in SD-RAM */
+	temp = (params->pix_order << CCDC_Y8POS_SHIFT);
+	temp |= CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC;
+	regw(temp, CCDCFG);
+
+	/*
+	 * configure the horizontal line offset. This is done by rounding the
+	 * width up to a multiple of 16 pixels and multiplying by two to
+	 * account for Y:Cb:Cr 4:2:2 data
+	 */
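+	/*
+	 * e.g. (hypothetical) a 720-pixel wide window gives
+	 * (720 * 2 + 31) >> 5 = 45, i.e. a line offset of 45 * 32 = 1440
+	 * bytes.
+	 */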
+	regw(((params->win.width * 2 + 31) >> 5), HSIZE);
+
+	/* configure the memory line offset */
+	if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
+		/* two fields are interleaved in memory */
+		regw(CCDC_SDOFST_FIELD_INTERLEAVED, SDOFST);
+	}
+
+	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
+}
+
+/*
+ * ccdc_config_black_clamp()
+ * configure parameters for Optical Black Clamp
+ */
+static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
+{
+	u32 val;
+
+	if (!bclamp->b_clamp_enable) {
+		/* configure DCSub */
+		regw(bclamp->dc_sub & CCDC_BLK_DC_SUB_MASK, DCSUB);
+		regw(0x0000, CLAMP);
+		return;
+	}
+	/* Enable the Black clamping, set sample lines and pixels */
+	val = (bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) |
+	      ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
+		CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE;
+	regw(val, CLAMP);
+
+	/* If Black clamping is enabled then make dcsub 0 */
+	val = (bclamp->sample_ln & CCDC_NUM_LINE_CALC_MASK)
+			<< CCDC_NUM_LINE_CALC_SHIFT;
+	regw(val, DCSUB);
+}
+
+/*
+ * ccdc_config_black_compense()
+ * configure parameters for Black Compensation
+ */
+static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
+{
+	u32 val;
+
+	val = (bcomp->b & CCDC_BLK_COMP_MASK) |
+		((bcomp->gb & CCDC_BLK_COMP_MASK) <<
+		CCDC_BLK_COMP_GB_COMP_SHIFT);
+	regw(val, BLKCMP1);
+
+	val = ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
+		CCDC_BLK_COMP_GR_COMP_SHIFT) |
+		((bcomp->r & CCDC_BLK_COMP_MASK) <<
+		CCDC_BLK_COMP_R_COMP_SHIFT);
+	regw(val, BLKCMP0);
+}
+
+/*
+ * ccdc_write_dfc_entry()
+ * write an entry in the dfc table.
+ */
+int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
+{
+/* TODO This is to be re-visited and adjusted */
+#define DFC_WRITE_WAIT_COUNT	1000
+	u32 val, count = DFC_WRITE_WAIT_COUNT;
+
+	regw(dfc->dft_corr_vert[index], DFCMEM0);
+	regw(dfc->dft_corr_horz[index], DFCMEM1);
+	regw(dfc->dft_corr_sub1[index], DFCMEM2);
+	regw(dfc->dft_corr_sub2[index], DFCMEM3);
+	regw(dfc->dft_corr_sub3[index], DFCMEM4);
+	/* set WR bit to write */
+	val = regr(DFCMEMCTL) | CCDC_DFCMEMCTL_DFCMWR_MASK;
+	regw(val, DFCMEMCTL);
+
+	/*
+	 * Assume, it is very short. If we get an error, we need to
+	 * adjust this value
+	 */
+	while (regr(DFCMEMCTL) & CCDC_DFCMEMCTL_DFCMWR_MASK)
+		count--;
+	/*
+	 * TODO We expect the count to be non-zero to be successful. Adjust
+	 * the count if write requires more time
+	 */
+
+	if (!count) {
+		dev_err(ccdc_cfg.dev, "defect table write timeout !!!\n");
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * ccdc_config_vdfc()
+ * configure parameters for Vertical Defect Correction
+ */
+static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
+{
+	u32 val;
+	int i;
+
+	/* Configure General Defect Correction. The table used is from IPIPE */
+	val = dfc->gen_dft_en & CCDC_DFCCTL_GDFCEN_MASK;
+
+	/* Configure Vertical Defect Correction if needed */
+	if (!dfc->ver_dft_en) {
+		/* Enable only General Defect Correction */
+		regw(val, DFCCTL);
+		return 0;
+	}
+
+	if (dfc->table_size > CCDC_DFT_TABLE_SIZE)
+		return -EINVAL;
+
+	val |= CCDC_DFCCTL_VDFC_DISABLE;
+	val |= (dfc->dft_corr_ctl.vdfcsl & CCDC_DFCCTL_VDFCSL_MASK) <<
+		CCDC_DFCCTL_VDFCSL_SHIFT;
+	val |= (dfc->dft_corr_ctl.vdfcuda & CCDC_DFCCTL_VDFCUDA_MASK) <<
+		CCDC_DFCCTL_VDFCUDA_SHIFT;
+	val |= (dfc->dft_corr_ctl.vdflsft & CCDC_DFCCTL_VDFLSFT_MASK) <<
+		CCDC_DFCCTL_VDFLSFT_SHIFT;
+	regw(val , DFCCTL);
+
+	/* clear address ptr to offset 0 */
+	val = CCDC_DFCMEMCTL_DFCMARST_MASK << CCDC_DFCMEMCTL_DFCMARST_SHIFT;
+
+	/* write defect table entries */
+	for (i = 0; i < dfc->table_size; i++) {
+		/* increment address for non zero index */
+		if (i != 0)
+			val = CCDC_DFCMEMCTL_INC_ADDR;
+		regw(val, DFCMEMCTL);
+		if (ccdc_write_dfc_entry(i, dfc) < 0)
+			return -EFAULT;
+	}
+
+	/* update saturation level and enable dfc */
+	regw(dfc->saturation_ctl & CCDC_VDC_DFCVSAT_MASK, DFCVSAT);
+	val = regr(DFCCTL) | (CCDC_DFCCTL_VDFCEN_MASK <<
+			CCDC_DFCCTL_VDFCEN_SHIFT);
+	regw(val, DFCCTL);
+	return 0;
+}
+
+/*
+ * ccdc_config_csc()
+ * configure parameters for color space conversion
+ * Each register CSCM0-7 has two values in S8Q5 format.
+ */
+static void ccdc_config_csc(struct ccdc_csc *csc)
+{
+	u32 val1, val2;
+	int i;
+
+	if (!csc->enable)
+		return;
+
+	/* Enable the CSC sub-module */
+	regw(CCDC_CSC_ENABLE, CSCCTL);
+
+	/* Converting the co-eff as per the format of the register */
+	for (i = 0; i < CCDC_CSC_COEFF_TABLE_SIZE; i++) {
+		if ((i % 2) == 0) {
+			/* CSCM - LSB */
+			val1 = (csc->coeff[i].integer &
+				CCDC_CSC_COEF_INTEG_MASK)
+				<< CCDC_CSC_COEF_INTEG_SHIFT;
+			/*
+			 * convert decimal part to binary. Use 2 decimal
+			 * precision, user values range from .00 - 0.99
+			 */
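+			/*
+			 * e.g. (hypothetical) a coefficient of 1.25 packs
+			 * as (1 << 5) | ((25 * 32) / 100) = 0x28, which is
+			 * 1 + 8/32 in S8Q5.
+			 */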
+			val1 |= (((csc->coeff[i].decimal &
+				CCDC_CSC_COEF_DECIMAL_MASK) *
+				CCDC_CSC_DEC_MAX) / 100);
+		} else {
+
+			/* CSCM - MSB */
+			val2 = (csc->coeff[i].integer &
+				CCDC_CSC_COEF_INTEG_MASK)
+				<< CCDC_CSC_COEF_INTEG_SHIFT;
+			val2 |= (((csc->coeff[i].decimal &
+				 CCDC_CSC_COEF_DECIMAL_MASK) *
+				 CCDC_CSC_DEC_MAX) / 100);
+			val2 <<= CCDC_CSCM_MSB_SHIFT;
+			val2 |= val1;
+			regw(val2, (CSCM0 + ((i - 1) << 1)));
+		}
+	}
+}
+
+/*
+ * ccdc_config_color_patterns()
+ * configure parameters for color patterns
+ */
+static void ccdc_config_color_patterns(struct ccdc_col_pat *pat0,
+				       struct ccdc_col_pat *pat1)
+{
+	u32 val;
+
+	val = (pat0->olop | (pat0->olep << 2) | (pat0->elop << 4) |
+		(pat0->elep << 6) | (pat1->olop << 8) | (pat1->olep << 10) |
+		(pat1->elop << 12) | (pat1->elep << 14));
+	regw(val, COLPTN);
+}
+
+/* This function will configure CCDC for Raw mode image capture */
+static int ccdc_config_raw(void)
+{
+	struct ccdc_params_raw *params = &ccdc_cfg.bayer;
+	struct ccdc_config_params_raw *config_params =
+					&ccdc_cfg.bayer.config_params;
+	unsigned int val;
+
+	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
+
+	/* restore power on defaults to register */
+	ccdc_restore_defaults();
+
+	/* CCDCFG register:
+	 * set CCD Not to swap input since input is RAW data
+	 * set FID detection function to Latch at V-Sync
+	 * set WENLOG - ccdc valid area to AND
+	 * set TRGSEL to WENBIT
+	 * set EXTRG to DISABLE
+	 * disable latching function on VSYNC - shadowed registers
+	 */
+	regw(CCDC_YCINSWP_RAW | CCDC_CCDCFG_FIDMD_LATCH_VSYNC |
+	     CCDC_CCDCFG_WENLOG_AND | CCDC_CCDCFG_TRGSEL_WEN |
+	     CCDC_CCDCFG_EXTRG_DISABLE | CCDC_LATCH_ON_VSYNC_DISABLE, CCDCFG);
+
+	/*
+	 * Set VDHD direction to input,  input type to raw input
+	 * normal data polarity, do not use external WEN
+	 */
+	val = (CCDC_VDHDOUT_INPUT | CCDC_RAW_IP_MODE | CCDC_DATAPOL_NORMAL |
+		CCDC_EXWEN_DISABLE);
+
+	/*
+	 * Configure the vertical sync polarity (MODESET.VDPOL), horizontal
+	 * sync polarity (MODESET.HDPOL), field id polarity (MODESET.FLDPOL),
+	 * frame format(progressive or interlace), & pixel format (Input mode)
+	 */
+	val |= (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
+		((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
+		((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
+		((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
+		((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT));
+
+	/* set pack for alaw compression */
+	if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+	     config_params->alaw.enable)
+		val |= CCDC_DATA_PACK_ENABLE;
+
+	/* Configure for LPF */
+	if (config_params->lpf_enable)
+		val |= (config_params->lpf_enable & CCDC_LPF_MASK) <<
+			CCDC_LPF_SHIFT;
+
+	/* Configure the data shift */
+	val |= (config_params->datasft & CCDC_DATASFT_MASK) <<
+		CCDC_DATASFT_SHIFT;
+	regw(val , MODESET);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to MODESET...\n", val);
+
+	/* Configure the Median Filter threshold */
+	regw((config_params->med_filt_thres) & CCDC_MED_FILT_THRESH, MEDFILT);
+
+	/* Configure GAMMAWD register. Default is 11-2, and Mosaic CFA pattern */
+	val = CCDC_GAMMA_BITS_11_2 << CCDC_GAMMAWD_INPUT_SHIFT |
+		CCDC_CFA_MOSAIC;
+
+	/* Enable and configure aLaw register if needed */
+	if (config_params->alaw.enable) {
+		val |= (CCDC_ALAW_ENABLE |
+			((config_params->alaw.gama_wd &
+			CCDC_ALAW_GAMA_WD_MASK) <<
+			CCDC_GAMMAWD_INPUT_SHIFT));
+	}
+
+	/* Configure Median filter1 & filter2 */
+	val |= ((config_params->mfilt1 << CCDC_MFILT1_SHIFT) |
+		(config_params->mfilt2 << CCDC_MFILT2_SHIFT));
+
+	regw(val, GAMMAWD);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to GAMMAWD...\n", val);
+
+	/* configure video window */
+	ccdc_setwin(&params->win, params->frm_fmt, 1);
+
+	/* Optical Clamp Averaging */
+	ccdc_config_black_clamp(&config_params->blk_clamp);
+
+	/* Black level compensation */
+	ccdc_config_black_compense(&config_params->blk_comp);
+
+	/* Vertical Defect Correction if needed */
+	if (ccdc_config_vdfc(&config_params->vertical_dft) < 0)
+		return -EFAULT;
+
+	/* color space conversion */
+	ccdc_config_csc(&config_params->csc);
+
+	/* color pattern */
+	ccdc_config_color_patterns(&config_params->col_pat_field0,
+				   &config_params->col_pat_field1);
+
+	/* Configure the Gain  & offset control */
+	ccdc_config_gain_offset();
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting %x to COLPTN...\n", val);
+
+	/* Configure DATAOFST  register */
+	val = (config_params->data_offset.horz_offset & CCDC_DATAOFST_MASK) <<
+		CCDC_DATAOFST_H_SHIFT;
+	val |= (config_params->data_offset.vert_offset & CCDC_DATAOFST_MASK) <<
+		CCDC_DATAOFST_V_SHIFT;
+	regw(val, DATAOFST);
+
+	/* configuring HSIZE register */
+	val = (params->horz_flip_enable & CCDC_HSIZE_FLIP_MASK) <<
+		CCDC_HSIZE_FLIP_SHIFT;
+
+	/* If pack 8 is enabled then 1 pixel will take 1 byte */
+	if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+	     config_params->alaw.enable) {
+		val |= (((params->win.width) + 31) >> 5) &
+			CCDC_HSIZE_VAL_MASK;
+
+		/* adjust to multiple of 32 */
+		dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
+		       (((params->win.width) + 31) >> 5) &
+			CCDC_HSIZE_VAL_MASK);
+	} else {
+		/* else one pixel will take 2 bytes */
+		val |= (((params->win.width * 2) + 31) >> 5) &
+			CCDC_HSIZE_VAL_MASK;
+
+		dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
+		       (((params->win.width * 2) + 31) >> 5) &
+			CCDC_HSIZE_VAL_MASK);
+	}
+	regw(val, HSIZE);
+
+	/* Configure SDOFST register */
+	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		if (params->image_invert_enable) {
+			/* For interlace inverse mode */
+			regw(CCDC_SDOFST_INTERLACE_INVERSE, SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+				CCDC_SDOFST_INTERLACE_INVERSE);
+		} else {
+			/* For interlace non inverse mode */
+			regw(CCDC_SDOFST_INTERLACE_NORMAL, SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+				CCDC_SDOFST_INTERLACE_NORMAL);
+		}
+	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+		if (params->image_invert_enable) {
+			/* For progressive inverse mode */
+			regw(CCDC_SDOFST_PROGRESSIVE_INVERSE, SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+				CCDC_SDOFST_PROGRESSIVE_INVERSE);
+		} else {
+			/* For progressive non-inverse mode */
+			regw(CCDC_SDOFST_PROGRESSIVE_NORMAL, SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
+				CCDC_SDOFST_PROGRESSIVE_NORMAL);
+		}
+	}
+	dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
+	return 0;
+}
+
+static int ccdc_configure(void)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		return ccdc_config_raw();
+	else
+		ccdc_config_ycbcr();
+	return 0;
+}
+
+static int ccdc_set_buftype(enum ccdc_buftype buf_type)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_cfg.bayer.buf_type = buf_type;
+	else
+		ccdc_cfg.ycbcr.buf_type = buf_type;
+	return 0;
+}
+static enum ccdc_buftype ccdc_get_buftype(void)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		return ccdc_cfg.bayer.buf_type;
+	return ccdc_cfg.ycbcr.buf_type;
+}
+
+static int ccdc_enum_pix(u32 *pix, int i)
+{
+	int ret = -EINVAL;
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
+			*pix = ccdc_raw_bayer_pix_formats[i];
+			ret = 0;
+		}
+	} else {
+		if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
+			*pix = ccdc_raw_yuv_pix_formats[i];
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static int ccdc_set_pixel_format(u32 pixfmt)
+{
+	struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+		if (pixfmt == V4L2_PIX_FMT_SBGGR8)
+			alaw->enable = 1;
+		else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
+			return -EINVAL;
+	} else {
+		if (pixfmt == V4L2_PIX_FMT_YUYV)
+			ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+		else if (pixfmt == V4L2_PIX_FMT_UYVY)
+			ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+static u32 ccdc_get_pixel_format(void)
+{
+	struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+	u32 pixfmt;
+
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		if (alaw->enable)
+			pixfmt = V4L2_PIX_FMT_SBGGR8;
+		else
+			pixfmt = V4L2_PIX_FMT_SBGGR16;
+	else {
+		if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+			pixfmt = V4L2_PIX_FMT_YUYV;
+		else
+			pixfmt = V4L2_PIX_FMT_UYVY;
+	}
+	return pixfmt;
+}
+static int ccdc_set_image_window(struct v4l2_rect *win)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_cfg.bayer.win = *win;
+	else
+		ccdc_cfg.ycbcr.win = *win;
+	return 0;
+}
+
+static void ccdc_get_image_window(struct v4l2_rect *win)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		*win = ccdc_cfg.bayer.win;
+	else
+		*win = ccdc_cfg.ycbcr.win;
+}
+
+static unsigned int ccdc_get_line_length(void)
+{
+	struct ccdc_config_params_raw *config_params =
+				&ccdc_cfg.bayer.config_params;
+	unsigned int len;
+
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		if ((config_params->alaw.enable) ||
+		    (config_params->data_sz == CCDC_DATA_8BITS))
+			len = ccdc_cfg.bayer.win.width;
+		else
+			len = ccdc_cfg.bayer.win.width * 2;
+	} else
+		len = ccdc_cfg.ycbcr.win.width * 2;
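+	/*
+	 * e.g. (hypothetical) a 720-pixel wide YCbCr window gives
+	 * len = 1440, already a multiple of 32; a 644-pixel wide raw
+	 * 8-bit window gives len = 644, aligned up to 672.
+	 */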
+	return ALIGN(len, 32);
+}
+
+static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_cfg.bayer.frm_fmt = frm_fmt;
+	else
+		ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+	return 0;
+}
+
+static enum ccdc_frmfmt ccdc_get_frame_format(void)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		return ccdc_cfg.bayer.frm_fmt;
+	else
+		return ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static int ccdc_getfid(void)
+{
+	return  (regr(MODESET) >> 15) & 1;
+}
+
+/* misc operations */
+static inline void ccdc_setfbaddr(unsigned long addr)
+{
+	regw((addr >> 21) & 0x007f, STADRH);
+	regw((addr >> 5) & 0x0ffff, STADRL);
+}
+
+static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+	ccdc_cfg.if_type = params->if_type;
+
+	switch (params->if_type) {
+	case VPFE_BT656:
+	case VPFE_YCBCR_SYNC_16:
+	case VPFE_YCBCR_SYNC_8:
+		ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+		ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+		break;
+	default:
+		/* TODO add support for raw bayer here */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct ccdc_hw_device ccdc_hw_dev = {
+	.name = "DM355 CCDC",
+	.owner = THIS_MODULE,
+	.hw_ops = {
+		.open = ccdc_open,
+		.close = ccdc_close,
+		.enable = ccdc_enable,
+		.enable_out_to_sdram = ccdc_enable_output_to_sdram,
+		.set_hw_if_params = ccdc_set_hw_if_params,
+		.set_params = ccdc_set_params,
+		.configure = ccdc_configure,
+		.set_buftype = ccdc_set_buftype,
+		.get_buftype = ccdc_get_buftype,
+		.enum_pix = ccdc_enum_pix,
+		.set_pixel_format = ccdc_set_pixel_format,
+		.get_pixel_format = ccdc_get_pixel_format,
+		.set_frame_format = ccdc_set_frame_format,
+		.get_frame_format = ccdc_get_frame_format,
+		.set_image_window = ccdc_set_image_window,
+		.get_image_window = ccdc_get_image_window,
+		.get_line_length = ccdc_get_line_length,
+		.setfbaddr = ccdc_setfbaddr,
+		.getfid = ccdc_getfid,
+	},
+};
+
+static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
+{
+	void (*setup_pinmux)(void);
+	struct resource	*res;
+	int status = 0;
+
+	/*
+	 * first try to register with vpfe. If not correct platform, then we
+	 * don't have to iomap
+	 */
+	status = vpfe_register_ccdc_device(&ccdc_hw_dev);
+	if (status < 0)
+		return status;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		status = -ENODEV;
+		goto fail_nores;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), res->name);
+	if (!res) {
+		status = -EBUSY;
+		goto fail_nores;
+	}
+
+	ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+	if (!ccdc_cfg.base_addr) {
+		status = -ENOMEM;
+		goto fail_nomem;
+	}
+
+	/* Get and enable Master clock */
+	ccdc_cfg.mclk = clk_get(&pdev->dev, "master");
+	if (IS_ERR(ccdc_cfg.mclk)) {
+		status = PTR_ERR(ccdc_cfg.mclk);
+		goto fail_nomap;
+	}
+	if (clk_enable(ccdc_cfg.mclk)) {
+		status = -ENODEV;
+		goto fail_mclk;
+	}
+
+	/* Get and enable Slave clock */
+	ccdc_cfg.sclk = clk_get(&pdev->dev, "slave");
+	if (IS_ERR(ccdc_cfg.sclk)) {
+		status = PTR_ERR(ccdc_cfg.sclk);
+		goto fail_mclk;
+	}
+	if (clk_enable(ccdc_cfg.sclk)) {
+		status = -ENODEV;
+		goto fail_sclk;
+	}
+
+	/* Platform data holds setup_pinmux function ptr */
+	if (NULL == pdev->dev.platform_data) {
+		status = -ENODEV;
+		goto fail_sclk;
+	}
+	setup_pinmux = pdev->dev.platform_data;
+	/*
+	 * setup Mux configuration for ccdc which may be different for
+	 * different SoCs using this CCDC
+	 */
+	setup_pinmux();
+	ccdc_cfg.dev = &pdev->dev;
+	printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
+	return 0;
+fail_sclk:
+	clk_put(ccdc_cfg.sclk);
+fail_mclk:
+	clk_put(ccdc_cfg.mclk);
+fail_nomap:
+	iounmap(ccdc_cfg.base_addr);
+fail_nomem:
+	release_mem_region(res->start, resource_size(res));
+fail_nores:
+	vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+	return status;
+}
+
+static int dm355_ccdc_remove(struct platform_device *pdev)
+{
+	struct resource	*res;
+
+	clk_put(ccdc_cfg.mclk);
+	clk_put(ccdc_cfg.sclk);
+	iounmap(ccdc_cfg.base_addr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
+	vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+	return 0;
+}
+
+static struct platform_driver dm355_ccdc_driver = {
+	.driver = {
+		.name	= "dm355_ccdc",
+		.owner = THIS_MODULE,
+	},
+	.remove = __devexit_p(dm355_ccdc_remove),
+	.probe = dm355_ccdc_probe,
+};
+
+module_platform_driver(dm355_ccdc_driver);
diff --git a/drivers/media/platform/davinci/dm355_ccdc_regs.h b/drivers/media/platform/davinci/dm355_ccdc_regs.h
new file mode 100644
index 0000000..d6d2ef0
--- /dev/null
+++ b/drivers/media/platform/davinci/dm355_ccdc_regs.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef _DM355_CCDC_REGS_H
+#define _DM355_CCDC_REGS_H
+
+/**************************************************************************\
+* Register OFFSET Definitions
+\**************************************************************************/
+#define SYNCEN				0x00
+#define MODESET				0x04
+#define HDWIDTH				0x08
+#define VDWIDTH				0x0c
+#define PPLN				0x10
+#define LPFR				0x14
+#define SPH				0x18
+#define NPH				0x1c
+#define SLV0				0x20
+#define SLV1				0x24
+#define NLV				0x28
+#define CULH				0x2c
+#define CULV				0x30
+#define HSIZE				0x34
+#define SDOFST				0x38
+#define STADRH				0x3c
+#define STADRL				0x40
+#define CLAMP				0x44
+#define DCSUB				0x48
+#define COLPTN				0x4c
+#define BLKCMP0				0x50
+#define BLKCMP1				0x54
+#define MEDFILT				0x58
+#define RYEGAIN				0x5c
+#define GRCYGAIN			0x60
+#define GBGGAIN				0x64
+#define BMGGAIN				0x68
+#define OFFSET				0x6c
+#define OUTCLIP				0x70
+#define VDINT0				0x74
+#define VDINT1				0x78
+#define RSV0				0x7c
+#define GAMMAWD				0x80
+#define REC656IF			0x84
+#define CCDCFG				0x88
+#define FMTCFG				0x8c
+#define FMTPLEN				0x90
+#define FMTSPH				0x94
+#define FMTLNH				0x98
+#define FMTSLV				0x9c
+#define FMTLNV				0xa0
+#define FMTRLEN				0xa4
+#define FMTHCNT				0xa8
+#define FMT_ADDR_PTR_B			0xac
+#define FMT_ADDR_PTR(i)			(FMT_ADDR_PTR_B + (i * 4))
+#define FMTPGM_VF0			0xcc
+#define FMTPGM_VF1			0xd0
+#define FMTPGM_AP0			0xd4
+#define FMTPGM_AP1			0xd8
+#define FMTPGM_AP2			0xdc
+#define FMTPGM_AP3                      0xe0
+#define FMTPGM_AP4                      0xe4
+#define FMTPGM_AP5                      0xe8
+#define FMTPGM_AP6                      0xec
+#define FMTPGM_AP7                      0xf0
+#define LSCCFG1                         0xf4
+#define LSCCFG2                         0xf8
+#define LSCH0                           0xfc
+#define LSCV0                           0x100
+#define LSCKH                           0x104
+#define LSCKV                           0x108
+#define LSCMEMCTL                       0x10c
+#define LSCMEMD                         0x110
+#define LSCMEMQ                         0x114
+#define DFCCTL                          0x118
+#define DFCVSAT                         0x11c
+#define DFCMEMCTL                       0x120
+#define DFCMEM0                         0x124
+#define DFCMEM1                         0x128
+#define DFCMEM2                         0x12c
+#define DFCMEM3                         0x130
+#define DFCMEM4                         0x134
+#define CSCCTL                          0x138
+#define CSCM0                           0x13c
+#define CSCM1                           0x140
+#define CSCM2                           0x144
+#define CSCM3                           0x148
+#define CSCM4                           0x14c
+#define CSCM5                           0x150
+#define CSCM6                           0x154
+#define CSCM7                           0x158
+#define DATAOFST			0x15c
+#define CCDC_REG_LAST			DATAOFST
+/**************************************************************
+*	Define for various register bit mask and shifts for CCDC
+*
+**************************************************************/
+#define CCDC_RAW_IP_MODE			0
+#define CCDC_VDHDOUT_INPUT			0
+#define CCDC_YCINSWP_RAW			(0 << 4)
+#define CCDC_EXWEN_DISABLE 			0
+#define CCDC_DATAPOL_NORMAL			0
+#define CCDC_CCDCFG_FIDMD_LATCH_VSYNC		0
+#define CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC	(1 << 6)
+#define CCDC_CCDCFG_WENLOG_AND			0
+#define CCDC_CCDCFG_TRGSEL_WEN			0
+#define CCDC_CCDCFG_EXTRG_DISABLE		0
+#define CCDC_CFA_MOSAIC				0
+#define CCDC_Y8POS_SHIFT			11
+
+#define CCDC_VDC_DFCVSAT_MASK			0x3fff
+#define CCDC_DATAOFST_MASK			0x0ff
+#define CCDC_DATAOFST_H_SHIFT			0
+#define CCDC_DATAOFST_V_SHIFT			8
+#define CCDC_GAMMAWD_CFA_MASK			1
+#define CCDC_GAMMAWD_CFA_SHIFT			5
+#define CCDC_GAMMAWD_INPUT_SHIFT		2
+#define CCDC_FID_POL_MASK			1
+#define CCDC_FID_POL_SHIFT			4
+#define CCDC_HD_POL_MASK			1
+#define CCDC_HD_POL_SHIFT			3
+#define CCDC_VD_POL_MASK			1
+#define CCDC_VD_POL_SHIFT			2
+#define CCDC_VD_POL_NEGATIVE			(1 << 2)
+#define CCDC_FRM_FMT_MASK			1
+#define CCDC_FRM_FMT_SHIFT			7
+#define CCDC_DATA_SZ_MASK			7
+#define CCDC_DATA_SZ_SHIFT			8
+#define CCDC_VDHDOUT_MASK			1
+#define CCDC_VDHDOUT_SHIFT			0
+#define CCDC_EXWEN_MASK				1
+#define CCDC_EXWEN_SHIFT			5
+#define CCDC_INPUT_MODE_MASK			3
+#define CCDC_INPUT_MODE_SHIFT			12
+#define CCDC_PIX_FMT_MASK			3
+#define CCDC_PIX_FMT_SHIFT			12
+#define CCDC_DATAPOL_MASK			1
+#define CCDC_DATAPOL_SHIFT			6
+#define CCDC_WEN_ENABLE				(1 << 1)
+#define CCDC_VDHDEN_ENABLE			(1 << 16)
+#define CCDC_LPF_ENABLE				(1 << 14)
+#define CCDC_ALAW_ENABLE			1
+#define CCDC_ALAW_GAMA_WD_MASK			7
+#define CCDC_REC656IF_BT656_EN			3
+
+#define CCDC_FMTCFG_FMTMODE_MASK 		3
+#define CCDC_FMTCFG_FMTMODE_SHIFT		1
+#define CCDC_FMTCFG_LNUM_MASK			3
+#define CCDC_FMTCFG_LNUM_SHIFT			4
+#define CCDC_FMTCFG_ADDRINC_MASK		7
+#define CCDC_FMTCFG_ADDRINC_SHIFT		8
+
+#define CCDC_CCDCFG_FIDMD_SHIFT			6
+#define	CCDC_CCDCFG_WENLOG_SHIFT		8
+#define CCDC_CCDCFG_TRGSEL_SHIFT		9
+#define CCDC_CCDCFG_EXTRG_SHIFT			10
+#define CCDC_CCDCFG_MSBINVI_SHIFT		13
+
+#define CCDC_HSIZE_FLIP_SHIFT			12
+#define CCDC_HSIZE_FLIP_MASK			1
+#define CCDC_HSIZE_VAL_MASK			0xFFF
+#define CCDC_SDOFST_FIELD_INTERLEAVED		0x249
+#define CCDC_SDOFST_INTERLACE_INVERSE		0x4B6D
+#define CCDC_SDOFST_INTERLACE_NORMAL		0x0B6D
+#define CCDC_SDOFST_PROGRESSIVE_INVERSE		0x4000
+#define CCDC_SDOFST_PROGRESSIVE_NORMAL		0
+#define CCDC_START_PX_HOR_MASK			0x7FFF
+#define CCDC_NUM_PX_HOR_MASK			0x7FFF
+#define CCDC_START_VER_ONE_MASK			0x7FFF
+#define CCDC_START_VER_TWO_MASK			0x7FFF
+#define CCDC_NUM_LINES_VER			0x7FFF
+
+#define CCDC_BLK_CLAMP_ENABLE			(1 << 15)
+#define CCDC_BLK_SGAIN_MASK			0x1F
+#define CCDC_BLK_ST_PXL_MASK			0x1FFF
+#define CCDC_BLK_SAMPLE_LN_MASK			3
+#define CCDC_BLK_SAMPLE_LN_SHIFT		13
+
+#define CCDC_NUM_LINE_CALC_MASK			3
+#define CCDC_NUM_LINE_CALC_SHIFT		14
+
+#define CCDC_BLK_DC_SUB_MASK			0x3FFF
+#define CCDC_BLK_COMP_MASK			0xFF
+#define CCDC_BLK_COMP_GB_COMP_SHIFT		8
+#define CCDC_BLK_COMP_GR_COMP_SHIFT		0
+#define CCDC_BLK_COMP_R_COMP_SHIFT		8
+#define CCDC_LATCH_ON_VSYNC_DISABLE		(1 << 15)
+#define CCDC_LATCH_ON_VSYNC_ENABLE		(0 << 15)
+#define CCDC_FPC_ENABLE				(1 << 15)
+#define CCDC_FPC_FPC_NUM_MASK 			0x7FFF
+#define CCDC_DATA_PACK_ENABLE			(1 << 11)
+#define CCDC_FMT_HORZ_FMTLNH_MASK		0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_MASK		0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_SHIFT		16
+#define CCDC_FMT_VERT_FMTLNV_MASK		0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_MASK		0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_SHIFT		16
+#define CCDC_VP_OUT_VERT_NUM_MASK		0x3FFF
+#define CCDC_VP_OUT_VERT_NUM_SHIFT		17
+#define CCDC_VP_OUT_HORZ_NUM_MASK		0x1FFF
+#define CCDC_VP_OUT_HORZ_NUM_SHIFT		4
+#define CCDC_VP_OUT_HORZ_ST_MASK		0xF
+
+#define CCDC_CSC_COEF_INTEG_MASK		7
+#define CCDC_CSC_COEF_DECIMAL_MASK		0x1f
+#define CCDC_CSC_COEF_INTEG_SHIFT		5
+#define CCDC_CSCM_MSB_SHIFT			8
+#define CCDC_CSC_ENABLE				1
+#define CCDC_CSC_DEC_MAX			32
+
+#define CCDC_MFILT1_SHIFT			10
+#define CCDC_MFILT2_SHIFT			8
+#define CCDC_MED_FILT_THRESH			0x3FFF
+#define CCDC_LPF_MASK				1
+#define CCDC_LPF_SHIFT				14
+#define CCDC_OFFSET_MASK			0x3FF
+#define CCDC_DATASFT_MASK			7
+#define CCDC_DATASFT_SHIFT			8
+
+#define CCDC_DF_ENABLE				1
+
+#define CCDC_FMTPLEN_P0_MASK			0xF
+#define CCDC_FMTPLEN_P1_MASK			0xF
+#define CCDC_FMTPLEN_P2_MASK			7
+#define CCDC_FMTPLEN_P3_MASK			7
+#define CCDC_FMTPLEN_P0_SHIFT			0
+#define CCDC_FMTPLEN_P1_SHIFT			4
+#define CCDC_FMTPLEN_P2_SHIFT			8
+#define CCDC_FMTPLEN_P3_SHIFT			12
+
+#define CCDC_FMTSPH_MASK			0x1FFF
+#define CCDC_FMTLNH_MASK			0x1FFF
+#define CCDC_FMTSLV_MASK			0x1FFF
+#define CCDC_FMTLNV_MASK			0x7FFF
+#define CCDC_FMTRLEN_MASK			0x1FFF
+#define CCDC_FMTHCNT_MASK			0x1FFF
+
+#define CCDC_ADP_INIT_MASK			0x1FFF
+#define CCDC_ADP_LINE_SHIFT			13
+#define CCDC_ADP_LINE_MASK			3
+#define CCDC_FMTPGN_APTR_MASK			7
+
+#define CCDC_DFCCTL_GDFCEN_MASK			1
+#define CCDC_DFCCTL_VDFCEN_MASK			1
+#define CCDC_DFCCTL_VDFC_DISABLE		(0 << 4)
+#define CCDC_DFCCTL_VDFCEN_SHIFT		4
+#define CCDC_DFCCTL_VDFCSL_MASK			3
+#define CCDC_DFCCTL_VDFCSL_SHIFT		5
+#define CCDC_DFCCTL_VDFCUDA_MASK		1
+#define CCDC_DFCCTL_VDFCUDA_SHIFT		7
+#define CCDC_DFCCTL_VDFLSFT_MASK		3
+#define CCDC_DFCCTL_VDFLSFT_SHIFT		8
+#define CCDC_DFCMEMCTL_DFCMARST_MASK		1
+#define CCDC_DFCMEMCTL_DFCMARST_SHIFT		2
+#define CCDC_DFCMEMCTL_DFCMWR_MASK		1
+#define CCDC_DFCMEMCTL_DFCMWR_SHIFT		0
+#define CCDC_DFCMEMCTL_INC_ADDR			(0 << 2)
+
+#define CCDC_LSCCFG_GFTSF_MASK			7
+#define CCDC_LSCCFG_GFTSF_SHIFT			1
+#define CCDC_LSCCFG_GFTINV_MASK			0xf
+#define CCDC_LSCCFG_GFTINV_SHIFT		4
+#define CCDC_LSC_GFTABLE_SEL_MASK		3
+#define CCDC_LSC_GFTABLE_EPEL_SHIFT		8
+#define CCDC_LSC_GFTABLE_OPEL_SHIFT		10
+#define CCDC_LSC_GFTABLE_EPOL_SHIFT		12
+#define CCDC_LSC_GFTABLE_OPOL_SHIFT		14
+#define CCDC_LSC_GFMODE_MASK			3
+#define CCDC_LSC_GFMODE_SHIFT			4
+#define CCDC_LSC_DISABLE			0
+#define CCDC_LSC_ENABLE				1
+#define CCDC_LSC_TABLE1_SLC			0
+#define CCDC_LSC_TABLE2_SLC			1
+#define CCDC_LSC_TABLE3_SLC			2
+#define CCDC_LSC_MEMADDR_RESET			(1 << 2)
+#define CCDC_LSC_MEMADDR_INCR			(0 << 2)
+#define CCDC_LSC_FRAC_MASK_T1			0xFF
+#define CCDC_LSC_INT_MASK			3
+#define CCDC_LSC_FRAC_MASK			0x3FFF
+#define CCDC_LSC_CENTRE_MASK			0x3FFF
+#define CCDC_LSC_COEF_MASK			0xff
+#define CCDC_LSC_COEFL_SHIFT			0
+#define CCDC_LSC_COEFU_SHIFT			8
+#define CCDC_GAIN_MASK				0x7FF
+#define CCDC_SYNCEN_VDHDEN_MASK			(1 << 0)
+#define CCDC_SYNCEN_WEN_MASK			(1 << 1)
+#define CCDC_SYNCEN_WEN_SHIFT			1
+
+/* Power on Defaults in hardware */
+#define MODESET_DEFAULT				0x200
+#define CULH_DEFAULT				0xFFFF
+#define CULV_DEFAULT				0xFF
+#define GAIN_DEFAULT				256
+#define OUTCLIP_DEFAULT				0x3FFF
+#define LSCCFG2_DEFAULT				0xE
+
+#endif
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
new file mode 100644
index 0000000..ee7942b
--- /dev/null
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -0,0 +1,1081 @@
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * CCDC hardware module for DM6446
+ * ------------------------------
+ *
+ * This module configures the CCD controller (CCDC) of the DM6446 VPFE to
+ * capture raw YUV or Bayer RGB data from a decoder. The CCDC has several
+ * sub-modules, such as Defect Pixel Correction and Color Space Conversion,
+ * to pre-process the raw Bayer RGB data before writing it to SDRAM. This
+ * module also allows an application to configure individual
+ * sub-module parameters through the VPFE_CMD_S_CCDC_RAW_PARAMS ioctl.
+ * To do so, the application includes the dm644x_ccdc.h and vpfe_capture.h
+ * header files. The set_params() API is called by the vpfe_capture driver
+ * to configure module parameters. This file is named DM644x so that other
+ * variants such as DM6443 may be supported using the same module.
+ *
+ * TODO: Test raw Bayer parameter settings and Bayer capture
+ *	 Split the module parameter structure into module-specific ioctl
+ *	 structs
+ *	 Investigate whether enums used for user space type definitions
+ *	 should be replaced by #defines or integers
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+#include <linux/gfp.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <media/davinci/dm644x_ccdc.h>
+#include <media/davinci/vpss.h>
+
+#include "dm644x_ccdc_regs.h"
+#include "ccdc_hw_device.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCDC Driver for DM6446");
+MODULE_AUTHOR("Texas Instruments");
+
+static struct ccdc_oper_config {
+	struct device *dev;
+	/* CCDC interface type */
+	enum vpfe_hw_if_type if_type;
+	/* Raw Bayer configuration */
+	struct ccdc_params_raw bayer;
+	/* YCbCr configuration */
+	struct ccdc_params_ycbcr ycbcr;
+	/* Master clock */
+	struct clk *mclk;
+	/* slave clock */
+	struct clk *sclk;
+	/* ccdc base address */
+	void __iomem *base_addr;
+} ccdc_cfg = {
+	/* Raw configurations */
+	.bayer = {
+		.pix_fmt = CCDC_PIXFMT_RAW,
+		.frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+		.win = CCDC_WIN_VGA,
+		.fid_pol = VPFE_PINPOL_POSITIVE,
+		.vd_pol = VPFE_PINPOL_POSITIVE,
+		.hd_pol = VPFE_PINPOL_POSITIVE,
+		.config_params = {
+			.data_sz = CCDC_DATA_10BITS,
+		},
+	},
+	.ycbcr = {
+		.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+		.frm_fmt = CCDC_FRMFMT_INTERLACED,
+		.win = CCDC_WIN_PAL,
+		.fid_pol = VPFE_PINPOL_POSITIVE,
+		.vd_pol = VPFE_PINPOL_POSITIVE,
+		.hd_pol = VPFE_PINPOL_POSITIVE,
+		.bt656_enable = 1,
+		.pix_order = CCDC_PIXORDER_CBYCRY,
+		.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
+	},
+};
+
+#define CCDC_MAX_RAW_YUV_FORMATS	2
+
+/* Raw Bayer formats */
+static u32 ccdc_raw_bayer_pix_formats[] =
+	{V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static u32 ccdc_raw_yuv_pix_formats[] =
+	{V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* CCDC Save/Restore context */
+static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+	return __raw_readl(ccdc_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+	__raw_writel(val, ccdc_cfg.base_addr + offset);
+}
+
+static void ccdc_enable(int flag)
+{
+	regw(flag, CCDC_PCR);
+}
+
+static void ccdc_enable_vport(int flag)
+{
+	if (flag)
+		/* enable video port */
+		regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG);
+	else
+		regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG);
+}
+
+/*
+ * ccdc_setwin()
+ * This function configures the capture window size
+ * in the CCDC registers
+ */
+void ccdc_setwin(struct v4l2_rect *image_win,
+		enum ccdc_frmfmt frm_fmt,
+		int ppc)
+{
+	int horz_start, horz_nr_pixels;
+	int vert_start, vert_nr_lines;
+	int val = 0, mid_img = 0;
+
+	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
+	/*
+	 * ppc - per pixel count. Indicates how many pixels per cell are
+	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
+	 * so 2; for raw capture it is 1.
+	 */
+	horz_start = image_win->left << (ppc - 1);
+	horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
+	regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels,
+	     CCDC_HORZ_INFO);
+
+	vert_start = image_win->top;
+
+	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		vert_nr_lines = (image_win->height >> 1) - 1;
+		vert_start >>= 1;
+		/* Since first line doesn't have any data */
+		vert_start += 1;
+		/* configure VDINT0 */
+		val = (vert_start << CCDC_VDINT_VDINT0_SHIFT);
+		regw(val, CCDC_VDINT);
+
+	} else {
+		/* Since first line doesn't have any data */
+		vert_start += 1;
+		vert_nr_lines = image_win->height - 1;
+		/*
+		 * configure VDINT0 and VDINT1. VDINT1 will be at half
+		 * of image height
+		 */
+		mid_img = vert_start + (image_win->height / 2);
+		val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) |
+		    (mid_img & CCDC_VDINT_VDINT1_MASK);
+		regw(val, CCDC_VDINT);
+
+	}
+	regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start,
+	     CCDC_VERT_START);
+	regw(vert_nr_lines, CCDC_VERT_LINES);
+	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
+}
+
+static void ccdc_readregs(void)
+{
+	unsigned int val = 0;
+
+	val = regr(CCDC_ALAW);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to ALAW...\n", val);
+	val = regr(CCDC_CLAMP);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to CLAMP...\n", val);
+	val = regr(CCDC_DCSUB);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to DCSUB...\n", val);
+	val = regr(CCDC_BLKCMP);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to BLKCMP...\n", val);
+	val = regr(CCDC_FPC_ADDR);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC_ADDR...\n", val);
+	val = regr(CCDC_FPC);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC...\n", val);
+	val = regr(CCDC_FMTCFG);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMTCFG...\n", val);
+	val = regr(CCDC_COLPTN);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to COLPTN...\n", val);
+	val = regr(CCDC_FMT_HORZ);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_HORZ...\n", val);
+	val = regr(CCDC_FMT_VERT);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_VERT...\n", val);
+	val = regr(CCDC_HSIZE_OFF);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HSIZE_OFF...\n", val);
+	val = regr(CCDC_SDOFST);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SDOFST...\n", val);
+	val = regr(CCDC_VP_OUT);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VP_OUT...\n", val);
+	val = regr(CCDC_SYN_MODE);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SYN_MODE...\n", val);
+	val = regr(CCDC_HORZ_INFO);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HORZ_INFO...\n", val);
+	val = regr(CCDC_VERT_START);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_START...\n", val);
+	val = regr(CCDC_VERT_LINES);
+	dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
+}
+
+static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
+{
+	if (ccdcparam->alaw.enable) {
+		if ((ccdcparam->alaw.gama_wd > CCDC_GAMMA_BITS_09_0) ||
+		    (ccdcparam->alaw.gama_wd < CCDC_GAMMA_BITS_15_6) ||
+		    (ccdcparam->alaw.gama_wd < ccdcparam->data_sz)) {
+			dev_dbg(ccdc_cfg.dev, "\nInvalid data line select");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
+{
+	struct ccdc_config_params_raw *config_params =
+				&ccdc_cfg.bayer.config_params;
+	unsigned int *fpc_virtaddr = NULL;
+	unsigned int *fpc_physaddr = NULL;
+
+	memcpy(config_params, raw_params, sizeof(*raw_params));
+	/*
+	 * allocate memory for fault pixel table and copy the user
+	 * values to the table
+	 */
+	if (!config_params->fault_pxl.enable)
+		return 0;
+
+	fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
+	fpc_virtaddr = (unsigned int *)phys_to_virt(
+				(unsigned long)fpc_physaddr);
+	/*
+	 * Allocate memory for FPC table if current
+	 * FPC table buffer is not big enough to
+	 * accommodate FPC Number requested
+	 */
+	if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
+		if (fpc_physaddr != NULL) {
+			free_pages((unsigned long)fpc_physaddr,
+				   get_order
+				   (config_params->fault_pxl.fp_num *
+				   FP_NUM_BYTES));
+		}
+
+		/* Allocate memory for FPC table */
+		fpc_virtaddr =
+			(unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+							 get_order(raw_params->
+							 fault_pxl.fp_num *
+							 FP_NUM_BYTES));
+
+		if (fpc_virtaddr == NULL) {
+			dev_dbg(ccdc_cfg.dev,
+				"\nUnable to allocate memory for FPC");
+			return -EFAULT;
+		}
+		fpc_physaddr =
+		    (unsigned int *)virt_to_phys((void *)fpc_virtaddr);
+	}
+
+	/* Copy number of fault pixels and FPC table */
+	config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num;
+	if (copy_from_user(fpc_virtaddr,
+			(void __user *)raw_params->fault_pxl.fpc_table_addr,
+			config_params->fault_pxl.fp_num * FP_NUM_BYTES)) {
+		dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
+		return -EFAULT;
+	}
+	config_params->fault_pxl.fpc_table_addr = (unsigned int)fpc_physaddr;
+	return 0;
+}
+
+static int ccdc_close(struct device *dev)
+{
+	struct ccdc_config_params_raw *config_params =
+				&ccdc_cfg.bayer.config_params;
+	unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL;
+
+	fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
+
+	if (fpc_physaddr != NULL) {
+		fpc_virtaddr = (unsigned int *)
+		    phys_to_virt((unsigned long)fpc_physaddr);
+		free_pages((unsigned long)fpc_virtaddr,
+			   get_order(config_params->fault_pxl.fp_num *
+			   FP_NUM_BYTES));
+	}
+	return 0;
+}
+
+/*
+ * ccdc_restore_defaults()
+ * This function will write defaults to all CCDC registers
+ */
+static void ccdc_restore_defaults(void)
+{
+	int i;
+
+	/* disable CCDC */
+	ccdc_enable(0);
+	/* set all registers to default value */
+	for (i = 4; i <= 0x94; i += 4)
+		regw(0,  i);
+	regw(CCDC_NO_CULLING, CCDC_CULLING);
+	regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW);
+}
+
+static int ccdc_open(struct device *device)
+{
+	ccdc_restore_defaults();
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_enable_vport(1);
+	return 0;
+}
+
+static void ccdc_sbl_reset(void)
+{
+	vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
+}
+
+/* Parameter operations */
+static int ccdc_set_params(void __user *params)
+{
+	struct ccdc_config_params_raw ccdc_raw_params;
+	int x;
+
+	if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
+		return -EINVAL;
+
+	x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
+	if (x) {
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying"
+			   "ccdc params, %d\n", x);
+		return -EFAULT;
+	}
+
+	if (!validate_ccdc_param(&ccdc_raw_params)) {
+		if (!ccdc_update_raw_params(&ccdc_raw_params))
+			return 0;
+	}
+	return -EINVAL;
+}
+
+/*
+ * ccdc_config_ycbcr()
+ * This function will configure CCDC for YCbCr video capture
+ */
+void ccdc_config_ycbcr(void)
+{
+	struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
+	u32 syn_mode;
+
+	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
+	/*
+	 * first restore the CCDC registers to default values.
+	 * This is important since we assume default values to be set in
+	 * a lot of registers that we didn't touch
+	 */
+	ccdc_restore_defaults();
+
+	/*
+	 * configure pixel format, frame format, configure video frame
+	 * format, enable output to SDRAM, enable internal timing generator
+	 * and 8bit pack mode
+	 */
+	syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) <<
+		    CCDC_SYN_MODE_INPMOD_SHIFT) |
+		    ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) <<
+		    CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE |
+		    CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE);
+
+	/* setup BT.656 sync mode */
+	if (params->bt656_enable) {
+		regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF);
+
+		/*
+		 * configure the FID, VD, HD pin polarity,
+		 * fld,hd pol positive, vd negative, 8-bit data
+		 */
+		syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
+		if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+			syn_mode |= CCDC_SYN_MODE_10BITS;
+		else
+			syn_mode |= CCDC_SYN_MODE_8BITS;
+	} else {
+		/* y/c external sync mode */
+		syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
+			     CCDC_FID_POL_SHIFT) |
+			     ((params->hd_pol & CCDC_HD_POL_MASK) <<
+			     CCDC_HD_POL_SHIFT) |
+			     ((params->vd_pol & CCDC_VD_POL_MASK) <<
+			     CCDC_VD_POL_SHIFT));
+	}
+	regw(syn_mode, CCDC_SYN_MODE);
+
+	/* configure video window */
+	ccdc_setwin(&params->win, params->frm_fmt, 2);
+
+	/*
+	 * configure the order of y cb cr in SDRAM, and disable latch
+	 * internal register on vsync
+	 */
+	if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+		regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+			CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
+			CCDC_CCDCFG);
+	else
+		regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+			CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+
+	/*
+	 * configure the horizontal line offset. This should be on a
+	 * 32 byte boundary, so clear the 5 LSBs
+	 */
+	regw(((params->win.width * 2  + 31) & ~0x1f), CCDC_HSIZE_OFF);
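+	/*
+	 * e.g. (hypothetical) a 720-pixel wide window gives
+	 * (720 * 2 + 31) & ~0x1f = 1440 bytes per line.
+	 */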
+
+	/* configure the memory line offset */
+	if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+		/* two fields are interleaved in memory */
+		regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST);
+
+	ccdc_sbl_reset();
+	dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
+}
+
+static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
+{
+	u32 val;
+
+	if (!bclamp->enable) {
+		/* configure DCSub */
+		val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK;
+		regw(val, CCDC_DCSUB);
+		dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val);
+		regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP);
+		dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n");
+		return;
+	}
+	/*
+	 * Configure gain, start pixel, number of lines to average,
+	 * number of pixels/line to average, and enable black clamping
+	 */
+	val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) |
+	       ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) <<
+		CCDC_BLK_ST_PXL_SHIFT) |
+	       ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) <<
+		CCDC_BLK_SAMPLE_LINE_SHIFT) |
+	       ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
+		CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE);
+	regw(val, CCDC_CLAMP);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val);
+	/* If black clamping is enabled then set DCSUB to 0 */
+	regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n");
+}
+
+static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
+{
+	u32 val;
+
+	val = ((bcomp->b & CCDC_BLK_COMP_MASK) |
+	      ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
+	       CCDC_BLK_COMP_GB_COMP_SHIFT) |
+	      ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
+	       CCDC_BLK_COMP_GR_COMP_SHIFT) |
+	      ((bcomp->r & CCDC_BLK_COMP_MASK) <<
+	       CCDC_BLK_COMP_R_COMP_SHIFT));
+	regw(val, CCDC_BLKCMP);
+}
+
+static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
+{
+	u32 val;
+
+	/* Initially disable FPC */
+	val = CCDC_FPC_DISABLE;
+	regw(val, CCDC_FPC);
+
+	if (!fpc->enable)
+		return;
+
+	/* Configure Fault pixel if needed */
+	regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC_ADDR...\n",
+		       (fpc->fpc_table_addr));
+	/* Write the FPC params with FPC disable */
+	val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
+	regw(val, CCDC_FPC);
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
+	/* read the FPC register */
+	val = regr(CCDC_FPC) | CCDC_FPC_ENABLE;
+	regw(val, CCDC_FPC);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
+}
+
+/*
+ * ccdc_config_raw()
+ * This function will configure CCDC for Raw capture mode
+ */
+void ccdc_config_raw(void)
+{
+	struct ccdc_params_raw *params = &ccdc_cfg.bayer;
+	struct ccdc_config_params_raw *config_params =
+				&ccdc_cfg.bayer.config_params;
+	unsigned int syn_mode = 0;
+	unsigned int val;
+
+	dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
+
+	/* Reset CCDC */
+	ccdc_restore_defaults();
+
+	/* Disable latching function registers on VSYNC  */
+	regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+
+	/*
+	 * Configure the vertical sync polarity (SYN_MODE.VDPOL),
+	 * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
+	 * (SYN_MODE.FLDPOL), frame format (progressive or interlaced),
+	 * data size (SYN_MODE.DATSIZ) and pixel format (input mode),
+	 * enable output to SDRAM and the internal timing generator
+	 */
+	syn_mode =
+		(((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
+		((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
+		((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
+		((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
+		((config_params->data_sz & CCDC_DATA_SZ_MASK) <<
+		CCDC_DATA_SZ_SHIFT) |
+		((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) |
+		CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE);
+
+	/* Enable and configure the A-law register if needed */
+	if (config_params->alaw.enable) {
+		val = ((config_params->alaw.gama_wd &
+		      CCDC_ALAW_GAMA_WD_MASK) | CCDC_ALAW_ENABLE);
+		regw(val, CCDC_ALAW);
+		dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val);
+	}
+
+	/* Configure video window */
+	ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW);
+
+	/* Configure Black Clamp */
+	ccdc_config_black_clamp(&config_params->blk_clamp);
+
+	/* Configure Black level compensation */
+	ccdc_config_black_compense(&config_params->blk_comp);
+
+	/* Configure Fault Pixel Correction */
+	ccdc_config_fpc(&config_params->fault_pxl);
+
+	/* If data size is 8 bit then pack the data */
+	if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+	     config_params->alaw.enable)
+		syn_mode |= CCDC_DATA_PACK_ENABLE;
+
+#ifdef CONFIG_DM644X_VIDEO_PORT_ENABLE
+	/* enable video port */
+	val = CCDC_ENABLE_VIDEO_PORT;
+#else
+	/* disable video port */
+	val = CCDC_DISABLE_VIDEO_PORT;
+#endif
+
+	if (config_params->data_sz == CCDC_DATA_8BITS)
+		val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK)
+		    << CCDC_FMTCFG_VPIN_SHIFT;
+	else
+		val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK)
+		    << CCDC_FMTCFG_VPIN_SHIFT;
+	/* Write value in FMTCFG */
+	regw(val, CCDC_FMTCFG);
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val);
+	/* Configure the color pattern for the mt9t001 sensor */
+	regw(CCDC_COLPTN_VAL, CCDC_COLPTN);
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n");
+	/*
+	 * Configure Data formatter(Video port) pixel selection
+	 * (FMT_HORZ, FMT_VERT)
+	 */
+	val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) <<
+	      CCDC_FMT_HORZ_FMTSPH_SHIFT) |
+	      (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK);
+	regw(val, CCDC_FMT_HORZ);
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val);
+	val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK)
+	    << CCDC_FMT_VERT_FMTSLV_SHIFT;
+	if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+		val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK;
+	else
+		val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK;
+
+	dev_dbg(ccdc_cfg.dev, "\nparams->win.height  0x%x ...\n",
+	       params->win.height);
+	regw(val, CCDC_FMT_VERT);
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val);
+
+	dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)...");
+
+	/*
+	 * Configure Horizontal offset register. If pack 8 is enabled then
+	 * 1 pixel will take 1 byte
+	 */
+	if ((config_params->data_sz == CCDC_DATA_8BITS) ||
+	    config_params->alaw.enable)
+		regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) &
+		    CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF);
+	else
+		/* else one pixel will take 2 bytes */
+		regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) +
+		    CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK,
+		    CCDC_HSIZE_OFF);
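+	/*
+	 * For illustration: a 650-pixel raw line packed to 8 bits is
+	 * 650 bytes, rounded up to 672 here; unpacked 16-bit data of the
+	 * same width is 1300 bytes, rounded up to 1312
+	 */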
+
+	/* Set value for SDOFST */
+	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		if (params->image_invert_enable) {
+			/* For interlace inverse mode */
+			regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n");
+		} else {
+			/* For interlace non inverse mode */
+			regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST);
+			dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n");
+		}
+	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+		regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST);
+		dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n");
+	}
+
+	/*
+	 * Configure video port pixel selection (VPOUT)
+	 * Here -1 is to make the height value less than FMT_VERT.FMTLNV
+	 */
+	if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+		val = ((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK)
+		    << CCDC_VP_OUT_VERT_NUM_SHIFT;
+	else
+		val = (((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) -
+		    1) & CCDC_VP_OUT_VERT_NUM_MASK)
+		    << CCDC_VP_OUT_VERT_NUM_SHIFT;
+
+	val |= (params->win.width & CCDC_VP_OUT_HORZ_NUM_MASK)
+	    << CCDC_VP_OUT_HORZ_NUM_SHIFT;
+	val |= params->win.left & CCDC_VP_OUT_HORZ_ST_MASK;
+	regw(val, CCDC_VP_OUT);
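+	/*
+	 * For example, a 480-line interlaced frame programs 240 - 1 = 239
+	 * lines per field here, while a 480-line progressive frame
+	 * programs 480 - 1 = 479
+	 */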
+
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val);
+	regw(syn_mode, CCDC_SYN_MODE);
+	dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode);
+
+	ccdc_sbl_reset();
+	dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
+	ccdc_readregs();
+}
+
+static int ccdc_configure(void)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_config_raw();
+	else
+		ccdc_config_ycbcr();
+	return 0;
+}
+
+static int ccdc_set_buftype(enum ccdc_buftype buf_type)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_cfg.bayer.buf_type = buf_type;
+	else
+		ccdc_cfg.ycbcr.buf_type = buf_type;
+	return 0;
+}
+
+static enum ccdc_buftype ccdc_get_buftype(void)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		return ccdc_cfg.bayer.buf_type;
+	return ccdc_cfg.ycbcr.buf_type;
+}
+
+static int ccdc_enum_pix(u32 *pix, int i)
+{
+	int ret = -EINVAL;
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
+			*pix = ccdc_raw_bayer_pix_formats[i];
+			ret = 0;
+		}
+	} else {
+		if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
+			*pix = ccdc_raw_yuv_pix_formats[i];
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static int ccdc_set_pixel_format(u32 pixfmt)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+		if (pixfmt == V4L2_PIX_FMT_SBGGR8)
+			ccdc_cfg.bayer.config_params.alaw.enable = 1;
+		else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
+			return -EINVAL;
+	} else {
+		if (pixfmt == V4L2_PIX_FMT_YUYV)
+			ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+		else if (pixfmt == V4L2_PIX_FMT_UYVY)
+			ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static u32 ccdc_get_pixel_format(void)
+{
+	struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
+	u32 pixfmt;
+
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		if (alaw->enable)
+			pixfmt = V4L2_PIX_FMT_SBGGR8;
+		else
+			pixfmt = V4L2_PIX_FMT_SBGGR16;
+	} else {
+		if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+			pixfmt = V4L2_PIX_FMT_YUYV;
+		else
+			pixfmt = V4L2_PIX_FMT_UYVY;
+	}
+	return pixfmt;
+}
+
+static int ccdc_set_image_window(struct v4l2_rect *win)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_cfg.bayer.win = *win;
+	else
+		ccdc_cfg.ycbcr.win = *win;
+	return 0;
+}
+
+static void ccdc_get_image_window(struct v4l2_rect *win)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		*win = ccdc_cfg.bayer.win;
+	else
+		*win = ccdc_cfg.ycbcr.win;
+}
+
+static unsigned int ccdc_get_line_length(void)
+{
+	struct ccdc_config_params_raw *config_params =
+				&ccdc_cfg.bayer.config_params;
+	unsigned int len;
+
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+		if ((config_params->alaw.enable) ||
+		    (config_params->data_sz == CCDC_DATA_8BITS))
+			len = ccdc_cfg.bayer.win.width;
+		else
+			len = ccdc_cfg.bayer.win.width * 2;
+	} else {
+		len = ccdc_cfg.ycbcr.win.width * 2;
+	}
+	return ALIGN(len, 32);
+}
+
+static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		ccdc_cfg.bayer.frm_fmt = frm_fmt;
+	else
+		ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+	return 0;
+}
+
+static enum ccdc_frmfmt ccdc_get_frame_format(void)
+{
+	if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
+		return ccdc_cfg.bayer.frm_fmt;
+	else
+		return ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static int ccdc_getfid(void)
+{
+	return (regr(CCDC_SYN_MODE) >> 15) & 1;
+}
+
+/* misc operations */
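+/*
+ * The low five bits of the frame buffer address are masked off before
+ * being written to SDR_ADDR, so buffer addresses are effectively
+ * 32-byte aligned.
+ */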
+static inline void ccdc_setfbaddr(unsigned long addr)
+{
+	regw(addr & 0xffffffe0, CCDC_SDR_ADDR);
+}
+
+static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+	ccdc_cfg.if_type = params->if_type;
+
+	switch (params->if_type) {
+	case VPFE_BT656:
+	case VPFE_YCBCR_SYNC_16:
+	case VPFE_YCBCR_SYNC_8:
+	case VPFE_BT656_10BIT:
+		ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+		ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+		break;
+	default:
+		/* TODO add support for raw bayer here */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void ccdc_save_context(void)
+{
+	ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
+	ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
+	ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
+	ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
+	ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
+	ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
+	ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
+	ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
+	ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
+	ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
+	ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
+	ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
+	ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
+	ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
+	ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
+	ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
+	ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
+	ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
+	ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
+	ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
+	ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
+	ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
+	ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
+	ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
+	ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
+	ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
+	ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
+	ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
+	ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
+	ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
+	ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
+	ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
+	ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
+	ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
+	ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
+	ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
+	ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
+}
+
+static void ccdc_restore_context(void)
+{
+	regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
+	regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
+	regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
+	regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
+	regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
+	regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
+	regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
+	regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
+	regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
+	regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
+	regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
+	regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
+	regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
+	regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
+	regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
+	regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
+	regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
+	regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
+	regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
+	regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
+	regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
+	regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
+	regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
+	regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
+	regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
+	regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
+	regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
+	regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
+	regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
+	regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
+	regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
+	regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
+	regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
+	regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
+	regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
+	regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
+	regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
+}
+
+static struct ccdc_hw_device ccdc_hw_dev = {
+	.name = "DM6446 CCDC",
+	.owner = THIS_MODULE,
+	.hw_ops = {
+		.open = ccdc_open,
+		.close = ccdc_close,
+		.reset = ccdc_sbl_reset,
+		.enable = ccdc_enable,
+		.set_hw_if_params = ccdc_set_hw_if_params,
+		.set_params = ccdc_set_params,
+		.configure = ccdc_configure,
+		.set_buftype = ccdc_set_buftype,
+		.get_buftype = ccdc_get_buftype,
+		.enum_pix = ccdc_enum_pix,
+		.set_pixel_format = ccdc_set_pixel_format,
+		.get_pixel_format = ccdc_get_pixel_format,
+		.set_frame_format = ccdc_set_frame_format,
+		.get_frame_format = ccdc_get_frame_format,
+		.set_image_window = ccdc_set_image_window,
+		.get_image_window = ccdc_get_image_window,
+		.get_line_length = ccdc_get_line_length,
+		.setfbaddr = ccdc_setfbaddr,
+		.getfid = ccdc_getfid,
+	},
+};
+
+static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
+{
+	struct resource	*res;
+	int status = 0;
+
+	/*
+	 * first try to register with vpfe. If not correct platform, then we
+	 * don't have to iomap
+	 */
+	status = vpfe_register_ccdc_device(&ccdc_hw_dev);
+	if (status < 0)
+		return status;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		status = -ENODEV;
+		goto fail_nores;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), res->name);
+	if (!res) {
+		status = -EBUSY;
+		goto fail_nores;
+	}
+
+	ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+	if (!ccdc_cfg.base_addr) {
+		status = -ENOMEM;
+		goto fail_nomem;
+	}
+
+	/* Get and enable Master clock */
+	ccdc_cfg.mclk = clk_get(&pdev->dev, "master");
+	if (IS_ERR(ccdc_cfg.mclk)) {
+		status = PTR_ERR(ccdc_cfg.mclk);
+		goto fail_nomap;
+	}
+	if (clk_enable(ccdc_cfg.mclk)) {
+		status = -ENODEV;
+		goto fail_mclk;
+	}
+
+	/* Get and enable Slave clock */
+	ccdc_cfg.sclk = clk_get(&pdev->dev, "slave");
+	if (IS_ERR(ccdc_cfg.sclk)) {
+		status = PTR_ERR(ccdc_cfg.sclk);
+		goto fail_mclk;
+	}
+	if (clk_enable(ccdc_cfg.sclk)) {
+		status = -ENODEV;
+		goto fail_sclk;
+	}
+	ccdc_cfg.dev = &pdev->dev;
+	printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
+	return 0;
+fail_sclk:
+	clk_put(ccdc_cfg.sclk);
+fail_mclk:
+	clk_put(ccdc_cfg.mclk);
+fail_nomap:
+	iounmap(ccdc_cfg.base_addr);
+fail_nomem:
+	release_mem_region(res->start, resource_size(res));
+fail_nores:
+	vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+	return status;
+}
+
+static int dm644x_ccdc_remove(struct platform_device *pdev)
+{
+	struct resource	*res;
+
+	clk_put(ccdc_cfg.mclk);
+	clk_put(ccdc_cfg.sclk);
+	iounmap(ccdc_cfg.base_addr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
+	vpfe_unregister_ccdc_device(&ccdc_hw_dev);
+	return 0;
+}
+
+static int dm644x_ccdc_suspend(struct device *dev)
+{
+	/* Save CCDC context */
+	ccdc_save_context();
+	/* Disable CCDC */
+	ccdc_enable(0);
+	/* Disable both master and slave clock */
+	clk_disable(ccdc_cfg.mclk);
+	clk_disable(ccdc_cfg.sclk);
+
+	return 0;
+}
+
+static int dm644x_ccdc_resume(struct device *dev)
+{
+	/* Enable both master and slave clock */
+	clk_enable(ccdc_cfg.mclk);
+	clk_enable(ccdc_cfg.sclk);
+	/* Restore CCDC context */
+	ccdc_restore_context();
+
+	return 0;
+}
+
+static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
+	.suspend = dm644x_ccdc_suspend,
+	.resume = dm644x_ccdc_resume,
+};
+
+static struct platform_driver dm644x_ccdc_driver = {
+	.driver = {
+		.name	= "dm644x_ccdc",
+		.owner = THIS_MODULE,
+		.pm = &dm644x_ccdc_pm_ops,
+	},
+	.remove = __devexit_p(dm644x_ccdc_remove),
+	.probe = dm644x_ccdc_probe,
+};
+
+module_platform_driver(dm644x_ccdc_driver);
diff --git a/drivers/media/platform/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
new file mode 100644
index 0000000..90370e4
--- /dev/null
+++ b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef _DM644X_CCDC_REGS_H
+#define _DM644X_CCDC_REGS_H
+
+/**************************************************************************\
+* Register OFFSET Definitions
+\**************************************************************************/
+#define CCDC_PID				0x0
+#define CCDC_PCR				0x4
+#define CCDC_SYN_MODE				0x8
+#define CCDC_HD_VD_WID				0xc
+#define CCDC_PIX_LINES				0x10
+#define CCDC_HORZ_INFO				0x14
+#define CCDC_VERT_START				0x18
+#define CCDC_VERT_LINES				0x1c
+#define CCDC_CULLING				0x20
+#define CCDC_HSIZE_OFF				0x24
+#define CCDC_SDOFST				0x28
+#define CCDC_SDR_ADDR				0x2c
+#define CCDC_CLAMP				0x30
+#define CCDC_DCSUB				0x34
+#define CCDC_COLPTN				0x38
+#define CCDC_BLKCMP				0x3c
+#define CCDC_FPC				0x40
+#define CCDC_FPC_ADDR				0x44
+#define CCDC_VDINT				0x48
+#define CCDC_ALAW				0x4c
+#define CCDC_REC656IF				0x50
+#define CCDC_CCDCFG				0x54
+#define CCDC_FMTCFG				0x58
+#define CCDC_FMT_HORZ				0x5c
+#define CCDC_FMT_VERT				0x60
+#define CCDC_FMT_ADDR0				0x64
+#define CCDC_FMT_ADDR1				0x68
+#define CCDC_FMT_ADDR2				0x6c
+#define CCDC_FMT_ADDR3				0x70
+#define CCDC_FMT_ADDR4				0x74
+#define CCDC_FMT_ADDR5				0x78
+#define CCDC_FMT_ADDR6				0x7c
+#define CCDC_FMT_ADDR7				0x80
+#define CCDC_PRGEVEN_0				0x84
+#define CCDC_PRGEVEN_1				0x88
+#define CCDC_PRGODD_0				0x8c
+#define CCDC_PRGODD_1				0x90
+#define CCDC_VP_OUT				0x94
+#define CCDC_REG_END				0x98
+
+/***************************************************************
+*	Define for various register bit mask and shifts for CCDC
+****************************************************************/
+#define CCDC_FID_POL_MASK			1
+#define CCDC_FID_POL_SHIFT			4
+#define CCDC_HD_POL_MASK			1
+#define CCDC_HD_POL_SHIFT			3
+#define CCDC_VD_POL_MASK			1
+#define CCDC_VD_POL_SHIFT			2
+#define CCDC_HSIZE_OFF_MASK			0xffffffe0
+#define CCDC_32BYTE_ALIGN_VAL			31
+#define CCDC_FRM_FMT_MASK			0x1
+#define CCDC_FRM_FMT_SHIFT			7
+#define CCDC_DATA_SZ_MASK			7
+#define CCDC_DATA_SZ_SHIFT			8
+#define CCDC_PIX_FMT_MASK			3
+#define CCDC_PIX_FMT_SHIFT			12
+#define CCDC_VP2SDR_DISABLE			0xFFFBFFFF
+#define CCDC_WEN_ENABLE				(1 << 17)
+#define CCDC_SDR2RSZ_DISABLE			0xFFF7FFFF
+#define CCDC_VDHDEN_ENABLE			(1 << 16)
+#define CCDC_LPF_ENABLE				(1 << 14)
+#define CCDC_ALAW_ENABLE			(1 << 3)
+#define CCDC_ALAW_GAMA_WD_MASK			7
+#define CCDC_BLK_CLAMP_ENABLE			(1 << 31)
+#define CCDC_BLK_SGAIN_MASK			0x1F
+#define CCDC_BLK_ST_PXL_MASK			0x7FFF
+#define CCDC_BLK_ST_PXL_SHIFT			10
+#define CCDC_BLK_SAMPLE_LN_MASK			7
+#define CCDC_BLK_SAMPLE_LN_SHIFT		28
+#define CCDC_BLK_SAMPLE_LINE_MASK		7
+#define CCDC_BLK_SAMPLE_LINE_SHIFT		25
+#define CCDC_BLK_DC_SUB_MASK			0x03FFF
+#define CCDC_BLK_COMP_MASK			0xFF
+#define CCDC_BLK_COMP_GB_COMP_SHIFT		8
+#define CCDC_BLK_COMP_GR_COMP_SHIFT		16
+#define CCDC_BLK_COMP_R_COMP_SHIFT		24
+#define CCDC_LATCH_ON_VSYNC_DISABLE		(1 << 15)
+#define CCDC_FPC_ENABLE				(1 << 15)
+#define CCDC_FPC_DISABLE			0
+#define CCDC_FPC_FPC_NUM_MASK 			0x7FFF
+#define CCDC_DATA_PACK_ENABLE			(1 << 11)
+#define CCDC_FMTCFG_VPIN_MASK			7
+#define CCDC_FMTCFG_VPIN_SHIFT			12
+#define CCDC_FMT_HORZ_FMTLNH_MASK		0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_MASK		0x1FFF
+#define CCDC_FMT_HORZ_FMTSPH_SHIFT		16
+#define CCDC_FMT_VERT_FMTLNV_MASK		0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_MASK		0x1FFF
+#define CCDC_FMT_VERT_FMTSLV_SHIFT		16
+#define CCDC_VP_OUT_VERT_NUM_MASK		0x3FFF
+#define CCDC_VP_OUT_VERT_NUM_SHIFT		17
+#define CCDC_VP_OUT_HORZ_NUM_MASK		0x1FFF
+#define CCDC_VP_OUT_HORZ_NUM_SHIFT		4
+#define CCDC_VP_OUT_HORZ_ST_MASK		0xF
+#define CCDC_HORZ_INFO_SPH_SHIFT		16
+#define CCDC_VERT_START_SLV0_SHIFT		16
+#define CCDC_VDINT_VDINT0_SHIFT			16
+#define CCDC_VDINT_VDINT1_MASK			0xFFFF
+#define CCDC_PPC_RAW				1
+#define CCDC_DCSUB_DEFAULT_VAL			0
+#define CCDC_CLAMP_DEFAULT_VAL			0
+#define CCDC_ENABLE_VIDEO_PORT			0x8000
+#define CCDC_DISABLE_VIDEO_PORT			0
+#define CCDC_COLPTN_VAL				0xBB11BB11
+#define CCDC_TWO_BYTES_PER_PIXEL		2
+#define CCDC_INTERLACED_IMAGE_INVERT		0x4B6D
+#define CCDC_INTERLACED_NO_IMAGE_INVERT		0x0249
+#define CCDC_PROGRESSIVE_IMAGE_INVERT		0x4000
+#define CCDC_PROGRESSIVE_NO_IMAGE_INVERT	0
+#define CCDC_INTERLACED_HEIGHT_SHIFT		1
+#define CCDC_SYN_MODE_INPMOD_SHIFT		12
+#define CCDC_SYN_MODE_INPMOD_MASK		3
+#define CCDC_SYN_MODE_8BITS			(7 << 8)
+#define CCDC_SYN_MODE_10BITS			(6 << 8)
+#define CCDC_SYN_MODE_11BITS			(5 << 8)
+#define CCDC_SYN_MODE_12BITS			(4 << 8)
+#define CCDC_SYN_MODE_13BITS			(3 << 8)
+#define CCDC_SYN_MODE_14BITS			(2 << 8)
+#define CCDC_SYN_MODE_15BITS			(1 << 8)
+#define CCDC_SYN_MODE_16BITS			(0 << 8)
+#define CCDC_SYN_FLDMODE_MASK			1
+#define CCDC_SYN_FLDMODE_SHIFT			7
+#define CCDC_REC656IF_BT656_EN			3
+#define CCDC_SYN_MODE_VD_POL_NEGATIVE		(1 << 2)
+#define CCDC_CCDCFG_Y8POS_SHIFT			11
+#define CCDC_CCDCFG_BW656_10BIT 		(1 << 5)
+#define CCDC_SDOFST_FIELD_INTERLEAVED		0x249
+#define CCDC_NO_CULLING				0xffff00ff
+#endif
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
new file mode 100644
index 0000000..b99d542
--- /dev/null
+++ b/drivers/media/platform/davinci/isif.c
@@ -0,0 +1,1162 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Image Sensor Interface (ISIF) driver
+ *
+ * This driver is for configuring the ISIF IP available on DM365 or any other
+ * TI SoCs. This is used for capturing yuv or bayer video or image data
+ * from a decoder or sensor. This IP is similar to the CCDC IP on DM355
+ * and DM6446, but with enhanced or additional ip blocks. The driver
+ * configures the ISIF upon commands from the vpfe bridge driver through
+ * ccdc_hw_device interface.
+ *
+ * TODO: 1) Raw bayer parameter settings and bayer capture
+ *	 2) Add support for control ioctl
+ */
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <mach/mux.h>
+
+#include <media/davinci/isif.h>
+#include <media/davinci/vpss.h>
+
+#include "isif_regs.h"
+#include "ccdc_hw_device.h"
+
+/* Defaults for module configuration parameters */
+static struct isif_config_params_raw isif_config_defaults = {
+	.linearize = {
+		.en = 0,
+		.corr_shft = ISIF_NO_SHIFT,
+		.scale_fact = {1, 0},
+	},
+	.df_csc = {
+		.df_or_csc = 0,
+		.csc = {
+			.en = 0,
+		},
+	},
+	.dfc = {
+		.en = 0,
+	},
+	.bclamp = {
+		.en = 0,
+	},
+	.gain_offset = {
+		.gain = {
+			.r_ye = {1, 0},
+			.gr_cy = {1, 0},
+			.gb_g = {1, 0},
+			.b_mg = {1, 0},
+		},
+	},
+	.culling = {
+		.hcpat_odd = 0xff,
+		.hcpat_even = 0xff,
+		.vcpat = 0xff,
+	},
+	.compress = {
+		.alg = ISIF_ALAW,
+	},
+};
+
+/* ISIF operation configuration */
+static struct isif_oper_config {
+	struct device *dev;
+	enum vpfe_hw_if_type if_type;
+	struct isif_ycbcr_config ycbcr;
+	struct isif_params_raw bayer;
+	enum isif_data_pack data_pack;
+	/* Master clock */
+	struct clk *mclk;
+	/* ISIF base address */
+	void __iomem *base_addr;
+	/* ISIF Linear Table 0 */
+	void __iomem *linear_tbl0_addr;
+	/* ISIF Linear Table 1 */
+	void __iomem *linear_tbl1_addr;
+} isif_cfg = {
+	.ycbcr = {
+		.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
+		.frm_fmt = CCDC_FRMFMT_INTERLACED,
+		.win = ISIF_WIN_NTSC,
+		.fid_pol = VPFE_PINPOL_POSITIVE,
+		.vd_pol = VPFE_PINPOL_POSITIVE,
+		.hd_pol = VPFE_PINPOL_POSITIVE,
+		.pix_order = CCDC_PIXORDER_CBYCRY,
+		.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED,
+	},
+	.bayer = {
+		.pix_fmt = CCDC_PIXFMT_RAW,
+		.frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
+		.win = ISIF_WIN_VGA,
+		.fid_pol = VPFE_PINPOL_POSITIVE,
+		.vd_pol = VPFE_PINPOL_POSITIVE,
+		.hd_pol = VPFE_PINPOL_POSITIVE,
+		.gain = {
+			.r_ye = {1, 0},
+			.gr_cy = {1, 0},
+			.gb_g = {1, 0},
+			.b_mg = {1, 0},
+		},
+		.cfa_pat = ISIF_CFA_PAT_MOSAIC,
+		.data_msb = ISIF_BIT_MSB_11,
+		.config_params = {
+			.data_shift = ISIF_NO_SHIFT,
+			.col_pat_field0 = {
+				.olop = ISIF_GREEN_BLUE,
+				.olep = ISIF_BLUE,
+				.elop = ISIF_RED,
+				.elep = ISIF_GREEN_RED,
+			},
+			.col_pat_field1 = {
+				.olop = ISIF_GREEN_BLUE,
+				.olep = ISIF_BLUE,
+				.elop = ISIF_RED,
+				.elep = ISIF_GREEN_RED,
+			},
+			.test_pat_gen = 0,
+		},
+	},
+	.data_pack = ISIF_DATA_PACK8,
+};
+
+/* Raw Bayer formats */
+static const u32 isif_raw_bayer_pix_formats[] = {
+	V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
+
+/* Raw YUV formats */
+static const u32 isif_raw_yuv_pix_formats[] = {
+	V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+
+/* register access routines */
+static inline u32 regr(u32 offset)
+{
+	return __raw_readl(isif_cfg.base_addr + offset);
+}
+
+static inline void regw(u32 val, u32 offset)
+{
+	__raw_writel(val, isif_cfg.base_addr + offset);
+}
+
+/* reg_modify() - read, modify and write register */
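+/*
+ * For example, reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN) updates
+ * only the VDHDEN bit of SYNCEN and leaves the remaining bits unchanged.
+ */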
+static inline u32 reg_modify(u32 mask, u32 val, u32 offset)
+{
+	u32 new_val = (regr(offset) & ~mask) | (val & mask);
+
+	regw(new_val, offset);
+	return new_val;
+}
+
+static inline void regw_lin_tbl(u32 val, u32 offset, int i)
+{
+	if (!i)
+		__raw_writel(val, isif_cfg.linear_tbl0_addr + offset);
+	else
+		__raw_writel(val, isif_cfg.linear_tbl1_addr + offset);
+}
+
+static void isif_disable_all_modules(void)
+{
+	/* disable BC */
+	regw(0, CLAMPCFG);
+	/* disable vdfc */
+	regw(0, DFCCTL);
+	/* disable CSC */
+	regw(0, CSCCTL);
+	/* disable linearization */
+	regw(0, LINCFG0);
+	/* disable other modules here as they are supported */
+}
+
+static void isif_enable(int en)
+{
+	if (!en) {
+		/* Before disable isif, disable all ISIF modules */
+		isif_disable_all_modules();
+		/*
+		 * wait for next VD. Assume lowest scan rate is 12 Hz. So
+		 * 100 msec delay is good enough
+		 */
+		msleep(100);
+	}
+	reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN);
+}
+
+static void isif_enable_output_to_sdram(int en)
+{
+	reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
+}
+
+static void isif_config_culling(struct isif_cul *cul)
+{
+	u32 val;
+
+	/* Horizontal pattern */
+	val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd;
+	regw(val, CULH);
+
+	/* vertical pattern */
+	regw(cul->vcpat, CULV);
+
+	/* LPF */
+	reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT,
+		  cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
+}
+
+static void isif_config_gain_offset(void)
+{
+	struct isif_gain_offsets_adj *gain_off_p =
+		&isif_cfg.bayer.config_params.gain_offset;
+	u32 val;
+
+	val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) |
+	      (!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) |
+	      (!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) |
+	      (!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) |
+	      (!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) |
+	      (!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT);
+
+	reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
+
+	val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) |
+	       gain_off_p->gain.r_ye.decimal;
+	regw(val, CRGAIN);
+
+	val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) |
+	       gain_off_p->gain.gr_cy.decimal;
+	regw(val, CGRGAIN);
+
+	val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) |
+	       gain_off_p->gain.gb_g.decimal;
+	regw(val, CGBGAIN);
+
+	val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) |
+	       gain_off_p->gain.b_mg.decimal;
+	regw(val, CBGAIN);
+
+	regw(gain_off_p->offset, COFSTA);
+}
+
+static void isif_restore_defaults(void)
+{
+	enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
+
+	dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults...");
+	isif_cfg.bayer.config_params = isif_config_defaults;
+	/* Enable clock to ISIF, IPIPEIF and BL */
+	vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
+	vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
+	vpss_enable_clock(VPSS_BL_CLOCK, 1);
+	/* Set default offset and gain */
+	isif_config_gain_offset();
+	vpss_select_ccdc_source(source);
+	dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults...");
+}
+
+static int isif_open(struct device *device)
+{
+	isif_restore_defaults();
+	return 0;
+}
+
+/* This function will configure the window size to be capture in ISIF reg */
+static void isif_setwin(struct v4l2_rect *image_win,
+			enum ccdc_frmfmt frm_fmt, int ppc)
+{
+	int horz_start, horz_nr_pixels;
+	int vert_start, vert_nr_lines;
+	int mid_img = 0;
+
+	dev_dbg(isif_cfg.dev, "\nStarting isif_setwin...");
+	/*
+	 * ppc - per pixel count. Indicates how many pixels per cell are
+	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
+	 * so 2. For raw capture this is 1
+	 */
+	horz_start = image_win->left << (ppc - 1);
+	horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
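+	/*
+	 * For example, with ppc == 2 (YCbCr) a left offset of 8 pixels is
+	 * written as 16 and a 720-pixel width as 720 * 2 - 1 = 1439, while
+	 * with ppc == 1 (raw) the values are used as-is (width - 1)
+	 */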
+
+	/* Writing the horizontal info into the registers */
+	regw(horz_start & START_PX_HOR_MASK, SPH);
+	regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
+	vert_start = image_win->top;
+
+	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		vert_nr_lines = (image_win->height >> 1) - 1;
+		vert_start >>= 1;
+		/* To account for VD since line 0 doesn't have any data */
+		vert_start += 1;
+	} else {
+		/* To account for VD since line 0 doesn't have any data */
+		vert_start += 1;
+		vert_nr_lines = image_win->height - 1;
+		/* configure VDINT0 and VDINT1 */
+		mid_img = vert_start + (image_win->height / 2);
+		regw(mid_img, VDINT1);
+	}
+
+	regw(0, VDINT0);
+	regw(vert_start & START_VER_ONE_MASK, SLV0);
+	regw(vert_start & START_VER_TWO_MASK, SLV1);
+	regw(vert_nr_lines & NUM_LINES_VER, LNV);
+}
+
+static void isif_config_bclamp(struct isif_black_clamp *bc)
+{
+	u32 val;
+
+	/*
+	 * DC Offset is always added to image data irrespective of bc enable
+	 * status
+	 */
+	regw(bc->dc_offset, CLDCOFST);
+
+	if (bc->en) {
+		val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
+
+		/* Enable BC and horizontal clamp calculation parameters */
+		val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
+
+		regw(val, CLAMPCFG);
+
+		if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) {
+			/*
+			 * Window count for calculation
+			 * Base window selection
+			 * pixel limit
+			 * Horizontal size of window
+			 * vertical size of the window
+			 * Horizontal start position of the window
+			 * Vertical start position of the window
+			 */
+			val = bc->horz.win_count_calc |
+			      ((!!bc->horz.base_win_sel_calc) <<
+				ISIF_HORZ_BC_WIN_SEL_SHIFT) |
+			      ((!!bc->horz.clamp_pix_limit) <<
+				ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
+			      (bc->horz.win_h_sz_calc <<
+				ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
+			      (bc->horz.win_v_sz_calc <<
+				ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
+			regw(val, CLHWIN0);
+
+			regw(bc->horz.win_start_h_calc, CLHWIN1);
+			regw(bc->horz.win_start_v_calc, CLHWIN2);
+		}
+
+		/* vertical clamp calculation parameters */
+
+		/* Reset clamp value sel for previous line */
+		val |=
+		(bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) |
+		(bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT);
+		regw(val, CLVWIN0);
+
+		/* Optical Black horizontal start position */
+		regw(bc->vert.ob_start_h, CLVWIN1);
+		/* Optical Black vertical start position */
+		regw(bc->vert.ob_start_v, CLVWIN2);
+		/* Optical Black vertical size for calculation */
+		regw(bc->vert.ob_v_sz_calc, CLVWIN3);
+		/* Vertical start position for BC subtraction */
+		regw(bc->vert_start_sub, CLSV);
+	}
+}
+
+static void isif_config_linearization(struct isif_linearize *linearize)
+{
+	u32 val, i;
+
+	if (!linearize->en) {
+		regw(0, LINCFG0);
+		return;
+	}
+
+	/* shift value for correction & enable linearization (set lsb) */
+	val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1;
+	regw(val, LINCFG0);
+
+	/* Scale factor */
+	val = ((!!linearize->scale_fact.integer) <<
+	       ISIF_LIN_SCALE_FACT_INTEG_SHIFT) |
+	       linearize->scale_fact.decimal;
+	regw(val, LINCFG1);
+
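+	/*
+	 * Entries alternate between the two linearization table memories:
+	 * even indices go to table 0 and odd indices to table 1, each at
+	 * a byte offset of (i / 2) * 4
+	 */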
+	for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) {
+		if (i % 2)
+			regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1);
+		else
+			regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0);
+	}
+}
+
+static int isif_config_dfc(struct isif_dfc *vdfc)
+{
+	/* initialize retries to loop for max ~ 250 usec */
+	u32 val, count, retries = loops_per_jiffy / (4000/HZ);
+	int i;
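+	/*
+	 * loops_per_jiffy iterations take 1/HZ sec, so dividing by
+	 * (4000/HZ) gives roughly the iteration count for 1/4000 sec,
+	 * i.e. ~250 usec
+	 */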
+
+	if (!vdfc->en)
+		return 0;
+
+	/* Correction mode */
+	val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT);
+
+	/* Correct whole line or partial */
+	if (vdfc->corr_whole_line)
+		val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+
+	/* level shift value */
+	val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT;
+
+	regw(val, DFCCTL);
+
+	/* Defect saturation level */
+	regw(vdfc->def_sat_level, VDFSATLV);
+
+	regw(vdfc->table[0].pos_vert, DFCMEM0);
+	regw(vdfc->table[0].pos_horz, DFCMEM1);
+	if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
+	    vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+		regw(vdfc->table[0].level_at_pos, DFCMEM2);
+		regw(vdfc->table[0].level_up_pixels, DFCMEM3);
+		regw(vdfc->table[0].level_low_pixels, DFCMEM4);
+	}
+
+	/* set DFCMARST and set DFCMWR */
+	val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1;
+	regw(val, DFCMEMCTL);
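+	/* DFCMWR (bit 0) clears once the table entry write completes */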
+
+	count = retries;
+	while (count && (regr(DFCMEMCTL) & 0x1))
+		count--;
+
+	if (!count) {
+		dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n");
+		return -1;
+	}
+
+	for (i = 1; i < vdfc->num_vdefects; i++) {
+		regw(vdfc->table[i].pos_vert, DFCMEM0);
+		regw(vdfc->table[i].pos_horz, DFCMEM1);
+		if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
+		    vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
+			regw(vdfc->table[i].level_at_pos, DFCMEM2);
+			regw(vdfc->table[i].level_up_pixels, DFCMEM3);
+			regw(vdfc->table[i].level_low_pixels, DFCMEM4);
+		}
+		val = regr(DFCMEMCTL);
+		/* clear DFCMARST and set DFCMWR */
+		val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+		val |= 1;
+		regw(val, DFCMEMCTL);
+
+		count = retries;
+		while (count && (regr(DFCMEMCTL) & 0x1))
+			count--;
+
+		if (!count) {
+			dev_err(isif_cfg.dev,
+				"defect table write timeout !!!\n");
+			return -1;
+		}
+	}
+	if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) {
+		/* Extra cycle needed */
+		regw(0, DFCMEM0);
+		regw(0x1FFF, DFCMEM1);
+		regw(1, DFCMEMCTL);
+	}
+
+	/* enable VDFC */
+	reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT),
+		   DFCCTL);
+	return 0;
+}
+
+static void isif_config_csc(struct isif_df_csc *df_csc)
+{
+	u32 val1 = 0, val2 = 0, i;
+
+	if (!df_csc->csc.en) {
+		regw(0, CSCCTL);
+		return;
+	}
+	for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) {
+		if ((i % 2) == 0) {
+			/* CSCM - LSB */
+			val1 = (df_csc->csc.coeff[i].integer <<
+				ISIF_CSC_COEF_INTEG_SHIFT) |
+				df_csc->csc.coeff[i].decimal;
+		} else {
+
+			/* CSCM - MSB */
+			val2 = (df_csc->csc.coeff[i].integer <<
+				ISIF_CSC_COEF_INTEG_SHIFT) |
+				df_csc->csc.coeff[i].decimal;
+			val2 <<= ISIF_CSCM_MSB_SHIFT;
+			val2 |= val1;
+			regw(val2, (CSCM0 + ((i - 1) << 1)));
+		}
+	}
+
+	/* program the active area */
+	regw(df_csc->start_pix, FMTSPH);
+	/*
+	 * one extra pixel is required for CSC. Normally the number of
+	 * pixels - 1 should be configured in this register, so we would
+	 * subtract 1 before writing to FMTLNH, but we skip that since
+	 * CSC requires the extra pixel
+	 */
+	regw(df_csc->num_pixels, FMTLNH);
+	regw(df_csc->start_line, FMTSLV);
+	/*
+	 * one extra line is required for CSC; see the note for
+	 * num_pixels above
+	 */
+	regw(df_csc->num_lines, FMTLNV);
+
+	/* Enable CSC */
+	regw(1, CSCCTL);
+}
+
+static int isif_config_raw(void)
+{
+	struct isif_params_raw *params = &isif_cfg.bayer;
+	struct isif_config_params_raw *module_params =
+		&isif_cfg.bayer.config_params;
+	struct vpss_pg_frame_size frame_size;
+	struct vpss_sync_pol sync;
+	u32 val;
+
+	dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n");
+
+	/*
+	 * Configure CCDCFG register:-
+	 * Set CCD Not to swap input since input is RAW data
+	 * Set FID detection function to Latch at V-Sync
+	 * Set WENLOG - isif valid area
+	 * Set TRGSEL
+	 * Set EXTRG
+	 * Packed to 8 or 16 bits
+	 */
+
+	val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
+		ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
+		ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack;
+
+	dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val);
+	regw(val, CCDCFG);
+
+	/*
+	 * Configure the vertical sync polarity(MODESET.VDPOL)
+	 * Configure the horizontal sync polarity (MODESET.HDPOL)
+	 * Configure frame id polarity (MODESET.FLDPOL)
+	 * Configure data polarity
+	 * Configure External WEN Selection
+	 * Configure frame format(progressive or interlace)
+	 * Configure pixel format (Input mode)
+	 * Configure the data shift
+	 */
+
+	val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) |
+		(params->hd_pol << ISIF_HD_POL_SHIFT) |
+		(params->fid_pol << ISIF_FID_POL_SHIFT) |
+		(ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) |
+		(ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) |
+		(params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
+		(params->pix_fmt << ISIF_INPUT_SHIFT) |
+		(params->config_params.data_shift << ISIF_DATASFT_SHIFT);
+
+	regw(val, MODESET);
+	dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val);
+
+	/*
+	 * Configure GAMMAWD register
+	 * CFA pattern setting
+	 */
+	val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT;
+
+	/* Gamma msb */
+	if (module_params->compress.alg == ISIF_ALAW)
+		val |= ISIF_ALAW_ENABLE;
+
+	val |= (params->data_msb << ISIF_ALAW_GAMA_WD_SHIFT);
+	regw(val, CGAMMAWD);
+
+	/* Configure DPCM compression settings */
+	if (module_params->compress.alg == ISIF_DPCM) {
+		val =  BIT(ISIF_DPCM_EN_SHIFT) |
+		       (module_params->compress.pred <<
+		       ISIF_DPCM_PREDICTOR_SHIFT);
+	}
+
+	regw(val, MISC);
+
+	/* Configure Gain & Offset */
+	isif_config_gain_offset();
+
+	/* Configure Color pattern */
+	val = (params->config_params.col_pat_field0.olop) |
+	      (params->config_params.col_pat_field0.olep << 2) |
+	      (params->config_params.col_pat_field0.elop << 4) |
+	      (params->config_params.col_pat_field0.elep << 6) |
+	      (params->config_params.col_pat_field1.olop << 8) |
+	      (params->config_params.col_pat_field1.olep << 10) |
+	      (params->config_params.col_pat_field1.elop << 12) |
+	      (params->config_params.col_pat_field1.elep << 14);
+	regw(val, CCOLP);
+	dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val);
+
+	/* Configure HSIZE register  */
+	val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT;
+
+	/* calculate the line offset in units of 32 bytes, based on the pack value */
+	if (isif_cfg.data_pack == ISIF_PACK_8BIT)
+		val |= ((params->win.width + 31) >> 5);
+	else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
+		val |= (((params->win.width +
+		       (params->win.width >> 2)) + 31) >> 5);
+	else
+		val |= (((params->win.width * 2) + 31) >> 5);
+	regw(val, HSIZE);
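+	/*
+	 * For example, an unpacked 16-bit 640-pixel line programs
+	 * (640 * 2 + 31) >> 5 = 40 (i.e. 1280 bytes in 32-byte units),
+	 * while the same width packed to 8 bits programs 20
+	 */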
+
+	/* Configure SDOFST register  */
+	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		if (params->image_invert_en) {
+			/* For interlace inverse mode */
+			regw(0x4B6D, SDOFST);
+			dev_dbg(isif_cfg.dev, "Writing 0x4B6D to SDOFST...\n");
+		} else {
+			/* For interlace non inverse mode */
+			regw(0x0B6D, SDOFST);
+			dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n");
+		}
+	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+		if (params->image_invert_en) {
+			/* For progressive inverse mode */
+			regw(0x4000, SDOFST);
+			dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n");
+		} else {
+			/* For progressive non inverse mode */
+			regw(0x0000, SDOFST);
+			dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n");
+		}
+	}
+
+	/* Configure video window */
+	isif_setwin(&params->win, params->frm_fmt, 1);
+
+	/* Configure Black Clamp */
+	isif_config_bclamp(&module_params->bclamp);
+
+	/* Configure Vertical Defect Pixel Correction */
+	if (isif_config_dfc(&module_params->dfc) < 0)
+		return -EFAULT;
+
+	if (!module_params->df_csc.df_or_csc)
+		/* Configure Color Space Conversion */
+		isif_config_csc(&module_params->df_csc);
+
+	isif_config_linearization(&module_params->linearize);
+
+	/* Configure Culling */
+	isif_config_culling(&module_params->culling);
+
+	/* Configure horizontal and vertical offsets(DFC,LSC,Gain) */
+	regw(module_params->horz_offset, DATAHOFST);
+	regw(module_params->vert_offset, DATAVOFST);
+
+	/* Setup test pattern if enabled */
+	if (params->config_params.test_pat_gen) {
+		/* Use the HD/VD pol settings from user */
+		sync.ccdpg_hdpol = params->hd_pol;
+		sync.ccdpg_vdpol = params->vd_pol;
+		dm365_vpss_set_sync_pol(sync);
+		frame_size.hlpfr = isif_cfg.bayer.win.width;
+		frame_size.pplen = isif_cfg.bayer.win.height;
+		dm365_vpss_set_pg_frame_size(frame_size);
+		vpss_select_ccdc_source(VPSS_PGLPBK);
+	}
+
+	dev_dbg(isif_cfg.dev, "\nEnd of isif_config_raw...\n");
+	return 0;
+}
+
+static int isif_set_buftype(enum ccdc_buftype buf_type)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER)
+		isif_cfg.bayer.buf_type = buf_type;
+	else
+		isif_cfg.ycbcr.buf_type = buf_type;
+
+	return 0;
+}
+
+static enum ccdc_buftype isif_get_buftype(void)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER)
+		return isif_cfg.bayer.buf_type;
+
+	return isif_cfg.ycbcr.buf_type;
+}
+
+static int isif_enum_pix(u32 *pix, int i)
+{
+	int ret = -EINVAL;
+
+	if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+		if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) {
+			*pix = isif_raw_bayer_pix_formats[i];
+			ret = 0;
+		}
+	} else {
+		if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) {
+			*pix = isif_raw_yuv_pix_formats[i];
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int isif_set_pixel_format(unsigned int pixfmt)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+		if (pixfmt == V4L2_PIX_FMT_SBGGR8) {
+			if ((isif_cfg.bayer.config_params.compress.alg !=
+			     ISIF_ALAW) &&
+			    (isif_cfg.bayer.config_params.compress.alg !=
+			     ISIF_DPCM)) {
+				dev_dbg(isif_cfg.dev,
+					"Either configure A-Law or DPCM\n");
+				return -EINVAL;
+			}
+			isif_cfg.data_pack = ISIF_PACK_8BIT;
+		} else if (pixfmt == V4L2_PIX_FMT_SBGGR16) {
+			isif_cfg.bayer.config_params.compress.alg =
+					ISIF_NO_COMPRESSION;
+			isif_cfg.data_pack = ISIF_PACK_16BIT;
+		} else
+			return -EINVAL;
+		isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+	} else {
+		if (pixfmt == V4L2_PIX_FMT_YUYV)
+			isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+		else if (pixfmt == V4L2_PIX_FMT_UYVY)
+			isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+		else
+			return -EINVAL;
+		isif_cfg.data_pack = ISIF_PACK_8BIT;
+	}
+	return 0;
+}
+
+static u32 isif_get_pixel_format(void)
+{
+	u32 pixfmt;
+
+	if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+		if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW ||
+		    isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM)
+			pixfmt = V4L2_PIX_FMT_SBGGR8;
+		else
+			pixfmt = V4L2_PIX_FMT_SBGGR16;
+	} else {
+		if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+			pixfmt = V4L2_PIX_FMT_YUYV;
+		else
+			pixfmt = V4L2_PIX_FMT_UYVY;
+	}
+	return pixfmt;
+}
+
+static int isif_set_image_window(struct v4l2_rect *win)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+		isif_cfg.bayer.win.top = win->top;
+		isif_cfg.bayer.win.left = win->left;
+		isif_cfg.bayer.win.width = win->width;
+		isif_cfg.bayer.win.height = win->height;
+	} else {
+		isif_cfg.ycbcr.win.top = win->top;
+		isif_cfg.ycbcr.win.left = win->left;
+		isif_cfg.ycbcr.win.width = win->width;
+		isif_cfg.ycbcr.win.height = win->height;
+	}
+	return 0;
+}
+
+static void isif_get_image_window(struct v4l2_rect *win)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER)
+		*win = isif_cfg.bayer.win;
+	else
+		*win = isif_cfg.ycbcr.win;
+}
+
+static unsigned int isif_get_line_length(void)
+{
+	unsigned int len;
+
+	if (isif_cfg.if_type == VPFE_RAW_BAYER) {
+		if (isif_cfg.data_pack == ISIF_PACK_8BIT)
+			len = isif_cfg.bayer.win.width;
+		else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
+			len = isif_cfg.bayer.win.width * 2 +
+				(isif_cfg.bayer.win.width >> 2);
+		else
+			len = isif_cfg.bayer.win.width * 2;
+	} else {
+		len = isif_cfg.ycbcr.win.width * 2;
+	}
+	return ALIGN(len, 32);
+}
+
+static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER)
+		isif_cfg.bayer.frm_fmt = frm_fmt;
+	else
+		isif_cfg.ycbcr.frm_fmt = frm_fmt;
+	return 0;
+}
+
+static enum ccdc_frmfmt isif_get_frame_format(void)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER)
+		return isif_cfg.bayer.frm_fmt;
+	return isif_cfg.ycbcr.frm_fmt;
+}
+
+static int isif_getfid(void)
+{
+	return (regr(MODESET) >> 15) & 0x1;
+}
+
+/* misc operations */
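+/*
+ * The frame buffer address is split across two registers: bits 31..21
+ * go to CADU and bits 20..5 to CADL, so the address is effectively
+ * 32-byte aligned (the low five bits are dropped).
+ */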
+static void isif_setfbaddr(unsigned long addr)
+{
+	regw((addr >> 21) & 0x07ff, CADU);
+	regw((addr >> 5) & 0x0ffff, CADL);
+}
+
+static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
+{
+	isif_cfg.if_type = params->if_type;
+
+	switch (params->if_type) {
+	case VPFE_BT656:
+	case VPFE_BT656_10BIT:
+	case VPFE_YCBCR_SYNC_8:
+		isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
+		isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+		break;
+	case VPFE_BT1120:
+	case VPFE_YCBCR_SYNC_16:
+		isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT;
+		isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+		break;
+	case VPFE_RAW_BAYER:
+		isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+		break;
+	default:
+		dev_dbg(isif_cfg.dev, "Invalid interface type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* This function will configure ISIF for YCbCr parameters. */
+static int isif_config_ycbcr(void)
+{
+	struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
+	struct vpss_pg_frame_size frame_size;
+	u32 modeset = 0, ccdcfg = 0;
+	struct vpss_sync_pol sync;
+
+	dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
+
+	/* configure pixel format or input mode */
+	modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) |
+		  (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
+		  (params->fid_pol << ISIF_FID_POL_SHIFT) |
+		  (params->hd_pol << ISIF_HD_POL_SHIFT) |
+		  (params->vd_pol << ISIF_VD_POL_SHIFT);
+
+	/* pack the data to 8-bit ISIFCFG */
+	switch (isif_cfg.if_type) {
+	case VPFE_BT656:
+		if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+			dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+			return -EINVAL;
+		}
+		modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT);
+		regw(3, REC656IF);
+		ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR;
+		break;
+	case VPFE_BT656_10BIT:
+		if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+			dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+			return -EINVAL;
+		}
+		/* setup BT.656, embedded sync  */
+		regw(3, REC656IF);
+		/* enable 10 bit mode in ccdcfg */
+		ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR |
+			ISIF_BW656_ENABLE;
+		break;
+	case VPFE_BT1120:
+		if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
+			dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+			return -EINVAL;
+		}
+		regw(3, REC656IF);
+		break;
+
+	case VPFE_YCBCR_SYNC_8:
+		ccdcfg |= ISIF_DATA_PACK8;
+		ccdcfg |= ISIF_YCINSWP_YCBCR;
+		if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
+			dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+			return -EINVAL;
+		}
+		break;
+	case VPFE_YCBCR_SYNC_16:
+		if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
+			dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		/* should never come here */
+		dev_dbg(isif_cfg.dev, "Invalid interface type\n");
+		return -EINVAL;
+	}
+
+	regw(modeset, MODESET);
+
+	/* Set up pix order */
+	ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT;
+
+	regw(ccdcfg, CCDCFG);
+
+	/* configure video window */
+	if ((isif_cfg.if_type == VPFE_BT1120) ||
+	    (isif_cfg.if_type == VPFE_YCBCR_SYNC_16))
+		isif_setwin(&params->win, params->frm_fmt, 1);
+	else
+		isif_setwin(&params->win, params->frm_fmt, 2);
+
+	/*
+	 * configure the horizontal line offset: the width is multiplied
+	 * by two to account for Y:Cb:Cr 4:2:2 data, rounded up to a
+	 * multiple of 32 bytes and written in units of 32 bytes
+	 */
+	regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE);
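+	/*
+	 * For example, a 720-pixel line is 1440 bytes of 4:2:2 data and
+	 * programs 1440 / 32 = 45 here
+	 */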
+
+	/* configure the memory line offset */
+	if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) &&
+	    (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED))
+		/* two fields are interleaved in memory */
+		regw(0x00000249, SDOFST);
+
+	/* Setup test pattern if enabled */
+	if (isif_cfg.bayer.config_params.test_pat_gen) {
+		sync.ccdpg_hdpol = params->hd_pol;
+		sync.ccdpg_vdpol = params->vd_pol;
+		dm365_vpss_set_sync_pol(sync);
+		frame_size.hlpfr = params->win.width;
+		frame_size.pplen = params->win.height;
+		dm365_vpss_set_pg_frame_size(frame_size);
+	}
+	return 0;
+}
+
+static int isif_configure(void)
+{
+	if (isif_cfg.if_type == VPFE_RAW_BAYER)
+		return isif_config_raw();
+	return isif_config_ycbcr();
+}
+
+static int isif_close(struct device *device)
+{
+	/* copy defaults to module params */
+	isif_cfg.bayer.config_params = isif_config_defaults;
+	return 0;
+}
+
+static struct ccdc_hw_device isif_hw_dev = {
+	.name = "ISIF",
+	.owner = THIS_MODULE,
+	.hw_ops = {
+		.open = isif_open,
+		.close = isif_close,
+		.enable = isif_enable,
+		.enable_out_to_sdram = isif_enable_output_to_sdram,
+		.set_hw_if_params = isif_set_hw_if_params,
+		.configure = isif_configure,
+		.set_buftype = isif_set_buftype,
+		.get_buftype = isif_get_buftype,
+		.enum_pix = isif_enum_pix,
+		.set_pixel_format = isif_set_pixel_format,
+		.get_pixel_format = isif_get_pixel_format,
+		.set_frame_format = isif_set_frame_format,
+		.get_frame_format = isif_get_frame_format,
+		.set_image_window = isif_set_image_window,
+		.get_image_window = isif_get_image_window,
+		.get_line_length = isif_get_line_length,
+		.setfbaddr = isif_setfbaddr,
+		.getfid = isif_getfid,
+	},
+};
+
+static int __devinit isif_probe(struct platform_device *pdev)
+{
+	void (*setup_pinmux)(void);
+	struct resource	*res;
+	void __iomem *addr;
+	int status = 0, i;
+
+	/*
+	 * first try to register with vpfe. If not correct platform, then we
+	 * don't have to iomap
+	 */
+	status = vpfe_register_ccdc_device(&isif_hw_dev);
+	if (status < 0)
+		return status;
+
+	/* Get and enable Master clock */
+	isif_cfg.mclk = clk_get(&pdev->dev, "master");
+	if (IS_ERR(isif_cfg.mclk)) {
+		status = PTR_ERR(isif_cfg.mclk);
+		goto fail_mclk;
+	}
+	if (clk_enable(isif_cfg.mclk)) {
+		status = -ENODEV;
+		goto fail_mclk;
+	}
+
+	/* Platform data holds setup_pinmux function ptr */
+	if (NULL == pdev->dev.platform_data) {
+		status = -ENODEV;
+		goto fail_mclk;
+	}
+	setup_pinmux = pdev->dev.platform_data;
+	/*
+	 * setup Mux configuration for ccdc which may be different for
+	 * different SoCs using this CCDC
+	 */
+	setup_pinmux();
+
+	i = 0;
+	/* Get the ISIF base address, linearization table0 and table1 addr. */
+	while (i < 3) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			status = -ENODEV;
+			goto fail_nobase_res;
+		}
+		res = request_mem_region(res->start, resource_size(res),
+					 res->name);
+		if (!res) {
+			status = -EBUSY;
+			goto fail_nobase_res;
+		}
+		addr = ioremap_nocache(res->start, resource_size(res));
+		if (!addr) {
+			status = -ENOMEM;
+			goto fail_base_iomap;
+		}
+		switch (i) {
+		case 0:
+			/* ISIF base address */
+			isif_cfg.base_addr = addr;
+			break;
+		case 1:
+			/* ISIF linear tbl0 address */
+			isif_cfg.linear_tbl0_addr = addr;
+			break;
+		default:
+			/* ISIF linear tbl1 address */
+			isif_cfg.linear_tbl1_addr = addr;
+			break;
+		}
+		i++;
+	}
+	isif_cfg.dev = &pdev->dev;
+
+	printk(KERN_NOTICE "%s is registered with vpfe.\n",
+		isif_hw_dev.name);
+	return 0;
+fail_base_iomap:
+	release_mem_region(res->start, resource_size(res));
+	i--;
+fail_nobase_res:
+	if (isif_cfg.base_addr)
+		iounmap(isif_cfg.base_addr);
+	if (isif_cfg.linear_tbl0_addr)
+		iounmap(isif_cfg.linear_tbl0_addr);
+
+	while (i >= 0) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		release_mem_region(res->start, resource_size(res));
+		i--;
+	}
+fail_mclk:
+	clk_put(isif_cfg.mclk);
+	vpfe_unregister_ccdc_device(&isif_hw_dev);
+	return status;
+}
+
+static int isif_remove(struct platform_device *pdev)
+{
+	struct resource	*res;
+	int i = 0;
+
+	iounmap(isif_cfg.base_addr);
+	iounmap(isif_cfg.linear_tbl0_addr);
+	iounmap(isif_cfg.linear_tbl1_addr);
+	while (i < 3) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (res)
+			release_mem_region(res->start, resource_size(res));
+		i++;
+	}
+	vpfe_unregister_ccdc_device(&isif_hw_dev);
+	return 0;
+}
+
+static struct platform_driver isif_driver = {
+	.driver = {
+		.name	= "isif",
+		.owner = THIS_MODULE,
+	},
+	.remove = __devexit_p(isif_remove),
+	.probe = isif_probe,
+};
+
+module_platform_driver(isif_driver);
+
+MODULE_LICENSE("GPL");
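For context, isif_probe() above expects the board code to hand it a pinmux callback through platform_data and to describe three IORESOURCE_MEM regions (the ISIF registers plus the two linearization tables). A minimal board-side sketch follows; the addresses, device id and helper names are placeholders, not taken from this patch.

	/* Illustrative board setup only -- addresses and names are hypothetical. */
	static void board_isif_setup_pinmux(void)
	{
		/* route the CCDC/ISIF pins for this SoC */
	}

	static struct resource isif_resources[] = {
		{ .start = 0x01c71000, .end = 0x01c71fff, .flags = IORESOURCE_MEM }, /* ISIF regs */
		{ .start = 0x01c7a000, .end = 0x01c7a3ff, .flags = IORESOURCE_MEM }, /* lin tbl0  */
		{ .start = 0x01c7b000, .end = 0x01c7b3ff, .flags = IORESOURCE_MEM }, /* lin tbl1  */
	};

	static struct platform_device board_isif_device = {
		.name		= "isif",
		.id		= -1,
		.resource	= isif_resources,
		.num_resources	= ARRAY_SIZE(isif_resources),
		.dev		= {
			.platform_data	= board_isif_setup_pinmux,
		},
	};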
diff --git a/drivers/media/platform/davinci/isif_regs.h b/drivers/media/platform/davinci/isif_regs.h
new file mode 100644
index 0000000..aa69a46
--- /dev/null
+++ b/drivers/media/platform/davinci/isif_regs.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef _ISIF_REGS_H
+#define _ISIF_REGS_H
+
+/* ISIF registers relative offsets */
+#define SYNCEN					0x00
+#define MODESET					0x04
+#define HDW					0x08
+#define VDW					0x0c
+#define PPLN					0x10
+#define LPFR					0x14
+#define SPH					0x18
+#define LNH					0x1c
+#define SLV0					0x20
+#define SLV1					0x24
+#define LNV					0x28
+#define CULH					0x2c
+#define CULV					0x30
+#define HSIZE					0x34
+#define SDOFST					0x38
+#define CADU					0x3c
+#define CADL					0x40
+#define LINCFG0					0x44
+#define LINCFG1					0x48
+#define CCOLP					0x4c
+#define CRGAIN 					0x50
+#define CGRGAIN					0x54
+#define CGBGAIN					0x58
+#define CBGAIN					0x5c
+#define COFSTA					0x60
+#define FLSHCFG0				0x64
+#define FLSHCFG1				0x68
+#define FLSHCFG2				0x6c
+#define VDINT0					0x70
+#define VDINT1					0x74
+#define VDINT2					0x78
+#define MISC 					0x7c
+#define CGAMMAWD				0x80
+#define REC656IF				0x84
+#define CCDCFG					0x88
+/*****************************************************
+* Defect Correction registers
+*****************************************************/
+#define DFCCTL					0x8c
+#define VDFSATLV				0x90
+#define DFCMEMCTL				0x94
+#define DFCMEM0					0x98
+#define DFCMEM1					0x9c
+#define DFCMEM2					0xa0
+#define DFCMEM3					0xa4
+#define DFCMEM4					0xa8
+/****************************************************
+* Black Clamp registers
+****************************************************/
+#define CLAMPCFG				0xac
+#define CLDCOFST				0xb0
+#define CLSV					0xb4
+#define CLHWIN0					0xb8
+#define CLHWIN1					0xbc
+#define CLHWIN2					0xc0
+#define CLVRV					0xc4
+#define CLVWIN0					0xc8
+#define CLVWIN1					0xcc
+#define CLVWIN2					0xd0
+#define CLVWIN3					0xd4
+/****************************************************
+* Lens Shading Correction
+****************************************************/
+#define DATAHOFST				0xd8
+#define DATAVOFST				0xdc
+#define LSCHVAL					0xe0
+#define LSCVVAL					0xe4
+#define TWODLSCCFG				0xe8
+#define TWODLSCOFST				0xec
+#define TWODLSCINI				0xf0
+#define TWODLSCGRBU				0xf4
+#define TWODLSCGRBL				0xf8
+#define TWODLSCGROF				0xfc
+#define TWODLSCORBU				0x100
+#define TWODLSCORBL				0x104
+#define TWODLSCOROF				0x108
+#define TWODLSCIRQEN				0x10c
+#define TWODLSCIRQST				0x110
+/****************************************************
+* Data formatter
+****************************************************/
+#define FMTCFG					0x114
+#define FMTPLEN					0x118
+#define FMTSPH					0x11c
+#define FMTLNH					0x120
+#define FMTSLV					0x124
+#define FMTLNV					0x128
+#define FMTRLEN					0x12c
+#define FMTHCNT					0x130
+#define FMTAPTR_BASE				0x134
+/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
+#define FMTAPTR(i)			(FMTAPTR_BASE + (i * 4))
+#define FMTPGMVF0				0x174
+#define FMTPGMVF1				0x178
+#define FMTPGMAPU0				0x17c
+#define FMTPGMAPU1				0x180
+#define FMTPGMAPS0				0x184
+#define FMTPGMAPS1				0x188
+#define FMTPGMAPS2				0x18c
+#define FMTPGMAPS3				0x190
+#define FMTPGMAPS4				0x194
+#define FMTPGMAPS5				0x198
+#define FMTPGMAPS6				0x19c
+#define FMTPGMAPS7				0x1a0
+/************************************************
+* Color Space Converter
+************************************************/
+#define CSCCTL					0x1a4
+#define CSCM0					0x1a8
+#define CSCM1					0x1ac
+#define CSCM2					0x1b0
+#define CSCM3					0x1b4
+#define CSCM4					0x1b8
+#define CSCM5					0x1bc
+#define CSCM6					0x1c0
+#define CSCM7					0x1c4
+#define OBWIN0					0x1c8
+#define OBWIN1					0x1cc
+#define OBWIN2					0x1d0
+#define OBWIN3					0x1d4
+#define OBVAL0					0x1d8
+#define OBVAL1					0x1dc
+#define OBVAL2					0x1e0
+#define OBVAL3					0x1e4
+#define OBVAL4					0x1e8
+#define OBVAL5					0x1ec
+#define OBVAL6					0x1f0
+#define OBVAL7					0x1f4
+#define CLKCTL					0x1f8
+
+/* Masks & Shifts below */
+#define START_PX_HOR_MASK			0x7FFF
+#define NUM_PX_HOR_MASK				0x7FFF
+#define START_VER_ONE_MASK			0x7FFF
+#define START_VER_TWO_MASK			0x7FFF
+#define NUM_LINES_VER				0x7FFF
+
+/* gain - offset masks */
+#define GAIN_INTEGER_SHIFT			9
+#define OFFSET_MASK				0xFFF
+#define GAIN_SDRAM_EN_SHIFT			12
+#define GAIN_IPIPE_EN_SHIFT			13
+#define GAIN_H3A_EN_SHIFT			14
+#define OFST_SDRAM_EN_SHIFT			8
+#define OFST_IPIPE_EN_SHIFT			9
+#define OFST_H3A_EN_SHIFT			10
+#define GAIN_OFFSET_EN_MASK			0x7700
+
+/* Culling */
+#define CULL_PAT_EVEN_LINE_SHIFT		8
+
+/* CCDCFG register */
+#define ISIF_YCINSWP_RAW			(0x00 << 4)
+#define ISIF_YCINSWP_YCBCR			(0x01 << 4)
+#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC		(0x00 << 6)
+#define ISIF_CCDCFG_WENLOG_AND			(0x00 << 8)
+#define ISIF_CCDCFG_TRGSEL_WEN			(0x00 << 9)
+#define ISIF_CCDCFG_EXTRG_DISABLE		(0x00 << 10)
+#define ISIF_LATCH_ON_VSYNC_DISABLE		(0x01 << 15)
+#define ISIF_LATCH_ON_VSYNC_ENABLE		(0x00 << 15)
+#define ISIF_DATA_PACK_MASK			3
+#define ISIF_DATA_PACK16			0
+#define ISIF_DATA_PACK12			1
+#define ISIF_DATA_PACK8				2
+#define ISIF_PIX_ORDER_SHIFT			11
+#define ISIF_BW656_ENABLE			(0x01 << 5)
+
+/* MODESET registers */
+#define ISIF_VDHDOUT_INPUT			(0x00 << 0)
+#define ISIF_INPUT_SHIFT			12
+#define ISIF_RAW_INPUT_MODE			0
+#define ISIF_FID_POL_SHIFT			4
+#define ISIF_HD_POL_SHIFT			3
+#define ISIF_VD_POL_SHIFT			2
+#define ISIF_DATAPOL_NORMAL			0
+#define ISIF_DATAPOL_SHIFT			6
+#define ISIF_EXWEN_DISABLE 			0
+#define ISIF_EXWEN_SHIFT			5
+#define ISIF_FRM_FMT_SHIFT			7
+#define ISIF_DATASFT_SHIFT			8
+#define ISIF_LPF_SHIFT				14
+#define ISIF_LPF_MASK				1
+
+/* GAMMAWD registers */
+#define ISIF_ALAW_GAMA_WD_MASK			0xF
+#define ISIF_ALAW_GAMA_WD_SHIFT			1
+#define ISIF_ALAW_ENABLE			1
+#define ISIF_GAMMAWD_CFA_SHIFT			5
+
+/* HSIZE registers */
+#define ISIF_HSIZE_FLIP_MASK			1
+#define ISIF_HSIZE_FLIP_SHIFT			12
+
+/* MISC registers */
+#define ISIF_DPCM_EN_SHIFT			12
+#define ISIF_DPCM_PREDICTOR_SHIFT		13
+
+/* Black clamp related */
+#define ISIF_BC_MODE_COLOR_SHIFT		4
+#define ISIF_HORZ_BC_MODE_SHIFT			1
+#define ISIF_HORZ_BC_WIN_SEL_SHIFT		5
+#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT		6
+#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT		8
+#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT		12
+#define	ISIF_VERT_BC_RST_VAL_SEL_SHIFT		4
+#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT	8
+
+/* VDFC registers */
+#define ISIF_VDFC_EN_SHIFT			4
+#define ISIF_VDFC_CORR_MOD_SHIFT		5
+#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT		7
+#define ISIF_VDFC_LEVEL_SHFT_SHIFT		8
+#define ISIF_VDFC_POS_MASK			0x1FFF
+#define ISIF_DFCMEMCTL_DFCMARST_SHIFT		2
+
+/* CSC registers */
+#define ISIF_CSC_COEF_INTEG_MASK		7
+#define ISIF_CSC_COEF_DECIMAL_MASK		0x1f
+#define ISIF_CSC_COEF_INTEG_SHIFT		5
+#define ISIF_CSCM_MSB_SHIFT			8
+#define ISIF_DF_CSC_SPH_MASK			0x1FFF
+#define ISIF_DF_CSC_LNH_MASK			0x1FFF
+#define ISIF_DF_CSC_SLV_MASK			0x1FFF
+#define ISIF_DF_CSC_LNV_MASK			0x1FFF
+#define ISIF_DF_NUMLINES			0x7FFF
+#define ISIF_DF_NUMPIX				0x1FFF
+
+/* Offsets for LSC/DFC/Gain */
+#define ISIF_DATA_H_OFFSET_MASK			0x1FFF
+#define ISIF_DATA_V_OFFSET_MASK			0x1FFF
+
+/* Linearization */
+#define ISIF_LIN_CORRSFT_SHIFT			4
+#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT		10
+
+/* Pattern registers */
+#define ISIF_PG_EN				(1 << 3)
+#define ISIF_SEL_PG_SRC				(3 << 4)
+#define ISIF_PG_VD_POL_SHIFT			0
+#define ISIF_PG_HD_POL_SHIFT			1
+
+/* SYNCEN register bits */
+#define ISIF_SYNCEN_VDHDEN_MASK			(1 << 0)
+#define ISIF_SYNCEN_WEN_MASK			(1 << 1)
+#define ISIF_SYNCEN_WEN_SHIFT			1
+
+#endif
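Two quick checks on the layout above may help when reading the driver (illustrative only, not part of the patch): FMTAPTR(i) resolves to 0x134 + 4 * i, so FMTAPTR(15) is 0x170 and the block ends just below FMTPGMVF0 at 0x174; and register fields are composed by shifting values with the *_SHIFT macros, for example:

	/* Hypothetical MODESET value -- the polarity choices are made up. */
	static inline u32 isif_modeset_example(void)
	{
		return (ISIF_RAW_INPUT_MODE << ISIF_INPUT_SHIFT) |
		       (1 << ISIF_VD_POL_SHIFT) |	/* VD active low */
		       (1 << ISIF_HD_POL_SHIFT) |	/* HD active low */
		       (ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT);
	}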
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
new file mode 100644
index 0000000..c4a82a1
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpss.h>
+#include <media/davinci/vpbe_venc.h>
+
+#define VPBE_DEFAULT_OUTPUT	"Composite"
+#define VPBE_DEFAULT_MODE	"ntsc"
+
+static char *def_output = VPBE_DEFAULT_OUTPUT;
+static char *def_mode = VPBE_DEFAULT_MODE;
+static int debug;
+
+module_param(def_output, charp, S_IRUGO);
+module_param(def_mode, charp, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
+MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/**
+ * vpbe_current_encoder_info - Get config info for current encoder
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return ptr to current encoder config info
+ */
+static struct encoder_config_info*
+vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	int index = vpbe_dev->current_sd_index;
+
+	return ((index == 0) ? &cfg->venc :
+				&cfg->ext_encoders[index-1]);
+}
+
+/**
+ * vpbe_find_encoder_sd_index - Given an output index, find the encoder sd index
+ *
+ * @cfg - ptr to vpbe config
+ * @index - output index used by the application
+ *
+ * Return the sd index of the encoder, or -EINVAL if it is not found
+ */
+static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
+			     int index)
+{
+	char *encoder_name = cfg->outputs[index].subdev_name;
+	int i;
+
+	/* Venc is always first	*/
+	if (!strcmp(encoder_name, cfg->venc.module_name))
+		return 0;
+
+	for (i = 0; i < cfg->num_ext_encoders; i++) {
+		if (!strcmp(encoder_name,
+		     cfg->ext_encoders[i].module_name))
+			return i+1;
+	}
+
+	return -EINVAL;
+}
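Both helpers above rely on the same fixed numbering: the venc always occupies sub-device index 0 and the external encoders follow it, so index n maps to cfg->ext_encoders[n - 1]. Schematically (illustration only):

	/*
	 *   sd index 0  ->  cfg->venc              (on-chip VENC)
	 *   sd index 1  ->  cfg->ext_encoders[0]
	 *   sd index n  ->  cfg->ext_encoders[n-1]
	 */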
+
+/**
+ * vpbe_g_cropcap - Get crop capabilities of the display
+ * @vpbe_dev - vpbe device ptr
+ * @cropcap - cropcap is a ptr to struct v4l2_cropcap
+ *
+ * Update the crop capabilities in crop cap for current
+ * mode
+ */
+static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
+			  struct v4l2_cropcap *cropcap)
+{
+	if (NULL == cropcap)
+		return -EINVAL;
+	cropcap->bounds.left = 0;
+	cropcap->bounds.top = 0;
+	cropcap->bounds.width = vpbe_dev->current_timings.xres;
+	cropcap->bounds.height = vpbe_dev->current_timings.yres;
+	cropcap->defrect = cropcap->bounds;
+
+	return 0;
+}
+
+/**
+ * vpbe_enum_outputs - enumerate outputs
+ * @vpbe_dev - vpbe device ptr
+ * @output - ptr to v4l2_output structure
+ *
+ * Enumerates the outputs available at the vpbe display.
+ * Returns 0 on success, or -EINVAL at the end of the output list.
+ */
+static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
+			     struct v4l2_output *output)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	int temp_index = output->index;
+
+	if (temp_index >= cfg->num_outputs)
+		return -EINVAL;
+
+	*output = cfg->outputs[temp_index].output;
+	output->index = temp_index;
+
+	return 0;
+}
+
+static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
+			      int output_index)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	struct vpbe_enc_mode_info var;
+	int curr_output = output_index;
+	int i;
+
+	if (NULL == mode)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
+		var = cfg->outputs[curr_output].modes[i];
+		if (!strcmp(mode, var.name)) {
+			vpbe_dev->current_timings = var;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
+				      struct vpbe_enc_mode_info *mode_info)
+{
+	if (NULL == mode_info)
+		return -EINVAL;
+
+	*mode_info = vpbe_dev->current_timings;
+
+	return 0;
+}
+
+static int vpbe_get_dv_preset_info(struct vpbe_device *vpbe_dev,
+				   unsigned int dv_preset)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	struct vpbe_enc_mode_info var;
+	int curr_output = vpbe_dev->current_out_index;
+	int i;
+
+	for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+		var = cfg->outputs[curr_output].modes[i];
+		if ((var.timings_type & VPBE_ENC_DV_PRESET) &&
+		  (var.timings.dv_preset == dv_preset)) {
+			vpbe_dev->current_timings = var;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/* Get std by std id */
+static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
+			     v4l2_std_id std_id)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	struct vpbe_enc_mode_info var;
+	int curr_output = vpbe_dev->current_out_index;
+	int i;
+
+	for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+		var = cfg->outputs[curr_output].modes[i];
+		if ((var.timings_type & VPBE_ENC_STD) &&
+		  (var.timings.std_id & std_id)) {
+			vpbe_dev->current_timings = var;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
+				char *std_name)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	struct vpbe_enc_mode_info var;
+	int curr_output = vpbe_dev->current_out_index;
+	int i;
+
+	for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+		var = cfg->outputs[curr_output].modes[i];
+		if (!strcmp(var.name, std_name)) {
+			vpbe_dev->current_timings = var;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * vpbe_set_output - Set output
+ * @vpbe_dev - vpbe device ptr
+ * @index - index of output
+ *
+ * Set vpbe output to the output specified by the index
+ */
+static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
+{
+	struct encoder_config_info *curr_enc_info =
+			vpbe_current_encoder_info(vpbe_dev);
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	struct venc_platform_data *venc_device = vpbe_dev->venc_device;
+	enum v4l2_mbus_pixelcode if_params;
+	int enc_out_index;
+	int sd_index;
+	int ret = 0;
+
+	if (index >= cfg->num_outputs)
+		return -EINVAL;
+
+	mutex_lock(&vpbe_dev->lock);
+
+	sd_index = vpbe_dev->current_sd_index;
+	enc_out_index = cfg->outputs[index].output.index;
+	/*
+	 * Currently we switch the encoder based on the output selected
+	 * by the application. If the media controller is implemented later,
+	 * an API will be added to set up the link between the venc and the
+	 * external encoder. In that case the comparison below will always
+	 * match and the encoder will not be switched. But if the application
+	 * chooses not to use the media controller, this provides the current
+	 * way of switching the encoder at the venc output.
+	 */
+	if (strcmp(curr_enc_info->module_name,
+		   cfg->outputs[index].subdev_name)) {
+		/* Need to switch the encoder at the output */
+		sd_index = vpbe_find_encoder_sd_index(cfg, index);
+		if (sd_index < 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if_params = cfg->outputs[index].if_params;
+		ret = venc_device->setup_if_config(if_params);
+		if (ret)
+			goto out;
+	}
+
+	/* Set output at the encoder */
+	ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+				       s_routing, 0, enc_out_index, 0);
+	if (ret)
+		goto out;
+
+	/*
+	 * It is assumed that the venc or the external encoder will set a
+	 * default mode in the sub device. For an external encoder or LCD
+	 * panel output, we also need to set up the lcd port for the required
+	 * mode. So set up the lcd port for the default mode that is
+	 * configured in the board setup file (arch/arm/mach-davinci/
+	 * board-dm355-evm.c) for the external encoder.
+	 */
+	ret = vpbe_get_mode_info(vpbe_dev,
+				 cfg->outputs[index].default_mode, index);
+	if (!ret) {
+		struct osd_state *osd_device = vpbe_dev->osd_device;
+
+		osd_device->ops.set_left_margin(osd_device,
+			vpbe_dev->current_timings.left_margin);
+		osd_device->ops.set_top_margin(osd_device,
+			vpbe_dev->current_timings.upper_margin);
+		vpbe_dev->current_sd_index = sd_index;
+		vpbe_dev->current_out_index = index;
+	}
+out:
+	mutex_unlock(&vpbe_dev->lock);
+	return ret;
+}
+
+static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < cfg->num_outputs; i++) {
+		if (!strcmp(def_output,
+			    cfg->outputs[i].output.name)) {
+			ret = vpbe_set_output(vpbe_dev, i);
+			if (!ret)
+				vpbe_dev->current_out_index = i;
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/**
+ * vpbe_get_output - Get output
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return the index of the current vpbe output
+ */
+static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
+{
+	return vpbe_dev->current_out_index;
+}
+
+/**
+ * vpbe_s_dv_preset - Set the given preset timings in the encoder
+ *
+ * Sets the preset if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev,
+		     struct v4l2_dv_preset *dv_preset)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	int out_index = vpbe_dev->current_out_index;
+	int sd_index = vpbe_dev->current_sd_index;
+	int ret;
+
+	if (!(cfg->outputs[out_index].output.capabilities &
+	    V4L2_OUT_CAP_PRESETS))
+		return -EINVAL;
+
+	ret = vpbe_get_dv_preset_info(vpbe_dev, dv_preset->preset);
+
+	if (ret)
+		return ret;
+
+	mutex_lock(&vpbe_dev->lock);
+
+	ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+					s_dv_preset, dv_preset);
+	if (!ret && (vpbe_dev->amp != NULL)) {
+		/* Call amplifier subdevice */
+		ret = v4l2_subdev_call(vpbe_dev->amp, video,
+				s_dv_preset, dv_preset);
+	}
+	/* set the lcd controller output for the given mode */
+	if (!ret) {
+		struct osd_state *osd_device = vpbe_dev->osd_device;
+
+		osd_device->ops.set_left_margin(osd_device,
+			vpbe_dev->current_timings.left_margin);
+		osd_device->ops.set_top_margin(osd_device,
+			vpbe_dev->current_timings.upper_margin);
+	}
+	mutex_unlock(&vpbe_dev->lock);
+
+	return ret;
+}
+
+/**
+ * vpbe_g_dv_preset - Get the preset in the current encoder
+ *
+ * Get the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_dv_preset(struct vpbe_device *vpbe_dev,
+		     struct v4l2_dv_preset *dv_preset)
+{
+	if (vpbe_dev->current_timings.timings_type &
+	  VPBE_ENC_DV_PRESET) {
+		dv_preset->preset = vpbe_dev->current_timings.timings.dv_preset;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * vpbe_enum_dv_presets - Enumerate the dv presets in the current encoder
+ *
+ * Enumerate the dv presets supported by the current output. Return the
+ * status. 0 - success, -EINVAL on error
+ */
+static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev,
+			 struct v4l2_dv_enum_preset *preset_info)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	int out_index = vpbe_dev->current_out_index;
+	struct vpbe_output *output = &cfg->outputs[out_index];
+	int j = 0;
+	int i;
+
+	if (!(output->output.capabilities & V4L2_OUT_CAP_PRESETS))
+		return -EINVAL;
+
+	for (i = 0; i < output->num_modes; i++) {
+		if (output->modes[i].timings_type == VPBE_ENC_DV_PRESET) {
+			if (j == preset_info->index)
+				break;
+			j++;
+		}
+	}
+
+	if (i == output->num_modes)
+		return -EINVAL;
+
+	return v4l_fill_dv_preset_info(output->modes[i].timings.dv_preset,
+					preset_info);
+}
+
+/**
+ * vpbe_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	int out_index = vpbe_dev->current_out_index;
+	int sd_index = vpbe_dev->current_sd_index;
+	int ret;
+
+	if (!(cfg->outputs[out_index].output.capabilities &
+		V4L2_OUT_CAP_STD))
+		return -EINVAL;
+
+	ret = vpbe_get_std_info(vpbe_dev, *std_id);
+	if (ret)
+		return ret;
+
+	mutex_lock(&vpbe_dev->lock);
+
+	ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+			       s_std_output, *std_id);
+	/* set the lcd controller output for the given mode */
+	if (!ret) {
+		struct osd_state *osd_device = vpbe_dev->osd_device;
+
+		osd_device->ops.set_left_margin(osd_device,
+			vpbe_dev->current_timings.left_margin);
+		osd_device->ops.set_top_margin(osd_device,
+			vpbe_dev->current_timings.upper_margin);
+	}
+	mutex_unlock(&vpbe_dev->lock);
+
+	return ret;
+}
+
+/**
+ * vpbe_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+	struct vpbe_enc_mode_info cur_timings = vpbe_dev->current_timings;
+
+	if (cur_timings.timings_type & VPBE_ENC_STD) {
+		*std_id = cur_timings.timings.std_id;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * vpbe_set_mode - Set mode in the current encoder using mode info
+ *
+ * Use the mode string to decide what timings to set in the encoder.
+ * This is typically useful when the fbset command is used to change the
+ * current timings by specifying a string to indicate the timings.
+ */
+static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
+			 struct vpbe_enc_mode_info *mode_info)
+{
+	struct vpbe_enc_mode_info *preset_mode = NULL;
+	struct vpbe_config *cfg = vpbe_dev->cfg;
+	struct v4l2_dv_preset dv_preset;
+	struct osd_state *osd_device;
+	int out_index = vpbe_dev->current_out_index;
+	int ret = 0;
+	int i;
+
+	if ((NULL == mode_info) || (NULL == mode_info->name))
+		return -EINVAL;
+
+	for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
+		if (!strcmp(mode_info->name,
+		     cfg->outputs[out_index].modes[i].name)) {
+			preset_mode = &cfg->outputs[out_index].modes[i];
+			/*
+			 * it may be one of the 3 timings type. Check and
+			 * invoke right API
+			 */
+			if (preset_mode->timings_type & VPBE_ENC_STD)
+				return vpbe_s_std(vpbe_dev,
+						 &preset_mode->timings.std_id);
+			if (preset_mode->timings_type & VPBE_ENC_DV_PRESET) {
+				dv_preset.preset =
+					preset_mode->timings.dv_preset;
+				return vpbe_s_dv_preset(vpbe_dev, &dv_preset);
+			}
+		}
+	}
+
+	/* Only custom timing should reach here */
+	if (preset_mode == NULL)
+		return -EINVAL;
+
+	mutex_lock(&vpbe_dev->lock);
+
+	osd_device = vpbe_dev->osd_device;
+	vpbe_dev->current_timings = *preset_mode;
+	osd_device->ops.set_left_margin(osd_device,
+		vpbe_dev->current_timings.left_margin);
+	osd_device->ops.set_top_margin(osd_device,
+		vpbe_dev->current_timings.upper_margin);
+
+	mutex_unlock(&vpbe_dev->lock);
+
+	return ret;
+}
+
+static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
+{
+	int ret;
+
+	ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
+	if (ret)
+		return ret;
+
+	/* set the default mode in the encoder */
+	return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
+}
+
+static int platform_device_get(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct vpbe_device *vpbe_dev = data;
+
+	if (strcmp("vpbe-osd", pdev->name) == 0)
+		vpbe_dev->osd_device = platform_get_drvdata(pdev);
+	if (strcmp("vpbe-venc", pdev->name) == 0)
+		vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
+
+	return 0;
+}
+
+/**
+ * vpbe_initialize() - Initialize the vpbe display controller
+ * @vpbe_dev - vpbe device ptr
+ *
+ * The master frame buffer device driver calls this to initialize the vpbe
+ * display controller. This then registers the v4l2 device and the sub
+ * devices and sets a current encoder sub device for display. The v4l2
+ * display device driver is the master and the frame buffer display device
+ * driver is the slave. The frame buffer display driver checks the
+ * initialized flag during probe and exits if it is not set. Returns status.
+ */
+static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+	struct encoder_config_info *enc_info;
+	struct amp_config_info *amp_info;
+	struct v4l2_subdev **enc_subdev;
+	struct osd_state *osd_device;
+	struct i2c_adapter *i2c_adap;
+	int output_index;
+	int num_encoders;
+	int ret = 0;
+	int err;
+	int i;
+
+	/*
+	 * v4l2 and FBDev frame buffer devices will get the vpbe_dev pointer
+	 * from the platform device by iterating over the platform devices
+	 * and matching on the device name
+	 */
+	if (NULL == vpbe_dev || NULL == dev) {
+		printk(KERN_ERR "Null device pointers.\n");
+		return -ENODEV;
+	}
+
+	if (vpbe_dev->initialized)
+		return 0;
+
+	mutex_lock(&vpbe_dev->lock);
+
+	if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+		/* We have dac clock available for platform */
+		vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
+		if (IS_ERR(vpbe_dev->dac_clk)) {
+			ret =  PTR_ERR(vpbe_dev->dac_clk);
+			goto vpbe_unlock;
+		}
+		if (clk_enable(vpbe_dev->dac_clk)) {
+			ret =  -ENODEV;
+			goto vpbe_unlock;
+		}
+	}
+
+	/* first enable vpss clocks */
+	vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+
+	/* First register a v4l2 device */
+	ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
+	if (ret) {
+		v4l2_err(dev->driver,
+			"Unable to register v4l2 device.\n");
+		goto vpbe_fail_clock;
+	}
+	v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
+
+	err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
+			       platform_device_get);
+	if (err < 0) {
+		ret = err;
+		goto vpbe_fail_v4l2_device;
+	}
+
+	vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
+					   vpbe_dev->cfg->venc.module_name);
+	/* register venc sub device */
+	if (vpbe_dev->venc == NULL) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"vpbe unable to init venc sub device\n");
+		ret = -ENODEV;
+		goto vpbe_fail_v4l2_device;
+	}
+	/* initialize osd device */
+	osd_device = vpbe_dev->osd_device;
+
+	if (NULL != osd_device->ops.initialize) {
+		err = osd_device->ops.initialize(osd_device);
+		if (err) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				 "unable to initialize the OSD device");
+			ret = -ENOMEM;
+			goto vpbe_fail_v4l2_device;
+		}
+	}
+
+	/*
+	 * Register any external encoders that are configured. At index 0 we
+	 * store venc sd index.
+	 */
+	num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
+	vpbe_dev->encoders = kmalloc(
+				sizeof(struct v4l2_subdev *)*num_encoders,
+				GFP_KERNEL);
+	if (NULL == vpbe_dev->encoders) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"unable to allocate memory for encoders sub devices");
+		ret = -ENOMEM;
+		goto vpbe_fail_v4l2_device;
+	}
+
+	i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
+	for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
+		if (i == 0) {
+			/* venc is at index 0 */
+			enc_subdev = &vpbe_dev->encoders[i];
+			*enc_subdev = vpbe_dev->venc;
+			continue;
+		}
+		enc_info = &vpbe_dev->cfg->ext_encoders[i];
+		if (enc_info->is_i2c) {
+			enc_subdev = &vpbe_dev->encoders[i];
+			*enc_subdev = v4l2_i2c_new_subdev_board(
+						&vpbe_dev->v4l2_dev, i2c_adap,
+						&enc_info->board_info, NULL);
+			if (*enc_subdev)
+				v4l2_info(&vpbe_dev->v4l2_dev,
+					  "v4l2 sub device %s registered\n",
+					  enc_info->module_name);
+			else {
+				v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
+					 " failed to register",
+					 enc_info->module_name);
+				ret = -ENODEV;
+				goto vpbe_fail_sd_register;
+			}
+		} else
+			v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
+				 " currently not supported");
+	}
+	/* Add amplifier subdevice for dm365 */
+	if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
+			vpbe_dev->cfg->amp != NULL) {
+		amp_info = vpbe_dev->cfg->amp;
+		if (amp_info->is_i2c) {
+			vpbe_dev->amp = v4l2_i2c_new_subdev_board(
+			&vpbe_dev->v4l2_dev, i2c_adap,
+			&amp_info->board_info, NULL);
+			if (!vpbe_dev->amp) {
+				v4l2_err(&vpbe_dev->v4l2_dev,
+					 "amplifier %s failed to register",
+					 amp_info->module_name);
+				ret = -ENODEV;
+				goto vpbe_fail_amp_register;
+			}
+			v4l2_info(&vpbe_dev->v4l2_dev,
+					  "v4l2 sub device %s registered\n",
+					  amp_info->module_name);
+		} else {
+			vpbe_dev->amp = NULL;
+			v4l2_warn(&vpbe_dev->v4l2_dev,
+				  "non-i2c amplifiers currently not supported");
+		}
+	} else {
+		vpbe_dev->amp = NULL;
+	}
+
+	/* set the current encoder and output to that of venc by default */
+	vpbe_dev->current_sd_index = 0;
+	vpbe_dev->current_out_index = 0;
+	output_index = 0;
+
+	mutex_unlock(&vpbe_dev->lock);
+
+	printk(KERN_NOTICE "Setting default output to %s\n", def_output);
+	ret = vpbe_set_default_output(vpbe_dev);
+	if (ret) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
+			 def_output);
+		return ret;
+	}
+
+	printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
+	ret = vpbe_set_default_mode(vpbe_dev);
+	if (ret) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
+			 def_mode);
+		return ret;
+	}
+	vpbe_dev->initialized = 1;
+	/* TBD handling of bootargs for default output and mode */
+	return 0;
+
+vpbe_fail_amp_register:
+	kfree(vpbe_dev->amp);
+vpbe_fail_sd_register:
+	kfree(vpbe_dev->encoders);
+vpbe_fail_v4l2_device:
+	v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+vpbe_fail_clock:
+	if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+		clk_put(vpbe_dev->dac_clk);
+vpbe_unlock:
+	mutex_unlock(&vpbe_dev->lock);
+	return ret;
+}
+
+/**
+ * vpbe_deinitialize() - de-initialize the vpbe display controller
+ * @dev - Master and slave device ptr
+ *
+ * The master and slave frame buffer device drivers call this to
+ * de-initialize the display controller. It is called when the master and
+ * slave driver modules are removed and no longer require the controller.
+ */
+static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+	v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+	if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+		clk_put(vpbe_dev->dac_clk);
+
+	kfree(vpbe_dev->amp);
+	kfree(vpbe_dev->encoders);
+	vpbe_dev->initialized = 0;
+	/* disable vpss clocks */
+	vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
+}
+
+static struct vpbe_device_ops vpbe_dev_ops = {
+	.g_cropcap = vpbe_g_cropcap,
+	.enum_outputs = vpbe_enum_outputs,
+	.set_output = vpbe_set_output,
+	.get_output = vpbe_get_output,
+	.s_dv_preset = vpbe_s_dv_preset,
+	.g_dv_preset = vpbe_g_dv_preset,
+	.enum_dv_presets = vpbe_enum_dv_presets,
+	.s_std = vpbe_s_std,
+	.g_std = vpbe_g_std,
+	.initialize = vpbe_initialize,
+	.deinitialize = vpbe_deinitialize,
+	.get_mode_info = vpbe_get_current_mode_info,
+	.set_mode = vpbe_set_mode,
+};
+
+static __devinit int vpbe_probe(struct platform_device *pdev)
+{
+	struct vpbe_device *vpbe_dev;
+	struct vpbe_config *cfg;
+	int ret = -EINVAL;
+
+	if (pdev->dev.platform_data == NULL) {
+		v4l2_err(pdev->dev.driver, "No platform data\n");
+		return -ENODEV;
+	}
+	cfg = pdev->dev.platform_data;
+
+	if (!cfg->module_name[0] ||
+	    !cfg->osd.module_name[0] ||
+	    !cfg->venc.module_name[0]) {
+		v4l2_err(pdev->dev.driver, "vpbe display module names not"
+			 " defined\n");
+		return ret;
+	}
+
+	vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
+	if (vpbe_dev == NULL) {
+		v4l2_err(pdev->dev.driver, "Unable to allocate memory"
+			 " for vpbe_device\n");
+		return -ENOMEM;
+	}
+	vpbe_dev->cfg = cfg;
+	vpbe_dev->ops = vpbe_dev_ops;
+	vpbe_dev->pdev = &pdev->dev;
+
+	if (cfg->outputs->num_modes > 0)
+		vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
+	else {
+		kfree(vpbe_dev);
+		return -ENODEV;
+	}
+
+	/* set the driver data in platform device */
+	platform_set_drvdata(pdev, vpbe_dev);
+	mutex_init(&vpbe_dev->lock);
+
+	return 0;
+}
+
+static int vpbe_remove(struct platform_device *device)
+{
+	struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
+
+	kfree(vpbe_dev);
+
+	return 0;
+}
+
+static struct platform_driver vpbe_driver = {
+	.driver	= {
+		.name	= "vpbe_controller",
+		.owner	= THIS_MODULE,
+	},
+	.probe = vpbe_probe,
+	.remove = vpbe_remove,
+};
+
+module_platform_driver(vpbe_driver);
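As the vpbe_initialize() comment explains, vpbe_probe() only stashes the vpbe_device with platform_set_drvdata(); the master V4L2 display driver (and the slave frame buffer driver) later pick it up and drive the controller through the ops table. A minimal sketch of that hand-off, where vpbe_controller_pdev is a hypothetical pointer to the "vpbe_controller" platform device:

	/* Sketch only: how a master display driver might bring up the controller. */
	static int display_driver_bind(struct platform_device *vpbe_controller_pdev)
	{
		struct vpbe_device *vpbe_dev =
				platform_get_drvdata(vpbe_controller_pdev);

		return vpbe_dev->ops.initialize(&vpbe_controller_pdev->dev,
						vpbe_dev);
	}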
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
new file mode 100644
index 0000000..9a05c81
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -0,0 +1,1838 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <mach/cputype.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_display.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpbe_osd.h>
+#include "vpbe_venc_regs.h"
+
+#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
+
+static int debug;
+
+#define VPBE_DEFAULT_NUM_BUFS 3
+
+module_param(debug, int, 0644);
+
+static int venc_is_second_field(struct vpbe_display *disp_dev)
+{
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	int ret;
+	int val;
+	int val = 0;
+	ret = v4l2_subdev_call(vpbe_dev->venc,
+			       core,
+			       ioctl,
+			       VENC_GET_FLD,
+			       &val);
+	if (ret < 0) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			 "Error in getting Field ID 0\n");
+	}
+	return val;
+}
+
+static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
+				struct vpbe_layer *layer)
+{
+	struct timespec timevalue;
+
+	if (layer->cur_frm == layer->next_frm)
+		return;
+	ktime_get_ts(&timevalue);
+	layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
+	layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
+	layer->cur_frm->state = VIDEOBUF_DONE;
+	wake_up_interruptible(&layer->cur_frm->done);
+	/* Make cur_frm pointing to next_frm */
+	layer->cur_frm = layer->next_frm;
+}
+
+static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
+				struct vpbe_layer *layer)
+{
+	struct osd_state *osd_device = disp_obj->osd_device;
+	unsigned long addr;
+
+	spin_lock(&disp_obj->dma_queue_lock);
+	if (list_empty(&layer->dma_queue) ||
+		(layer->cur_frm != layer->next_frm)) {
+		spin_unlock(&disp_obj->dma_queue_lock);
+		return;
+	}
+	/*
+	 * One field has been displayed. Configure the next frame if it is
+	 * available; otherwise hold on to the current frame. Get the next
+	 * buffer from the DMA queue.
+	 */
+	layer->next_frm = list_entry(
+				layer->dma_queue.next,
+				struct  videobuf_buffer,
+				queue);
+	/* Remove that from the buffer queue */
+	list_del(&layer->next_frm->queue);
+	spin_unlock(&disp_obj->dma_queue_lock);
+	/* Mark state of the frame to active */
+	layer->next_frm->state = VIDEOBUF_ACTIVE;
+	addr = videobuf_to_dma_contig(layer->next_frm);
+	osd_device->ops.start_layer(osd_device,
+			layer->layer_info.id,
+			addr,
+			disp_obj->cbcr_ofst);
+}
+
+/* interrupt service routine */
+static irqreturn_t venc_isr(int irq, void *arg)
+{
+	struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
+	struct vpbe_layer *layer;
+	static unsigned last_event;
+	unsigned event = 0;
+	int fid;
+	int i;
+
+	if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+		return IRQ_HANDLED;
+
+	if (venc_is_second_field(disp_dev))
+		event |= VENC_SECOND_FIELD;
+	else
+		event |= VENC_FIRST_FIELD;
+
+	if (event == (last_event & ~VENC_END_OF_FRAME)) {
+		/*
+		 * If the display is non-interlaced, then we need to flag the
+		 * end-of-frame event at every interrupt regardless of the
+		 * value of the FIDST bit.  We can conclude that the display is
+		 * non-interlaced if the value of the FIDST bit is unchanged
+		 * from the previous interrupt.
+		 */
+		event |= VENC_END_OF_FRAME;
+	} else if (event == VENC_SECOND_FIELD) {
+		/* end-of-frame for interlaced display */
+		event |= VENC_END_OF_FRAME;
+	}
+	last_event = event;
+
+	for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+		layer = disp_dev->dev[i];
+		/* If streaming is started in this layer */
+		if (!layer->started)
+			continue;
+
+		if (layer->layer_first_int) {
+			layer->layer_first_int = 0;
+			continue;
+		}
+		/* Check the field format */
+		if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
+			(event & VENC_END_OF_FRAME)) {
+			/* Progressive mode */
+			vpbe_isr_even_field(disp_dev, layer);
+			vpbe_isr_odd_field(disp_dev, layer);
+		} else {
+			/* Interlaced mode */
+			layer->field_id ^= 1;
+			if (event & VENC_FIRST_FIELD)
+				fid = 0;
+			else
+				fid = 1;
+
+			/*
+			 * If the field id does not match the stored
+			 * field id
+			 */
+			if (fid != layer->field_id) {
+				/* Make them in sync */
+				layer->field_id = fid;
+				continue;
+			}
+			/*
+			 * The device field id and the local field id are
+			 * in sync. If this is the even field
+			 */
+			if (0 == fid)
+				vpbe_isr_even_field(disp_dev, layer);
+			else  /* odd field */
+				vpbe_isr_odd_field(disp_dev, layer);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
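To make the end-of-frame detection above concrete, the event sequences the ISR sees are (illustration only):

	/*
	 * interlaced display:  FIRST, SECOND|END_OF_FRAME, FIRST, SECOND|END_OF_FRAME, ...
	 * progressive display: the FID bit never toggles, so every interrupt
	 *                      repeats the previous field value and therefore
	 *                      carries END_OF_FRAME.
	 */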
+
+/*
+ * vpbe_buffer_prepare()
+ * This is the callback function called from videobuf_qbuf(). The buffer
+ * is prepared and the user space virtual address is converted into a
+ * physical address.
+ */
+static int vpbe_buffer_prepare(struct videobuf_queue *q,
+				  struct videobuf_buffer *vb,
+				  enum v4l2_field field)
+{
+	struct vpbe_fh *fh = q->priv_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	unsigned long addr;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+				"vpbe_buffer_prepare\n");
+
+	/* If buffer is not initialized, initialize it */
+	if (VIDEOBUF_NEEDS_INIT == vb->state) {
+		vb->width = layer->pix_fmt.width;
+		vb->height = layer->pix_fmt.height;
+		vb->size = layer->pix_fmt.sizeimage;
+		vb->field = field;
+
+		ret = videobuf_iolock(q, vb, NULL);
+		if (ret < 0) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				 "Failed to map user address\n");
+			return -EINVAL;
+		}
+
+		addr = videobuf_to_dma_contig(vb);
+
+		if (q->streaming) {
+			if (!IS_ALIGNED(addr, 8)) {
+				v4l2_err(&vpbe_dev->v4l2_dev,
+					"buffer_prepare: offset is not aligned to 8 bytes\n");
+				return -EINVAL;
+			}
+		}
+		vb->state = VIDEOBUF_PREPARED;
+	}
+	return 0;
+}
+
+/*
+ * vpbe_buffer_setup()
+ * This function sets up the number of buffers and the size of each buffer
+ */
+static int vpbe_buffer_setup(struct videobuf_queue *q,
+				unsigned int *count,
+				unsigned int *size)
+{
+	/* Get the file handle object and layer object */
+	struct vpbe_fh *fh = q->priv_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
+
+	*size = layer->pix_fmt.sizeimage;
+
+	/* Store number of buffers allocated in numbuffer member */
+	if (*count < VPBE_DEFAULT_NUM_BUFS)
+		*count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+	return 0;
+}
+
+/*
+ * vpbe_buffer_queue()
+ * This function adds the buffer to DMA queue
+ */
+static void vpbe_buffer_queue(struct videobuf_queue *q,
+				 struct videobuf_buffer *vb)
+{
+	/* Get the file handle object and layer object */
+	struct vpbe_fh *fh = q->priv_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_display *disp = fh->disp_dev;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	unsigned long flags;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"vpbe_buffer_queue\n");
+
+	/* add the buffer to the DMA queue */
+	spin_lock_irqsave(&disp->dma_queue_lock, flags);
+	list_add_tail(&vb->queue, &layer->dma_queue);
+	spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+	/* Change state of the buffer */
+	vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * vpbe_buffer_release()
+ * This function is called from the videobuf layer to free memory allocated to
+ * the buffers
+ */
+static void vpbe_buffer_release(struct videobuf_queue *q,
+				   struct videobuf_buffer *vb)
+{
+	/* Get the file handle object and layer object */
+	struct vpbe_fh *fh = q->priv_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"vpbe_buffer_release\n");
+
+	if (V4L2_MEMORY_USERPTR != layer->memory)
+		videobuf_dma_contig_free(q, vb);
+
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops video_qops = {
+	.buf_setup = vpbe_buffer_setup,
+	.buf_prepare = vpbe_buffer_prepare,
+	.buf_queue = vpbe_buffer_queue,
+	.buf_release = vpbe_buffer_release,
+};
+
+static
+struct vpbe_layer*
+_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
+			struct vpbe_layer *layer)
+{
+	enum vpbe_display_device_id thiswin, otherwin;
+
+	thiswin = layer->device_id;
+	otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
+		VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
+	return disp_dev->dev[otherwin];
+}
+
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+			struct vpbe_layer *layer)
+{
+	struct osd_layer_config *cfg  = &layer->layer_info.config;
+	struct osd_state *osd_device = disp_dev->osd_device;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	unsigned long addr;
+	int ret;
+
+	addr = videobuf_to_dma_contig(layer->cur_frm);
+	/* Set address in the display registers */
+	osd_device->ops.start_layer(osd_device,
+				    layer->layer_info.id,
+				    addr,
+				    disp_dev->cbcr_ofst);
+
+	ret = osd_device->ops.enable_layer(osd_device,
+				layer->layer_info.id, 0);
+	if (ret < 0) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"Error in enabling osd window layer 0\n");
+		return -1;
+	}
+
+	/* Enable the window */
+	layer->layer_info.enable = 1;
+	if (cfg->pixfmt == PIXFMT_NV12) {
+		struct vpbe_layer *otherlayer =
+			_vpbe_display_get_other_win_layer(disp_dev, layer);
+
+		ret = osd_device->ops.enable_layer(osd_device,
+				otherlayer->layer_info.id, 1);
+		if (ret < 0) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				"Error in enabling osd window layer 1\n");
+			return -1;
+		}
+		otherlayer->layer_info.enable = 1;
+	}
+	return 0;
+}
+
+static void
+vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
+			struct vpbe_layer *layer,
+			int expected_xsize, int expected_ysize)
+{
+	struct display_layer_info *layer_info = &layer->layer_info;
+	struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
+	struct osd_layer_config *cfg  = &layer->layer_info.config;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	int calculated_xsize;
+	int h_exp = 0;
+	int v_exp = 0;
+	int h_scale;
+	int v_scale;
+
+	v4l2_std_id standard_id = vpbe_dev->current_timings.timings.std_id;
+
+	/*
+	 * The application initially sets the image format. The current
+	 * display size is obtained from the vpbe display controller, and
+	 * expected_xsize and expected_ysize are set through the S_CROP
+	 * ioctl. Based on this, the driver calculates the scale factors in
+	 * the vertical and horizontal directions so that the image is
+	 * displayed scaled and expanded. The application uses expansion to
+	 * display the image with square pixels; otherwise it is displayed
+	 * using the display's pixel aspect ratio. The application is
+	 * expected to choose the crop coordinates for cropped or scaled
+	 * display. If the crop size is less than the image size, it is
+	 * displayed cropped; otherwise it is displayed scaled and/or
+	 * expanded.
+	 *
+	 * To begin with, set the crop window the same as expected. Later we
+	 * will override it with the scaled window size.
+	 */
+
+	cfg->xsize = pixfmt->width;
+	cfg->ysize = pixfmt->height;
+	layer_info->h_zoom = ZOOM_X1;	/* no horizontal zoom */
+	layer_info->v_zoom = ZOOM_X1;	/* no vertical zoom */
+	layer_info->h_exp = H_EXP_OFF;	/* no horizontal expansion */
+	layer_info->v_exp = V_EXP_OFF;	/* no vertical expansion */
+
+	if (pixfmt->width < expected_xsize) {
+		h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
+		if (h_scale < 2)
+			h_scale = 1;
+		else if (h_scale >= 4)
+			h_scale = 4;
+		else
+			h_scale = 2;
+		cfg->xsize *= h_scale;
+		if (cfg->xsize < expected_xsize) {
+			if ((standard_id & V4L2_STD_525_60) ||
+			(standard_id & V4L2_STD_625_50)) {
+				calculated_xsize = (cfg->xsize *
+					VPBE_DISPLAY_H_EXP_RATIO_N) /
+					VPBE_DISPLAY_H_EXP_RATIO_D;
+				if (calculated_xsize <= expected_xsize) {
+					h_exp = 1;
+					cfg->xsize = calculated_xsize;
+				}
+			}
+		}
+		if (h_scale == 2)
+			layer_info->h_zoom = ZOOM_X2;
+		else if (h_scale == 4)
+			layer_info->h_zoom = ZOOM_X4;
+		if (h_exp)
+			layer_info->h_exp = H_EXP_9_OVER_8;
+	} else {
+		/* no scaling, only cropping. Set display area to crop area */
+		cfg->xsize = expected_xsize;
+	}
+
+	if (pixfmt->height < expected_ysize) {
+		v_scale = expected_ysize / pixfmt->height;
+		if (v_scale < 2)
+			v_scale = 1;
+		else if (v_scale >= 4)
+			v_scale = 4;
+		else
+			v_scale = 2;
+		cfg->ysize *= v_scale;
+		if (cfg->ysize < expected_ysize) {
+			if ((standard_id & V4L2_STD_625_50)) {
+				calculated_xsize = (cfg->ysize *
+					VPBE_DISPLAY_V_EXP_RATIO_N) /
+					VPBE_DISPLAY_V_EXP_RATIO_D;
+				if (calculated_xsize <= expected_ysize) {
+					v_exp = 1;
+					cfg->ysize = calculated_xsize;
+				}
+			}
+		}
+		if (v_scale == 2)
+			layer_info->v_zoom = ZOOM_X2;
+		else if (v_scale == 4)
+			layer_info->v_zoom = ZOOM_X4;
+		if (v_exp)
+			layer_info->v_exp = V_EXP_6_OVER_5;
+	} else {
+		/* no scaling, only cropping. Set display area to crop area */
+		cfg->ysize = expected_ysize;
+	}
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+		"crop display xsize = %d, ysize = %d\n",
+		cfg->xsize, cfg->ysize);
+}
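A worked example of the decisions above, with numbers chosen purely for illustration: a 360x240 buffer shown on a 720x480 display with an S_CROP window of 720x480 ends up zoomed 2x in both directions and needs no 9/8 or 6/5 expansion:

	/*
	 * pixfmt 360x240, current_timings 720x480, crop 720x480:
	 *   h_scale = 720 / 360 = 2  ->  h_zoom = ZOOM_X2, cfg->xsize = 720
	 *   v_scale = 480 / 240 = 2  ->  v_zoom = ZOOM_X2, cfg->ysize = 480
	 *   the scaled sizes already reach the crop window, so h_exp and
	 *   v_exp stay off
	 */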
+
+static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
+			struct vpbe_layer *layer,
+			int top, int left)
+{
+	struct osd_layer_config *cfg = &layer->layer_info.config;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+	cfg->xpos = min((unsigned int)left,
+			vpbe_dev->current_timings.xres - cfg->xsize);
+	cfg->ypos = min((unsigned int)top,
+			vpbe_dev->current_timings.yres - cfg->ysize);
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+		"new xpos = %d, ypos = %d\n",
+		cfg->xpos, cfg->ypos);
+}
+
+static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
+			struct v4l2_rect *c)
+{
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+	if ((c->width == 0) ||
+	  ((c->width + c->left) > vpbe_dev->current_timings.xres))
+		c->width = vpbe_dev->current_timings.xres - c->left;
+
+	if ((c->height == 0) || ((c->height + c->top) >
+	  vpbe_dev->current_timings.yres))
+		c->height = vpbe_dev->current_timings.yres - c->top;
+
+	/* window height must be even for interlaced display */
+	if (vpbe_dev->current_timings.interlaced)
+		c->height &= (~0x01);
+}
+
+/**
+ * vpbe_try_format()
+ * If the user application provides width and height and leaves bytesperline
+ * set to zero, the driver calculates bytesperline and sizeimage based on
+ * hardware limits.
+ */
+static int vpbe_try_format(struct vpbe_display *disp_dev,
+			struct v4l2_pix_format *pixfmt, int check)
+{
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	int min_height = 1;
+	int min_width = 32;
+	int max_height;
+	int max_width;
+	int bpp;
+
+	if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
+	    (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
+		/* choose default as V4L2_PIX_FMT_UYVY */
+		pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
+
+	/* Check the field format */
+	if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
+		(pixfmt->field != V4L2_FIELD_NONE)) {
+		if (vpbe_dev->current_timings.interlaced)
+			pixfmt->field = V4L2_FIELD_INTERLACED;
+		else
+			pixfmt->field = V4L2_FIELD_NONE;
+	}
+
+	if (pixfmt->field == V4L2_FIELD_INTERLACED)
+		min_height = 2;
+
+	if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+		bpp = 1;
+	else
+		bpp = 2;
+
+	max_width = vpbe_dev->current_timings.xres;
+	max_height = vpbe_dev->current_timings.yres;
+
+	min_width /= bpp;
+
+	if (!pixfmt->width || (pixfmt->width < min_width) ||
+		(pixfmt->width > max_width)) {
+		pixfmt->width = vpbe_dev->current_timings.xres;
+	}
+
+	if (!pixfmt->height || (pixfmt->height  < min_height) ||
+		(pixfmt->height  > max_height)) {
+		pixfmt->height = vpbe_dev->current_timings.yres;
+	}
+
+	if (pixfmt->bytesperline < (pixfmt->width * bpp))
+		pixfmt->bytesperline = pixfmt->width * bpp;
+
+	/* Make the bytesperline 32 byte aligned */
+	pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
+
+	if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+		pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
+				(pixfmt->bytesperline * pixfmt->height >> 1);
+	else
+		pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+	return 0;
+}
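For reference, the NV12 sizing math above for a 720x480 request with bytesperline left at zero works out as in this small sketch (not part of the patch):

	/* Mirrors the NV12 branch of vpbe_try_format(): bpp = 1, 32-byte aligned pitch. */
	static inline unsigned int nv12_sizeimage_example(unsigned int width,
							  unsigned int height)
	{
		unsigned int bytesperline = (width + 31) & ~31;

		/* e.g. 720x480: bytesperline = 736, sizeimage = 736 * 480 * 3 / 2 = 529920 */
		return bytesperline * height + (bytesperline * height >> 1);
	}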
+
+static int vpbe_display_g_priority(struct file *file, void *priv,
+				enum v4l2_priority *p)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+
+	*p = v4l2_prio_max(&layer->prio);
+
+	return 0;
+}
+
+static int vpbe_display_s_priority(struct file *file, void *priv,
+				enum v4l2_priority p)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	int ret;
+
+	ret = v4l2_prio_change(&layer->prio, &fh->prio, p);
+
+	return ret;
+}
+
+static int vpbe_display_querycap(struct file *file, void  *priv,
+			       struct v4l2_capability *cap)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	cap->version = VPBE_DISPLAY_VERSION_CODE;
+	cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+	strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
+	strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+	strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
+
+	return 0;
+}
+
+static int vpbe_display_s_crop(struct file *file, void *priv,
+			     struct v4l2_crop *crop)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_display *disp_dev = fh->disp_dev;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	struct osd_layer_config *cfg = &layer->layer_info.config;
+	struct osd_state *osd_device = disp_dev->osd_device;
+	struct v4l2_rect *rect = &crop->c;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+		"VIDIOC_S_CROP, layer id = %d\n", layer->device_id);
+
+	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+		return -EINVAL;
+	}
+
+	if (rect->top < 0)
+		rect->top = 0;
+	if (rect->left < 0)
+		rect->left = 0;
+
+	vpbe_disp_check_window_params(disp_dev, rect);
+
+	osd_device->ops.get_layer_config(osd_device,
+			layer->layer_info.id, cfg);
+
+	vpbe_disp_calculate_scale_factor(disp_dev, layer,
+					rect->width,
+					rect->height);
+	vpbe_disp_adj_position(disp_dev, layer, rect->top,
+					rect->left);
+	ret = osd_device->ops.set_layer_config(osd_device,
+				layer->layer_info.id, cfg);
+	if (ret < 0) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"Error in set layer config:\n");
+		return -EINVAL;
+	}
+
+	/* apply zooming and h or v expansion */
+	osd_device->ops.set_zoom(osd_device,
+			layer->layer_info.id,
+			layer->layer_info.h_zoom,
+			layer->layer_info.v_zoom);
+	ret = osd_device->ops.set_vid_expansion(osd_device,
+			layer->layer_info.h_exp,
+			layer->layer_info.v_exp);
+	if (ret < 0) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+		"Error in set vid expansion:\n");
+		return -EINVAL;
+	}
+
+	if ((layer->layer_info.h_zoom != ZOOM_X1) ||
+		(layer->layer_info.v_zoom != ZOOM_X1) ||
+		(layer->layer_info.h_exp != H_EXP_OFF) ||
+		(layer->layer_info.v_exp != V_EXP_OFF))
+		/* Enable expansion filter */
+		osd_device->ops.set_interpolation_filter(osd_device, 1);
+	else
+		osd_device->ops.set_interpolation_filter(osd_device, 0);
+
+	return 0;
+}
+
+static int vpbe_display_g_crop(struct file *file, void *priv,
+			     struct v4l2_crop *crop)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct osd_layer_config *cfg = &layer->layer_info.config;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	struct osd_state *osd_device = fh->disp_dev->osd_device;
+	struct v4l2_rect *rect = &crop->c;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"VIDIOC_G_CROP, layer id = %d\n",
+			layer->device_id);
+
+	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+		return -EINVAL;
+	}
+	osd_device->ops.get_layer_config(osd_device,
+				layer->layer_info.id, cfg);
+	rect->top = cfg->ypos;
+	rect->left = cfg->xpos;
+	rect->width = cfg->xsize;
+	rect->height = cfg->ysize;
+
+	return 0;
+}
+
+static int vpbe_display_cropcap(struct file *file, void *priv,
+			      struct v4l2_cropcap *cropcap)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
+
+	cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	cropcap->bounds.left = 0;
+	cropcap->bounds.top = 0;
+	cropcap->bounds.width = vpbe_dev->current_timings.xres;
+	cropcap->bounds.height = vpbe_dev->current_timings.yres;
+	cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
+	cropcap->defrect = cropcap->bounds;
+	return 0;
+}
+
+static int vpbe_display_g_fmt(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"VIDIOC_G_FMT, layer id = %d\n",
+			layer->device_id);
+
+	/* If buffer type is video output */
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+		return -EINVAL;
+	}
+	/* Fill in the information about format */
+	fmt->fmt.pix = layer->pix_fmt;
+
+	return 0;
+}
+
+static int vpbe_display_enum_fmt(struct file *file, void  *priv,
+				   struct v4l2_fmtdesc *fmt)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	unsigned int index = 0;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+				"VIDIOC_ENUM_FMT, layer id = %d\n",
+				layer->device_id);
+	if (fmt->index > 1) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
+		return -EINVAL;
+	}
+
+	/* Fill in the information about format */
+	index = fmt->index;
+	memset(fmt, 0, sizeof(*fmt));
+	fmt->index = index;
+	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	if (index == 0) {
+		strcpy(fmt->description, "YUV 4:2:2 - UYVY");
+		fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+	} else {
+		strcpy(fmt->description, "Y/CbCr 4:2:0");
+		fmt->pixelformat = V4L2_PIX_FMT_NV12;
+	}
+
+	return 0;
+}
+
+static int vpbe_display_s_fmt(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_display *disp_dev = fh->disp_dev;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	struct osd_layer_config *cfg  = &layer->layer_info.config;
+	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+	struct osd_state *osd_device = disp_dev->osd_device;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"VIDIOC_S_FMT, layer id = %d\n",
+			layer->device_id);
+
+	/* If streaming is started, return error */
+	if (layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+		return -EBUSY;
+	}
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+		v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
+		return -EINVAL;
+	}
+	/* Check for valid pixel format */
+	ret = vpbe_try_format(disp_dev, pixfmt, 1);
+	if (ret)
+		return ret;
+
+	/*
+	 * For V4L2_PIX_FMT_NV12 (YUV 4:2:0 semi-planar) both video windows
+	 * are needed; the other window's pixel format is updated below.
+	 */
+
+	layer->pix_fmt = *pixfmt;
+
+	/* Get osd layer config */
+	osd_device->ops.get_layer_config(osd_device,
+			layer->layer_info.id, cfg);
+	/* Store the pixel format in the layer object */
+	cfg->xsize = pixfmt->width;
+	cfg->ysize = pixfmt->height;
+	cfg->line_length = pixfmt->bytesperline;
+	cfg->ypos = 0;
+	cfg->xpos = 0;
+	cfg->interlaced = vpbe_dev->current_timings.interlaced;
+
+	if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
+		cfg->pixfmt = PIXFMT_YCbCrI;
+
+	/* Change of the default pixel format for both video windows */
+	if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
+		struct vpbe_layer *otherlayer;
+		cfg->pixfmt = PIXFMT_NV12;
+		otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
+								layer);
+		otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
+	}
+
+	/* Set the layer config in the osd window */
+	ret = osd_device->ops.set_layer_config(osd_device,
+				layer->layer_info.id, cfg);
+	if (ret < 0) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+				"Error in S_FMT params:\n");
+		return -EINVAL;
+	}
+
+	/* Readback and fill the local copy of current pix format */
+	osd_device->ops.get_layer_config(osd_device,
+			layer->layer_info.id, cfg);
+
+	return 0;
+}
+
+static int vpbe_display_try_fmt(struct file *file, void *priv,
+				  struct v4l2_format *fmt)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_display *disp_dev = fh->disp_dev;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+		return -EINVAL;
+	}
+
+	/* Check for valid field format */
+	return  vpbe_try_format(disp_dev, pixfmt, 0);
+
+}
+
+/**
+ * vpbe_display_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if it is supported by the current encoder.
+ * Returns 0 on success or -EINVAL on error.
+ */
+static int vpbe_display_s_std(struct file *file, void *priv,
+				v4l2_std_id *std_id)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
+
+	/* If streaming is started, return error */
+	if (layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+		return -EBUSY;
+	}
+	if (NULL != vpbe_dev->ops.s_std) {
+		ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
+		if (ret) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+			"Failed to set standard for sub devices\n");
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vpbe_display_g_std - Get the standard in the current encoder
+ *
+ * Gets the standard from the current encoder.
+ * Returns 0 on success or -EINVAL on error.
+ */
+static int vpbe_display_g_std(struct file *file, void *priv,
+				v4l2_std_id *std_id)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,	"VIDIOC_G_STD\n");
+
+	/* Get the standard from the current encoder */
+	if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
+		*std_id = vpbe_dev->current_timings.timings.std_id;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * vpbe_display_enum_output - enumerate outputs
+ *
+ * Enumerates the outputs available at the vpbe display.
+ * Returns 0 on success or -EINVAL at the end of the output list.
+ */
+static int vpbe_display_enum_output(struct file *file, void *priv,
+				    struct v4l2_output *output)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,	"VIDIOC_ENUM_OUTPUT\n");
+
+	/* Enumerate outputs */
+
+	if (NULL == vpbe_dev->ops.enum_outputs)
+		return -EINVAL;
+
+	ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
+	if (ret) {
+		v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"Failed to enumerate outputs\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vpbe_display_s_output - Set the output specified by the index
+ */
+static int vpbe_display_s_output(struct file *file, void *priv,
+				unsigned int i)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,	"VIDIOC_S_OUTPUT\n");
+	/* If streaming is started, return error */
+	if (layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+		return -EBUSY;
+	}
+	if (NULL == vpbe_dev->ops.set_output)
+		return -EINVAL;
+
+	ret = vpbe_dev->ops.set_output(vpbe_dev, i);
+	if (ret) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"Failed to set output for sub devices\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vpbe_display_g_output - Get the index of the currently selected output
+ */
+static int vpbe_display_g_output(struct file *file, void *priv,
+				unsigned int *i)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
+	/* Return the index of the currently selected output */
+	*i = vpbe_dev->current_out_index;
+
+	return 0;
+}
+
+/**
+ * vpbe_display_enum_dv_presets - Enumerate the dv presets
+ *
+ * Enumerates the DV presets supported by the current encoder.
+ * Returns 0 on success or -EINVAL on error.
+ */
+static int
+vpbe_display_enum_dv_presets(struct file *file, void *priv,
+			struct v4l2_dv_enum_preset *preset)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_PRESETS\n");
+
+	/* Enumerate the DV presets supported by the encoder */
+	if (NULL == vpbe_dev->ops.enum_dv_presets)
+		return -EINVAL;
+
+	ret = vpbe_dev->ops.enum_dv_presets(vpbe_dev, preset);
+	if (ret) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"Failed to enumerate dv presets info\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vpbe_display_s_dv_preset - Set the dv presets
+ *
+ * Sets the preset in the current encoder.
+ * Returns 0 on success or -EINVAL on error.
+ */
+static int
+vpbe_display_s_dv_preset(struct file *file, void *priv,
+				struct v4l2_dv_preset *preset)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_PRESET\n");
+
+	/* If streaming is started, return error */
+	if (layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+		return -EBUSY;
+	}
+
+	/* Set the given standard in the encoder */
+	if (!vpbe_dev->ops.s_dv_preset)
+		return -EINVAL;
+
+	ret = vpbe_dev->ops.s_dv_preset(vpbe_dev, preset);
+	if (ret) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"Failed to set the dv presets info\n");
+		return -EINVAL;
+	}
+	/* set the current norm to zero to be consistent. If STD is used
+	 * v4l2 layer will set the norm properly on successful s_std call
+	 */
+	layer->video_dev.current_norm = 0;
+
+	return 0;
+}
+
+/**
+ * vpbe_display_g_dv_preset - Get the current dv preset
+ *
+ * Gets the preset from the current encoder.
+ * Returns 0 on success or -EINVAL on error.
+ */
+static int
+vpbe_display_g_dv_preset(struct file *file, void *priv,
+				struct v4l2_dv_preset *dv_preset)
+{
+	struct vpbe_fh *fh = priv;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_PRESET\n");
+
+	/* Get the current DV preset from the encoder */
+
+	if (vpbe_dev->current_timings.timings_type &
+				VPBE_ENC_DV_PRESET) {
+		dv_preset->preset =
+			vpbe_dev->current_timings.timings.dv_preset;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vpbe_display_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type buf_type)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	struct osd_state *osd_device = fh->disp_dev->osd_device;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"VIDIOC_STREAMOFF,layer id = %d\n",
+			layer->device_id);
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	/* If io is not allowed for this file handle, return error */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+		return -EACCES;
+	}
+
+	/* If streaming is not started, return error */
+	if (!layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "streaming not started in layer"
+			" id = %d\n", layer->device_id);
+		return -EINVAL;
+	}
+
+	osd_device->ops.disable_layer(osd_device,
+			layer->layer_info.id);
+	layer->started = 0;
+	ret = videobuf_streamoff(&layer->buffer_queue);
+
+	return ret;
+}
+
+static int vpbe_display_streamon(struct file *file, void *priv,
+			 enum v4l2_buf_type buf_type)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_display *disp_dev = fh->disp_dev;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	struct osd_state *osd_device = disp_dev->osd_device;
+	int ret;
+
+	osd_device->ops.disable_layer(osd_device,
+			layer->layer_info.id);
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
+						layer->device_id);
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	/* If file handle is not allowed IO, return error */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+		return -EACCES;
+	}
+	/* If Streaming is already started, return error */
+	if (layer->started) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Call videobuf_streamon to start streaming
+	 * in videobuf
+	 */
+	ret = videobuf_streamon(&layer->buffer_queue);
+	if (ret) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+		"error in videobuf_streamon\n");
+		return ret;
+	}
+	/* If buffer queue is empty, return error */
+	if (list_empty(&layer->dma_queue)) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
+		ret = -EIO;
+		goto streamoff;
+	}
+	/* Get the next frame from the buffer queue */
+	layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+				struct videobuf_buffer, queue);
+	/* Remove buffer from the buffer queue */
+	list_del(&layer->cur_frm->queue);
+	/* Mark state of the current frame to active */
+	layer->cur_frm->state = VIDEOBUF_ACTIVE;
+	/* Initialize field_id and started member */
+	layer->field_id = 0;
+
+	/* Set parameters in OSD and VENC */
+	ret = vpbe_set_osd_display_params(disp_dev, layer);
+	if (ret < 0)
+		goto streamoff;
+
+	/*
+	 * if request format is yuv420 semiplanar, need to
+	 * enable both video windows
+	 */
+	layer->started = 1;
+
+	layer->layer_first_int = 1;
+
+	return ret;
+streamoff:
+	videobuf_streamoff(&layer->buffer_queue);
+	return ret;
+}
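+
+/*
+ * Note on buffer flow (a sketch of the intended design): STREAMON pulls the
+ * first queued buffer off dma_queue and programs it into the OSD/VENC via
+ * vpbe_set_osd_display_params(); buffers queued later with QBUF are
+ * presumably switched in from the VENC interrupt handler (venc_isr,
+ * requested in vpbe_display_probe() below), which is why dma_queue accesses
+ * are guarded by layer->irqlock.
+ */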
+
+static int vpbe_display_dqbuf(struct file *file, void *priv,
+		      struct v4l2_buffer *buf)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+		"VIDIOC_DQBUF, layer id = %d\n",
+		layer->device_id);
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+	/* If this file handle is not allowed to do IO, return error */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+		return -EACCES;
+	}
+	if (file->f_flags & O_NONBLOCK)
+		/* Call videobuf_dqbuf for non blocking mode */
+		ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
+	else
+		/* Call videobuf_dqbuf for blocking mode */
+		ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
+
+	return ret;
+}
+
+static int vpbe_display_qbuf(struct file *file, void *priv,
+		     struct v4l2_buffer *p)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+		"VIDIOC_QBUF, layer id = %d\n",
+		layer->device_id);
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	/* If this file handle is not allowed to do IO, return error */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+		return -EACCES;
+	}
+
+	return videobuf_qbuf(&layer->buffer_queue, p);
+}
+
+static int vpbe_display_querybuf(struct file *file, void *priv,
+			 struct v4l2_buffer *buf)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+		"VIDIOC_QUERYBUF, layer id = %d\n",
+		layer->device_id);
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	/* Call videobuf_querybuf to get information */
+	ret = videobuf_querybuf(&layer->buffer_queue, buf);
+
+	return ret;
+}
+
+static int vpbe_display_reqbufs(struct file *file, void *priv,
+			struct v4l2_requestbuffers *req_buf)
+{
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	/* If the layer already has IO users, return error */
+	if (0 != layer->io_usrs) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "layer already in use\n");
+		return -EBUSY;
+	}
+	/* Initialize videobuf queue as per the buffer type */
+	videobuf_queue_dma_contig_init(&layer->buffer_queue,
+				&video_qops,
+				vpbe_dev->pdev,
+				&layer->irqlock,
+				V4L2_BUF_TYPE_VIDEO_OUTPUT,
+				layer->pix_fmt.field,
+				sizeof(struct videobuf_buffer),
+				fh, NULL);
+
+	/* Set io allowed member of file handle to TRUE */
+	fh->io_allowed = 1;
+	/* Set io_usrs member of layer object to 1 */
+	layer->io_usrs = 1;
+	/* Store type of memory requested in layer object */
+	layer->memory = req_buf->memory;
+	/* Initialize buffer queue */
+	INIT_LIST_HEAD(&layer->dma_queue);
+	/* Allocate buffers */
+	ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);
+
+	return ret;
+}
+
+/*
+ * vpbe_display_mmap()
+ * It is used to map kernel space buffers into user spaces
+ */
+static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+	/* Get the layer object and file handle object */
+	struct vpbe_fh *fh = filep->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
+
+	if (mutex_lock_interruptible(&layer->opslock))
+		return -ERESTARTSYS;
+	ret = videobuf_mmap_mapper(&layer->buffer_queue, vma);
+	mutex_unlock(&layer->opslock);
+	return ret;
+}
+
+/* vpbe_display_poll(): It is used for select/poll system call
+ */
+static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
+{
+	struct vpbe_fh *fh = filep->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+	unsigned int err = 0;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
+	if (layer->started) {
+		mutex_lock(&layer->opslock);
+		err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
+		mutex_unlock(&layer->opslock);
+	}
+	return err;
+}
+
+/*
+ * vpbe_display_open()
+ * It creates object of file handle structure and stores it in private_data
+ * member of filepointer
+ */
+static int vpbe_display_open(struct file *file)
+{
+	struct vpbe_fh *fh = NULL;
+	struct vpbe_layer *layer = video_drvdata(file);
+	struct vpbe_display *disp_dev = layer->disp_dev;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	struct osd_state *osd_device = disp_dev->osd_device;
+	int err;
+
+	/* Allocate memory for the file handle object */
+	fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL);
+	if (fh == NULL) {
+		v4l2_err(&vpbe_dev->v4l2_dev,
+			"unable to allocate memory for file handle object\n");
+		return -ENOMEM;
+	}
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"vpbe display open plane = %d\n",
+			layer->device_id);
+
+	/* store pointer to fh in private_data member of filep */
+	file->private_data = fh;
+	fh->layer = layer;
+	fh->disp_dev = disp_dev;
+
+	if (!layer->usrs) {
+		if (mutex_lock_interruptible(&layer->opslock)) {
+			/* don't leak the file handle object */
+			file->private_data = NULL;
+			kfree(fh);
+			return -ERESTARTSYS;
+		}
+		/* First claim the layer for this device */
+		err = osd_device->ops.request_layer(osd_device,
+						layer->layer_info.id);
+		mutex_unlock(&layer->opslock);
+		if (err < 0) {
+			/* Couldn't get layer */
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				"Display Manager failed to allocate layer\n");
+			kfree(fh);
+			return -EINVAL;
+		}
+	}
+	/* Increment layer usrs counter */
+	layer->usrs++;
+	/* Set io_allowed member to false */
+	fh->io_allowed = 0;
+	/* Initialize priority of this instance to default priority */
+	fh->prio = V4L2_PRIORITY_UNSET;
+	v4l2_prio_open(&layer->prio, &fh->prio);
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+			"vpbe display device opened successfully\n");
+	return 0;
+}
+
+/*
+ * vpbe_display_release()
+ * This function deletes the buffer queue, frees the buffers and the davinci
+ * display file handle
+ */
+static int vpbe_display_release(struct file *file)
+{
+	/* Get the layer object and file handle object */
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_layer *layer = fh->layer;
+	struct osd_layer_config *cfg  = &layer->layer_info.config;
+	struct vpbe_display *disp_dev = fh->disp_dev;
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	struct osd_state *osd_device = disp_dev->osd_device;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
+
+	mutex_lock(&layer->opslock);
+	/* if this instance is doing IO */
+	if (fh->io_allowed) {
+		/* Reset io_usrs member of layer object */
+		layer->io_usrs = 0;
+
+		osd_device->ops.disable_layer(osd_device,
+				layer->layer_info.id);
+		layer->started = 0;
+		/* Free buffers allocated */
+		videobuf_queue_cancel(&layer->buffer_queue);
+		videobuf_mmap_free(&layer->buffer_queue);
+	}
+
+	/* Decrement layer usrs counter */
+	layer->usrs--;
+	/* If this was the last user, disable and release the OSD layer(s) */
+	if (!layer->usrs) {
+		if (cfg->pixfmt == PIXFMT_NV12) {
+			struct vpbe_layer *otherlayer;
+			otherlayer =
+			_vpbe_display_get_other_win_layer(disp_dev, layer);
+			osd_device->ops.disable_layer(osd_device,
+					otherlayer->layer_info.id);
+			osd_device->ops.release_layer(osd_device,
+					otherlayer->layer_info.id);
+		}
+		osd_device->ops.disable_layer(osd_device,
+				layer->layer_info.id);
+		osd_device->ops.release_layer(osd_device,
+				layer->layer_info.id);
+	}
+	/* Close the priority */
+	v4l2_prio_close(&layer->prio, fh->prio);
+	file->private_data = NULL;
+	mutex_unlock(&layer->opslock);
+
+	/* Free memory allocated to file handle object */
+	kfree(fh);
+
+	disp_dev->cbcr_ofst = 0;
+
+	return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int vpbe_display_g_register(struct file *file, void *priv,
+			struct v4l2_dbg_register *reg)
+{
+	struct v4l2_dbg_match *match = &reg->match;
+	struct vpbe_fh *fh = file->private_data;
+	struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+	if (match->type >= 2) {
+		v4l2_subdev_call(vpbe_dev->venc,
+				 core,
+				 g_register,
+				 reg);
+	}
+
+	return 0;
+}
+
+static int vpbe_display_s_register(struct file *file, void *priv,
+			struct v4l2_dbg_register *reg)
+{
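+	/* no-op: register writes are not handled by this driver */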
+	return 0;
+}
+#endif
+
+/* vpbe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
+	.vidioc_querycap	 = vpbe_display_querycap,
+	.vidioc_g_fmt_vid_out    = vpbe_display_g_fmt,
+	.vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
+	.vidioc_s_fmt_vid_out    = vpbe_display_s_fmt,
+	.vidioc_try_fmt_vid_out  = vpbe_display_try_fmt,
+	.vidioc_reqbufs		 = vpbe_display_reqbufs,
+	.vidioc_querybuf	 = vpbe_display_querybuf,
+	.vidioc_qbuf		 = vpbe_display_qbuf,
+	.vidioc_dqbuf		 = vpbe_display_dqbuf,
+	.vidioc_streamon	 = vpbe_display_streamon,
+	.vidioc_streamoff	 = vpbe_display_streamoff,
+	.vidioc_cropcap		 = vpbe_display_cropcap,
+	.vidioc_g_crop		 = vpbe_display_g_crop,
+	.vidioc_s_crop		 = vpbe_display_s_crop,
+	.vidioc_g_priority	 = vpbe_display_g_priority,
+	.vidioc_s_priority	 = vpbe_display_s_priority,
+	.vidioc_s_std		 = vpbe_display_s_std,
+	.vidioc_g_std		 = vpbe_display_g_std,
+	.vidioc_enum_output	 = vpbe_display_enum_output,
+	.vidioc_s_output	 = vpbe_display_s_output,
+	.vidioc_g_output	 = vpbe_display_g_output,
+	.vidioc_s_dv_preset	 = vpbe_display_s_dv_preset,
+	.vidioc_g_dv_preset	 = vpbe_display_g_dv_preset,
+	.vidioc_enum_dv_presets	 = vpbe_display_enum_dv_presets,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register	 = vpbe_display_g_register,
+	.vidioc_s_register	 = vpbe_display_s_register,
+#endif
+};
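+
+/*
+ * A minimal sketch (not driver code) of the userspace sequence these ioctls
+ * serve, assuming the standard V4L2 MMAP streaming I/O model:
+ *
+ *	fd = open("/dev/videoN", O_RDWR);
+ *	ioctl(fd, VIDIOC_S_FMT, &fmt);      (V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ *	ioctl(fd, VIDIOC_REQBUFS, &req);    (V4L2_MEMORY_MMAP)
+ *	mmap() each buffer, fill it with video data, then
+ *	ioctl(fd, VIDIOC_QBUF, &buf);
+ *	ioctl(fd, VIDIOC_STREAMON, &type);
+ *	loop: VIDIOC_DQBUF, refill, VIDIOC_QBUF
+ */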
+
+static struct v4l2_file_operations vpbe_fops = {
+	.owner = THIS_MODULE,
+	.open = vpbe_display_open,
+	.release = vpbe_display_release,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap = vpbe_display_mmap,
+	.poll = vpbe_display_poll
+};
+
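+/*
+ * bus_for_each_dev() callback used by vpbe_display_probe() below: it picks
+ * up the driver data of the "vpbe_controller" and "vpbe-osd" platform
+ * devices so the display driver can reach the vpbe and OSD sub-devices.
+ */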
+static int vpbe_device_get(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct vpbe_display *vpbe_disp  = data;
+
+	if (strcmp("vpbe_controller", pdev->name) == 0)
+		vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
+
+	if (strcmp("vpbe-osd", pdev->name) == 0)
+		vpbe_disp->osd_device = platform_get_drvdata(pdev);
+
+	return 0;
+}
+
+static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+				     struct platform_device *pdev)
+{
+	struct vpbe_layer *vpbe_display_layer = NULL;
+	struct video_device *vbd = NULL;
+
+	/* Allocate memory for the layer (plane) object */
+
+	disp_dev->dev[i] =
+		kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
+
+	/* If memory allocation fails, return error */
+	if (!disp_dev->dev[i]) {
+		printk(KERN_ERR "ran out of memory\n");
+		return  -ENOMEM;
+	}
+	spin_lock_init(&disp_dev->dev[i]->irqlock);
+	mutex_init(&disp_dev->dev[i]->opslock);
+
+	/* Get the pointer to the layer object */
+	vpbe_display_layer = disp_dev->dev[i];
+	vbd = &vpbe_display_layer->video_dev;
+	/* Initialize field of video device */
+	vbd->release	= video_device_release_empty;
+	vbd->fops	= &vpbe_fops;
+	vbd->ioctl_ops	= &vpbe_ioctl_ops;
+	vbd->minor	= -1;
+	vbd->v4l2_dev   = &disp_dev->vpbe_dev->v4l2_dev;
+	vbd->lock	= &vpbe_display_layer->opslock;
+
+	if (disp_dev->vpbe_dev->current_timings.timings_type &
+			VPBE_ENC_STD) {
+		vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
+		vbd->current_norm =
+			disp_dev->vpbe_dev->
+			current_timings.timings.std_id;
+	} else
+		vbd->current_norm = 0;
+
+	snprintf(vbd->name, sizeof(vbd->name),
+			"DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
+			(VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
+			(VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
+			(VPBE_DISPLAY_VERSION_CODE) & 0xff);
+
+	vpbe_display_layer->device_id = i;
+
+	vpbe_display_layer->layer_info.id =
+		((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);
+
+	/* Initialize prio member of layer object */
+	v4l2_prio_init(&vpbe_display_layer->prio);
+
+	return 0;
+}
+
+static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
+					struct vpbe_display *disp_dev,
+					struct platform_device *pdev)
+{
+	int err;
+
+	v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+		  "Trying to register VPBE display device.\n");
+	v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+		  "layer=%x,layer->video_dev=%x\n",
+		  (int)vpbe_display_layer,
+		  (int)&vpbe_display_layer->video_dev);
+
+	err = video_register_device(&vpbe_display_layer->video_dev,
+				    VFL_TYPE_GRABBER,
+				    -1);
+	if (err)
+		return -ENODEV;
+
+	vpbe_display_layer->disp_dev = disp_dev;
+	/* set the driver data in platform device */
+	platform_set_drvdata(pdev, disp_dev);
+	video_set_drvdata(&vpbe_display_layer->video_dev,
+			  vpbe_display_layer);
+
+	return 0;
+}
+
+/*
+ * vpbe_display_probe()
+ * This function creates device entries by registering itself to the V4L2
+ * driver and initializes the fields of each layer object
+ */
+static __devinit int vpbe_display_probe(struct platform_device *pdev)
+{
+	struct vpbe_layer *vpbe_display_layer;
+	struct vpbe_display *disp_dev;
+	struct resource *res = NULL;
+	int k;
+	int i;
+	int err;
+	int irq;
+
+	printk(KERN_DEBUG "vpbe_display_probe\n");
+	/* Allocate memory for vpbe_display */
+	disp_dev = kzalloc(sizeof(struct vpbe_display), GFP_KERNEL);
+	if (!disp_dev) {
+		printk(KERN_ERR "ran out of memory\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&disp_dev->dma_queue_lock);
+	/*
+	 * Scan all the platform devices to find the vpbe
+	 * controller device and get the vpbe_dev object
+	 */
+	err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
+			vpbe_device_get);
+	if (err < 0)
+		goto probe_out;
+	/* Initialize the vpbe display controller */
+	if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+		err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
+							 disp_dev->vpbe_dev);
+		if (err) {
+			v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+					"Error initializing vpbe\n");
+			goto probe_out;
+		}
+	}
+
+	for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+		if (init_vpbe_layer(i, disp_dev, pdev)) {
+			err = -ENODEV;
+			goto probe_out;
+		}
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+			 "Unable to get VENC interrupt resource\n");
+		err = -ENODEV;
+		goto probe_out;
+	}
+
+	irq = res->start;
+	if (request_irq(irq, venc_isr,  IRQF_DISABLED, VPBE_DISPLAY_DRIVER,
+		disp_dev)) {
+		v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+				"Unable to request interrupt\n");
+		err = -ENODEV;
+		goto probe_out;
+	}
+
+	for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+		if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
+			err = -ENODEV;
+			goto probe_out_irq;
+		}
+	}
+
+	printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
+	return 0;
+
+probe_out_irq:
+	free_irq(res->start, disp_dev);
+probe_out:
+	for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
+		/* Get the pointer to the layer object */
+		vpbe_display_layer = disp_dev->dev[k];
+		/* Unregister video device */
+		if (vpbe_display_layer) {
+			video_unregister_device(
+				&vpbe_display_layer->video_dev);
+			kfree(disp_dev->dev[k]);
+		}
+	}
+	kfree(disp_dev);
+	return err;
+}
+
+/*
+ * vpbe_display_remove()
+ * It un-registers the hardware layers from the V4L2 driver
+ */
+static int vpbe_display_remove(struct platform_device *pdev)
+{
+	struct vpbe_layer *vpbe_display_layer;
+	struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
+	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+	struct resource *res;
+	int i;
+
+	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
+
+	/* unregister irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	free_irq(res->start, disp_dev);
+
+	/* deinitialize the vpbe display controller */
+	if (NULL != vpbe_dev->ops.deinitialize)
+		vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
+	/* un-register device */
+	for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+		/* Get the pointer to the layer object */
+		vpbe_display_layer = disp_dev->dev[i];
+		/* Unregister video device */
+		video_unregister_device(&vpbe_display_layer->video_dev);
+
+	}
+	for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+		kfree(disp_dev->dev[i]);
+		disp_dev->dev[i] = NULL;
+	}
+
+	return 0;
+}
+
+static struct platform_driver vpbe_display_driver = {
+	.driver = {
+		.name = VPBE_DISPLAY_DRIVER,
+		.owner = THIS_MODULE,
+		.bus = &platform_bus_type,
+	},
+	.probe = vpbe_display_probe,
+	.remove = __devexit_p(vpbe_display_remove),
+};
+
+module_platform_driver(vpbe_display_driver);
+
+MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
new file mode 100644
index 0000000..bba299d
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -0,0 +1,1605 @@
+/*
+ * Copyright (C) 2007-2010 Texas Instruments Inc
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Andy Lowe (alowe@mvista.com), MontaVista Software
+ * - Initial version
+ * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
+ * - ported to sub device interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <mach/cputype.h>
+#include <mach/hardware.h>
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_osd.h>
+
+#include <linux/io.h>
+#include "vpbe_osd_regs.h"
+
+#define MODULE_NAME	VPBE_OSD_SUBDEV_NAME
+
+/* register access routines */
+static inline u32 osd_read(struct osd_state *sd, u32 offset)
+{
+	struct osd_state *osd = sd;
+
+	return readl(osd->osd_base + offset);
+}
+
+static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
+{
+	struct osd_state *osd = sd;
+
+	writel(val, osd->osd_base + offset);
+
+	return val;
+}
+
+static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
+{
+	struct osd_state *osd = sd;
+
+	u32 addr = osd->osd_base + offset;
+	u32 val = readl(addr) | mask;
+
+	writel(val, addr);
+
+	return val;
+}
+
+static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
+{
+	struct osd_state *osd = sd;
+
+	u32 addr = osd->osd_base + offset;
+	u32 val = readl(addr) & ~mask;
+
+	writel(val, addr);
+
+	return val;
+}
+
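+/* read-modify-write: replace the bits selected by mask with those of val */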
+static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
+				 u32 offset)
+{
+	struct osd_state *osd = sd;
+
+	u32 addr = osd->osd_base + offset;
+	u32 new_val = (readl(addr) & ~mask) | (val & mask);
+
+	writel(new_val, addr);
+
+	return new_val;
+}
+
+/* define some macros for layer and pixfmt classification */
+#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
+#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
+#define is_rgb_pixfmt(pixfmt) \
+	(((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
+#define is_yc_pixfmt(pixfmt) \
+	(((pixfmt) == PIXFMT_YCbCrI) || ((pixfmt) == PIXFMT_YCrCbI) || \
+	((pixfmt) == PIXFMT_NV12))
+#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
+#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
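+
+/*
+ * Window geometry limits come from the register field masks: line offsets
+ * are programmed in units of 32 bytes (the code below writes
+ * line_length >> 5 into the *OFST registers), hence the << 5 applied to the
+ * line-offset field mask.
+ */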
+
+/**
+ * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
+ * @sd: ptr to struct osd_state
+ * @field_inversion: inversion flag
+ * @fb_base_phys: frame buffer address
+ * @lconfig: ptr to layer config
+ *
+ * This routine implements a workaround for the field signal inversion silicon
+ * erratum described in Advisory 1.3.8 for the DM6446.  The fb_base_phys and
+ * lconfig parameters apply to the vid0 window.  This routine should be called
+ * whenever the vid0 layer configuration or start address is modified, or when
+ * the OSD field inversion setting is modified.
+ * Returns: 1 if the ping-pong buffers need to be toggled in the vsync isr, or
+ *          0 otherwise
+ */
+static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
+				     int field_inversion,
+				     unsigned long fb_base_phys,
+				     const struct osd_layer_config *lconfig)
+{
+	struct osd_platform_data *pdata;
+
+	pdata = (struct osd_platform_data *)sd->dev->platform_data;
+	if (pdata->field_inv_wa_enable) {
+
+		if (!field_inversion || !lconfig->interlaced) {
+			osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+			osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
+			osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
+				   OSD_MISCCTL);
+			return 0;
+		} else {
+			unsigned miscctl = OSD_MISCCTL_PPRV;
+
+			osd_write(sd,
+				(fb_base_phys & ~0x1F) - lconfig->line_length,
+				OSD_VIDWIN0ADR);
+			osd_write(sd,
+				(fb_base_phys & ~0x1F) + lconfig->line_length,
+				OSD_PPVWIN0ADR);
+			osd_modify(sd,
+				OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
+				OSD_MISCCTL);
+
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static void _osd_set_field_inversion(struct osd_state *sd, int enable)
+{
+	unsigned fsinv = 0;
+
+	if (enable)
+		fsinv = OSD_MODE_FSINV;
+
+	osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
+}
+
+static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
+				     enum osd_blink_interval blink)
+{
+	u32 osdatrmd = 0;
+
+	if (enable) {
+		osdatrmd |= OSD_OSDATRMD_BLNK;
+		osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
+	}
+	/* caller must ensure that OSD1 is configured in attribute mode */
+	osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
+		  OSD_OSDATRMD);
+}
+
+static void _osd_set_rom_clut(struct osd_state *sd,
+			      enum osd_rom_clut rom_clut)
+{
+	if (rom_clut == ROM_CLUT0)
+		osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+	else
+		osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+}
+
+static void _osd_set_palette_map(struct osd_state *sd,
+				 enum osd_win_layer osdwin,
+				 unsigned char pixel_value,
+				 unsigned char clut_index,
+				 enum osd_pix_format pixfmt)
+{
+	static const int map_2bpp[] = { 0, 5, 10, 15 };
+	static const int map_1bpp[] = { 0, 15 };
+	int bmp_offset;
+	int bmp_shift;
+	int bmp_mask;
+	int bmp_reg;
+
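+	/*
+	 * Each OSD_WnBMPxy register holds two 8-bit CLUT indices: bmp_reg
+	 * picks one of the 16 bitmap entries, bmp_reg >> 1 selects the
+	 * register and bit 0 of bmp_reg selects the byte lane (shift 0 or 8).
+	 * 1bpp/2bpp pixel values are spread over the 16-entry map via
+	 * map_1bpp[]/map_2bpp[].
+	 */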
+	switch (pixfmt) {
+	case PIXFMT_1BPP:
+		bmp_reg = map_1bpp[pixel_value & 0x1];
+		break;
+	case PIXFMT_2BPP:
+		bmp_reg = map_2bpp[pixel_value & 0x3];
+		break;
+	case PIXFMT_4BPP:
+		bmp_reg = pixel_value & 0xf;
+		break;
+	default:
+		return;
+	}
+
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
+		break;
+	case OSDWIN_OSD1:
+		bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
+		break;
+	default:
+		return;
+	}
+
+	if (bmp_reg & 1) {
+		bmp_shift = 8;
+		bmp_mask = 0xff << 8;
+	} else {
+		bmp_shift = 0;
+		bmp_mask = 0xff;
+	}
+
+	osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
+}
+
+static void _osd_set_rec601_attenuation(struct osd_state *sd,
+					enum osd_win_layer osdwin, int enable)
+{
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+			  enable ? OSD_OSDWIN0MD_ATN0E : 0,
+			  OSD_OSDWIN0MD);
+		if (sd->vpbe_type == VPBE_VERSION_1)
+			osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+				  enable ? OSD_OSDWIN0MD_ATN0E : 0,
+				  OSD_OSDWIN0MD);
+		else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+			   (sd->vpbe_type == VPBE_VERSION_2))
+			osd_modify(sd, OSD_EXTMODE_ATNOSD0EN,
+				  enable ? OSD_EXTMODE_ATNOSD0EN : 0,
+				  OSD_EXTMODE);
+		break;
+	case OSDWIN_OSD1:
+		osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+			  enable ? OSD_OSDWIN1MD_ATN1E : 0,
+			  OSD_OSDWIN1MD);
+		if (sd->vpbe_type == VPBE_VERSION_1)
+			osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+				  enable ? OSD_OSDWIN1MD_ATN1E : 0,
+				  OSD_OSDWIN1MD);
+		else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+			   (sd->vpbe_type == VPBE_VERSION_2))
+			osd_modify(sd, OSD_EXTMODE_ATNOSD1EN,
+				  enable ? OSD_EXTMODE_ATNOSD1EN : 0,
+				  OSD_EXTMODE);
+		break;
+	}
+}
+
+static void _osd_set_blending_factor(struct osd_state *sd,
+				     enum osd_win_layer osdwin,
+				     enum osd_blending_factor blend)
+{
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		osd_modify(sd, OSD_OSDWIN0MD_BLND0,
+			  blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
+		break;
+	case OSDWIN_OSD1:
+		osd_modify(sd, OSD_OSDWIN1MD_BLND1,
+			  blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
+		break;
+	}
+}
+
+static void _osd_enable_rgb888_pixblend(struct osd_state *sd,
+					enum osd_win_layer osdwin)
+{
+
+	osd_modify(sd, OSD_MISCCTL_BLDSEL, 0, OSD_MISCCTL);
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		osd_modify(sd, OSD_EXTMODE_OSD0BLDCHR,
+			  OSD_EXTMODE_OSD0BLDCHR, OSD_EXTMODE);
+		break;
+	case OSDWIN_OSD1:
+		osd_modify(sd, OSD_EXTMODE_OSD1BLDCHR,
+			  OSD_EXTMODE_OSD1BLDCHR, OSD_EXTMODE);
+		break;
+	}
+}
+
+static void _osd_enable_color_key(struct osd_state *sd,
+				  enum osd_win_layer osdwin,
+				  unsigned colorkey,
+				  enum osd_pix_format pixfmt)
+{
+	switch (pixfmt) {
+	case PIXFMT_1BPP:
+	case PIXFMT_2BPP:
+	case PIXFMT_4BPP:
+	case PIXFMT_8BPP:
+		if (sd->vpbe_type == VPBE_VERSION_3) {
+			switch (osdwin) {
+			case OSDWIN_OSD0:
+				osd_modify(sd, OSD_TRANSPBMPIDX_BMP0,
+					  colorkey <<
+					  OSD_TRANSPBMPIDX_BMP0_SHIFT,
+					  OSD_TRANSPBMPIDX);
+				break;
+			case OSDWIN_OSD1:
+				osd_modify(sd, OSD_TRANSPBMPIDX_BMP1,
+					  colorkey <<
+					  OSD_TRANSPBMPIDX_BMP1_SHIFT,
+					  OSD_TRANSPBMPIDX);
+				break;
+			}
+		}
+		break;
+	case PIXFMT_RGB565:
+		if (sd->vpbe_type == VPBE_VERSION_1)
+			osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
+				  OSD_TRANSPVAL);
+		else if (sd->vpbe_type == VPBE_VERSION_3)
+			osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+				  OSD_TRANSPVALL);
+		break;
+	case PIXFMT_YCbCrI:
+	case PIXFMT_YCrCbI:
+		if (sd->vpbe_type == VPBE_VERSION_3)
+			osd_modify(sd, OSD_TRANSPVALU_Y, colorkey,
+				   OSD_TRANSPVALU);
+		break;
+	case PIXFMT_RGB888:
+		if (sd->vpbe_type == VPBE_VERSION_3) {
+			osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+				  OSD_TRANSPVALL);
+			osd_modify(sd, OSD_TRANSPVALU_RGBU, colorkey >> 16,
+				  OSD_TRANSPVALU);
+		}
+		break;
+	default:
+		break;
+	}
+
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+		break;
+	case OSDWIN_OSD1:
+		osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+		break;
+	}
+}
+
+static void _osd_disable_color_key(struct osd_state *sd,
+				   enum osd_win_layer osdwin)
+{
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+		break;
+	case OSDWIN_OSD1:
+		osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+		break;
+	}
+}
+
+static void _osd_set_osd_clut(struct osd_state *sd,
+			      enum osd_win_layer osdwin,
+			      enum osd_clut clut)
+{
+	u32 winmd = 0;
+
+	switch (osdwin) {
+	case OSDWIN_OSD0:
+		if (clut == RAM_CLUT)
+			winmd |= OSD_OSDWIN0MD_CLUTS0;
+		osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
+		break;
+	case OSDWIN_OSD1:
+		if (clut == RAM_CLUT)
+			winmd |= OSD_OSDWIN1MD_CLUTS1;
+		osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
+		break;
+	}
+}
+
+static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
+			  enum osd_zoom_factor h_zoom,
+			  enum osd_zoom_factor v_zoom)
+{
+	u32 winmd = 0;
+
+	switch (layer) {
+	case WIN_OSD0:
+		winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
+		winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
+		osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
+			  OSD_OSDWIN0MD);
+		break;
+	case WIN_VID0:
+		winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
+		winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
+		osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
+			  OSD_VIDWINMD);
+		break;
+	case WIN_OSD1:
+		winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
+		winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
+		osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
+			  OSD_OSDWIN1MD);
+		break;
+	case WIN_VID1:
+		winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
+		winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
+		osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
+			  OSD_VIDWINMD);
+		break;
+	}
+}
+
+static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+	switch (layer) {
+	case WIN_OSD0:
+		osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+		break;
+	case WIN_VID0:
+		osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+		break;
+	case WIN_OSD1:
+		/* disable attribute mode as well as disabling the window */
+		osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+			  OSD_OSDWIN1MD);
+		break;
+	case WIN_VID1:
+		osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+		break;
+	}
+}
+
+static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	if (!win->is_enabled) {
+		spin_unlock_irqrestore(&osd->lock, flags);
+		return;
+	}
+	win->is_enabled = 0;
+
+	_osd_disable_layer(sd, layer);
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void _osd_enable_attribute_mode(struct osd_state *sd)
+{
+	/* enable attribute mode for OSD1 */
+	osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
+}
+
+static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+	switch (layer) {
+	case WIN_OSD0:
+		osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+		break;
+	case WIN_VID0:
+		osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+		break;
+	case WIN_OSD1:
+		/* enable OSD1 and disable attribute mode */
+		osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+			  OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
+		break;
+	case WIN_VID1:
+		osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+		break;
+	}
+}
+
+static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
+			    int otherwin)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	struct osd_layer_config *cfg = &win->lconfig;
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	/*
+	 * use otherwin flag to know this is the other vid window
+	 * in YUV420 mode, if is, skip this check
+	 */
+	if (!otherwin && (!win->is_allocated ||
+			!win->fb_base_phys ||
+			!cfg->line_length ||
+			!cfg->xsize ||
+			!cfg->ysize)) {
+		spin_unlock_irqrestore(&osd->lock, flags);
+		return -1;
+	}
+
+	if (win->is_enabled) {
+		spin_unlock_irqrestore(&osd->lock, flags);
+		return 0;
+	}
+	win->is_enabled = 1;
+
+	if (cfg->pixfmt != PIXFMT_OSD_ATTR)
+		_osd_enable_layer(sd, layer);
+	else {
+		_osd_enable_attribute_mode(sd);
+		_osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
+	}
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+
+	return 0;
+}
+
+#define OSD_SRC_ADDR_HIGH4	0x7800000
+#define OSD_SRC_ADDR_HIGH7	0x7F0000
+#define OSD_SRCADD_OFSET_SFT	23
+#define OSD_SRCADD_ADD_SFT	16
+#define OSD_WINADL_MASK		0xFFFF
+#define OSD_WINOFST_MASK	0x1000
+#define VPBE_REG_BASE		0x80000000
+
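+/*
+ * On VPBE_VERSION_2 and VPBE_VERSION_3 the window start address is
+ * programmed as a 32-byte-aligned offset from VPBE_REG_BASE, split across
+ * the ADH/ADL (and, for VPBE_VERSION_2, the OFST high bits) register fields
+ * using the shift/mask macros above; on VPBE_VERSION_1 the physical frame
+ * buffer address is written directly.
+ */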
+static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+			     unsigned long fb_base_phys,
+			     unsigned long cbcr_ofst)
+{
+
+	if (sd->vpbe_type == VPBE_VERSION_1) {
+		switch (layer) {
+		case WIN_OSD0:
+			osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
+			break;
+		case WIN_VID0:
+			osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+			break;
+		case WIN_OSD1:
+			osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
+			break;
+		case WIN_VID1:
+			osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
+			break;
+		}
+	} else if (sd->vpbe_type == VPBE_VERSION_3) {
+		unsigned long fb_offset_32 =
+		    (fb_base_phys - VPBE_REG_BASE) >> 5;
+
+		switch (layer) {
+		case WIN_OSD0:
+			osd_modify(sd, OSD_OSDWINADH_O0AH,
+				  fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+						   OSD_OSDWINADH_O0AH_SHIFT),
+				  OSD_OSDWINADH);
+			osd_write(sd, fb_offset_32 & OSD_OSDWIN0ADL_O0AL,
+				  OSD_OSDWIN0ADL);
+			break;
+		case WIN_VID0:
+			osd_modify(sd, OSD_VIDWINADH_V0AH,
+				  fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+						   OSD_VIDWINADH_V0AH_SHIFT),
+				  OSD_VIDWINADH);
+			osd_write(sd, fb_offset_32 & OSD_VIDWIN0ADL_V0AL,
+				  OSD_VIDWIN0ADL);
+			break;
+		case WIN_OSD1:
+			osd_modify(sd, OSD_OSDWINADH_O1AH,
+				  fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+						   OSD_OSDWINADH_O1AH_SHIFT),
+				  OSD_OSDWINADH);
+			osd_write(sd, fb_offset_32 & OSD_OSDWIN1ADL_O1AL,
+				  OSD_OSDWIN1ADL);
+			break;
+		case WIN_VID1:
+			osd_modify(sd, OSD_VIDWINADH_V1AH,
+				  fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+						   OSD_VIDWINADH_V1AH_SHIFT),
+				  OSD_VIDWINADH);
+			osd_write(sd, fb_offset_32 & OSD_VIDWIN1ADL_V1AL,
+				  OSD_VIDWIN1ADL);
+			break;
+		}
+	} else if (sd->vpbe_type == VPBE_VERSION_2) {
+		struct osd_window_state *win = &sd->win[layer];
+		unsigned long fb_offset_32, cbcr_offset_32;
+
+		fb_offset_32 = fb_base_phys - VPBE_REG_BASE;
+		if (cbcr_ofst)
+			cbcr_offset_32 = cbcr_ofst;
+		else
+			cbcr_offset_32 = win->lconfig.line_length *
+					 win->lconfig.ysize;
+		cbcr_offset_32 += fb_offset_32;
+		fb_offset_32 = fb_offset_32 >> 5;
+		cbcr_offset_32 = cbcr_offset_32 >> 5;
+		/*
+		 * DM365: the start address is a 27-bit long address; bits
+		 * b26 - b23 go into offset register bits b12 - b9, and
+		 * bit 26 has to be '1'
+		 */
+		if (win->lconfig.pixfmt == PIXFMT_NV12) {
+			switch (layer) {
+			case WIN_VID0:
+			case WIN_VID1:
+				/* Y is in VID0 */
+				osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+					 ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+					 (OSD_SRCADD_OFSET_SFT -
+					 OSD_WINOFST_AH_SHIFT)) |
+					 OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+				osd_modify(sd, OSD_VIDWINADH_V0AH,
+					  (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+					  (OSD_SRCADD_ADD_SFT -
+					  OSD_VIDWINADH_V0AH_SHIFT),
+					   OSD_VIDWINADH);
+				osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+					  OSD_VIDWIN0ADL);
+				/* CbCr is in VID1 */
+				osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+					 ((cbcr_offset_32 &
+					 OSD_SRC_ADDR_HIGH4) >>
+					 (OSD_SRCADD_OFSET_SFT -
+					 OSD_WINOFST_AH_SHIFT)) |
+					 OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+				osd_modify(sd, OSD_VIDWINADH_V1AH,
+					  (cbcr_offset_32 &
+					  OSD_SRC_ADDR_HIGH7) >>
+					  (OSD_SRCADD_ADD_SFT -
+					  OSD_VIDWINADH_V1AH_SHIFT),
+					  OSD_VIDWINADH);
+				osd_write(sd, cbcr_offset_32 & OSD_WINADL_MASK,
+					  OSD_VIDWIN1ADL);
+				break;
+			default:
+				break;
+			}
+		}
+
+		switch (layer) {
+		case WIN_OSD0:
+			osd_modify(sd, OSD_OSDWIN0OFST_O0AH,
+				 ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+				 (OSD_SRCADD_OFSET_SFT -
+				 OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+				  OSD_OSDWIN0OFST);
+			osd_modify(sd, OSD_OSDWINADH_O0AH,
+				 (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+				 (OSD_SRCADD_ADD_SFT -
+				 OSD_OSDWINADH_O0AH_SHIFT), OSD_OSDWINADH);
+			osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+					OSD_OSDWIN0ADL);
+			break;
+		case WIN_VID0:
+			if (win->lconfig.pixfmt != PIXFMT_NV12) {
+				osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+					 ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+					 (OSD_SRCADD_OFSET_SFT -
+					 OSD_WINOFST_AH_SHIFT)) |
+					 OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+				osd_modify(sd, OSD_VIDWINADH_V0AH,
+					  (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+					  (OSD_SRCADD_ADD_SFT -
+					  OSD_VIDWINADH_V0AH_SHIFT),
+					  OSD_VIDWINADH);
+				osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+					  OSD_VIDWIN0ADL);
+			}
+			break;
+		case WIN_OSD1:
+			osd_modify(sd, OSD_OSDWIN1OFST_O1AH,
+				 ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+				 (OSD_SRCADD_OFSET_SFT -
+				 OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+				  OSD_OSDWIN1OFST);
+			osd_modify(sd, OSD_OSDWINADH_O1AH,
+				  (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+				  (OSD_SRCADD_ADD_SFT -
+				  OSD_OSDWINADH_O1AH_SHIFT),
+				  OSD_OSDWINADH);
+			osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+					OSD_OSDWIN1ADL);
+			break;
+		case WIN_VID1:
+			if (win->lconfig.pixfmt != PIXFMT_NV12) {
+				osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+					 ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+					 (OSD_SRCADD_OFSET_SFT -
+					 OSD_WINOFST_AH_SHIFT)) |
+					 OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+				osd_modify(sd, OSD_VIDWINADH_V1AH,
+					  (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+					  (OSD_SRCADD_ADD_SFT -
+					  OSD_VIDWINADH_V1AH_SHIFT),
+					  OSD_VIDWINADH);
+				osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+					  OSD_VIDWIN1ADL);
+			}
+			break;
+		}
+	}
+}
+
+static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+			    unsigned long fb_base_phys,
+			    unsigned long cbcr_ofst)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	struct osd_layer_config *cfg = &win->lconfig;
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	win->fb_base_phys = fb_base_phys & ~0x1F;
+	_osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
+
+	if (layer == WIN_VID0) {
+		osd->pingpong =
+		    _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+						       win->fb_base_phys,
+						       cfg);
+	}
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
+				 struct osd_layer_config *lconfig)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	*lconfig = win->lconfig;
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+/**
+ * try_layer_config() - Try a specific configuration for the layer
+ * @sd: ptr to struct osd_state
+ * @layer: layer to configure
+ * @lconfig: layer configuration to try
+ *
+ * If the requested lconfig is completely rejected and the value of lconfig on
+ * exit is the current lconfig, then try_layer_config() returns 1.  Otherwise,
+ * try_layer_config() returns 0.  A return value of 0 does not necessarily mean
+ * that the value of lconfig on exit is identical to the value of lconfig on
+ * entry, but merely that it represents a change from the current lconfig.
+ */
+static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
+			    struct osd_layer_config *lconfig)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	int bad_config = 0;
+
+	/* verify that the pixel format is compatible with the layer */
+	switch (lconfig->pixfmt) {
+	case PIXFMT_1BPP:
+	case PIXFMT_2BPP:
+	case PIXFMT_4BPP:
+	case PIXFMT_8BPP:
+	case PIXFMT_RGB565:
+		if (osd->vpbe_type == VPBE_VERSION_1)
+			bad_config = !is_vid_win(layer);
+		break;
+	case PIXFMT_YCbCrI:
+	case PIXFMT_YCrCbI:
+		bad_config = !is_vid_win(layer);
+		break;
+	case PIXFMT_RGB888:
+		if (osd->vpbe_type == VPBE_VERSION_1)
+			bad_config = !is_vid_win(layer);
+		else if ((osd->vpbe_type == VPBE_VERSION_3) ||
+			 (osd->vpbe_type == VPBE_VERSION_2))
+			bad_config = !is_osd_win(layer);
+		break;
+	case PIXFMT_NV12:
+		if (osd->vpbe_type != VPBE_VERSION_2)
+			bad_config = 1;
+		else
+			bad_config = is_osd_win(layer);
+		break;
+	case PIXFMT_OSD_ATTR:
+		bad_config = (layer != WIN_OSD1);
+		break;
+	default:
+		bad_config = 1;
+		break;
+	}
+	if (bad_config) {
+		/*
+		 * The requested pixel format is incompatible with the layer,
+		 * so keep the current layer configuration.
+		 */
+		*lconfig = win->lconfig;
+		return bad_config;
+	}
+
+	/* DM6446: only one OSD window at a time can use RGB pixel formats */
+	if ((osd->vpbe_type == VPBE_VERSION_1) &&
+	    is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
+		enum osd_pix_format pixfmt;
+		if (layer == WIN_OSD0)
+			pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
+		else
+			pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
+
+		if (is_rgb_pixfmt(pixfmt)) {
+			/*
+			 * The other OSD window is already configured for an
+			 * RGB pixel format, so keep the current layer
+			 * configuration.
+			 */
+			*lconfig = win->lconfig;
+			return 1;
+		}
+	}
+
+	/* DM6446: only one video window at a time can use RGB888 */
+	if ((osd->vpbe_type == VPBE_VERSION_1) && is_vid_win(layer) &&
+		lconfig->pixfmt == PIXFMT_RGB888) {
+		enum osd_pix_format pixfmt;
+
+		if (layer == WIN_VID0)
+			pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
+		else
+			pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
+
+		if (pixfmt == PIXFMT_RGB888) {
+			/*
+			 * The other video window is already configured for
+			 * RGB888, so keep the current layer configuration.
+			 */
+			*lconfig = win->lconfig;
+			return 1;
+		}
+	}
+
+	/* window dimensions must be non-zero */
+	if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
+		*lconfig = win->lconfig;
+		return 1;
+	}
+
+	/* round line_length up to a multiple of 32 */
+	lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
+	lconfig->line_length =
+	    min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
+	lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
+	lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
+	lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
+	lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
+	lconfig->interlaced = (lconfig->interlaced != 0);
+	if (lconfig->interlaced) {
+		/* ysize and ypos must be even for interlaced displays */
+		lconfig->ysize &= ~1;
+		lconfig->ypos &= ~1;
+	}
+
+	return 0;
+}
+
+static void _osd_disable_vid_rgb888(struct osd_state *sd)
+{
+	/*
+	 * The DM6446 supports RGB888 pixel format in a single video window.
+	 * This routine disables RGB888 pixel format for both video windows.
+	 * The caller must ensure that neither video window is currently
+	 * configured for RGB888 pixel format.
+	 */
+	if (sd->vpbe_type == VPBE_VERSION_1)
+		osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+}
+
+static void _osd_enable_vid_rgb888(struct osd_state *sd,
+				   enum osd_layer layer)
+{
+	/*
+	 * The DM6446 supports RGB888 pixel format in a single video window.
+	 * This routine enables RGB888 pixel format for the specified video
+	 * window.  The caller must ensure that the other video window is not
+	 * currently configured for RGB888 pixel format, as this routine will
+	 * disable RGB888 pixel format for the other window.
+	 */
+	if (sd->vpbe_type == VPBE_VERSION_1) {
+		if (layer == WIN_VID0)
+			osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+				  OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+		else if (layer == WIN_VID1)
+			osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+				  OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+				  OSD_MISCCTL);
+	}
+}
+
+static void _osd_set_cbcr_order(struct osd_state *sd,
+				enum osd_pix_format pixfmt)
+{
+	/*
+	 * The caller must ensure that all windows using YC pixfmt use the same
+	 * Cb/Cr order.
+	 */
+	if (pixfmt == PIXFMT_YCbCrI)
+		osd_clear(sd, OSD_MODE_CS, OSD_MODE);
+	else if (pixfmt == PIXFMT_YCrCbI)
+		osd_set(sd, OSD_MODE_CS, OSD_MODE);
+}
+
+static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+				  const struct osd_layer_config *lconfig)
+{
+	u32 winmd = 0, winmd_mask = 0, bmw = 0;
+
+	_osd_set_cbcr_order(sd, lconfig->pixfmt);
+
+	switch (layer) {
+	case WIN_OSD0:
+		if (sd->vpbe_type == VPBE_VERSION_1) {
+			winmd_mask |= OSD_OSDWIN0MD_RGB0E;
+			if (lconfig->pixfmt == PIXFMT_RGB565)
+				winmd |= OSD_OSDWIN0MD_RGB0E;
+		} else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+		  (sd->vpbe_type == VPBE_VERSION_2)) {
+			winmd_mask |= OSD_OSDWIN0MD_BMP0MD;
+			switch (lconfig->pixfmt) {
+			case PIXFMT_RGB565:
+				winmd |= (1 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+				break;
+			case PIXFMT_RGB888:
+				winmd |= (2 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+				_osd_enable_rgb888_pixblend(sd, OSDWIN_OSD0);
+				break;
+			case PIXFMT_YCbCrI:
+			case PIXFMT_YCrCbI:
+				winmd |= (3 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+				break;
+			default:
+				break;
+			}
+		}
+
+		winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
+
+		switch (lconfig->pixfmt) {
+		case PIXFMT_1BPP:
+			bmw = 0;
+			break;
+		case PIXFMT_2BPP:
+			bmw = 1;
+			break;
+		case PIXFMT_4BPP:
+			bmw = 2;
+			break;
+		case PIXFMT_8BPP:
+			bmw = 3;
+			break;
+		default:
+			break;
+		}
+		winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);
+
+		if (lconfig->interlaced)
+			winmd |= OSD_OSDWIN0MD_OFF0;
+
+		osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
+		osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
+		osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
+		osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
+		if (lconfig->interlaced) {
+			osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
+			osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
+		} else {
+			osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
+			osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
+		}
+		break;
+	case WIN_VID0:
+		winmd_mask |= OSD_VIDWINMD_VFF0;
+		if (lconfig->interlaced)
+			winmd |= OSD_VIDWINMD_VFF0;
+
+		osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+		osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
+		osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+		osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+		/*
+		 * For YUV420P format the register contents are
+		 * duplicated in both VID registers
+		 */
+		if ((sd->vpbe_type == VPBE_VERSION_2) &&
+				(lconfig->pixfmt == PIXFMT_NV12)) {
+			/* other window also */
+			if (lconfig->interlaced) {
+				winmd_mask |= OSD_VIDWINMD_VFF1;
+				winmd |= OSD_VIDWINMD_VFF1;
+				osd_modify(sd, winmd_mask, winmd,
+					  OSD_VIDWINMD);
+			}
+
+			osd_modify(sd, OSD_MISCCTL_S420D,
+				    OSD_MISCCTL_S420D, OSD_MISCCTL);
+			osd_write(sd, lconfig->line_length >> 5,
+				  OSD_VIDWIN1OFST);
+			osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+			osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+			/*
+			 * If the NV12 line length is not 32-byte aligned
+			 * (e.g. NTSC), the window X size must also be
+			 * rounded up to a 32-byte boundary for both VID
+			 * windows.
+			 */
+			if (lconfig->xsize % 32) {
+				osd_write(sd,
+					  ((lconfig->xsize + 31) & ~31),
+					  OSD_VIDWIN1XL);
+				osd_write(sd,
+					  ((lconfig->xsize + 31) & ~31),
+					  OSD_VIDWIN0XL);
+			}
+		} else if ((sd->vpbe_type == VPBE_VERSION_2) &&
+				(lconfig->pixfmt != PIXFMT_NV12)) {
+			osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
+						OSD_MISCCTL);
+		}
+
+		if (lconfig->interlaced) {
+			osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
+			osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
+			if ((sd->vpbe_type == VPBE_VERSION_2) &&
+				lconfig->pixfmt == PIXFMT_NV12) {
+				osd_write(sd, lconfig->ypos >> 1,
+					  OSD_VIDWIN1YP);
+				osd_write(sd, lconfig->ysize >> 1,
+					  OSD_VIDWIN1YL);
+			}
+		} else {
+			osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+			osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+			if ((sd->vpbe_type == VPBE_VERSION_2) &&
+				lconfig->pixfmt == PIXFMT_NV12) {
+				osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+				osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+			}
+		}
+		break;
+	case WIN_OSD1:
+		/*
+		 * The caller must ensure that OSD1 is disabled prior to
+		 * switching from a normal mode to attribute mode or from
+		 * attribute mode to a normal mode.
+		 */
+		if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
+			if (sd->vpbe_type == VPBE_VERSION_1) {
+				winmd_mask |= OSD_OSDWIN1MD_ATN1E |
+				OSD_OSDWIN1MD_RGB1E | OSD_OSDWIN1MD_CLUTS1 |
+				OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+			} else {
+				winmd_mask |= OSD_OSDWIN1MD_BMP1MD |
+				OSD_OSDWIN1MD_CLUTS1 | OSD_OSDWIN1MD_BLND1 |
+				OSD_OSDWIN1MD_TE1;
+			}
+		} else {
+			if (sd->vpbe_type == VPBE_VERSION_1) {
+				winmd_mask |= OSD_OSDWIN1MD_RGB1E;
+				if (lconfig->pixfmt == PIXFMT_RGB565)
+					winmd |= OSD_OSDWIN1MD_RGB1E;
+			} else if ((sd->vpbe_type == VPBE_VERSION_3)
+				   || (sd->vpbe_type == VPBE_VERSION_2)) {
+				winmd_mask |= OSD_OSDWIN1MD_BMP1MD;
+				switch (lconfig->pixfmt) {
+				case PIXFMT_RGB565:
+					winmd |=
+					    (1 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+					break;
+				case PIXFMT_RGB888:
+					winmd |=
+					    (2 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+					_osd_enable_rgb888_pixblend(sd,
+							OSDWIN_OSD1);
+					break;
+				case PIXFMT_YCbCrI:
+				case PIXFMT_YCrCbI:
+					winmd |=
+					    (3 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+					break;
+				default:
+					break;
+				}
+			}
+
+			winmd_mask |= OSD_OSDWIN1MD_BMW1;
+			switch (lconfig->pixfmt) {
+			case PIXFMT_1BPP:
+				bmw = 0;
+				break;
+			case PIXFMT_2BPP:
+				bmw = 1;
+				break;
+			case PIXFMT_4BPP:
+				bmw = 2;
+				break;
+			case PIXFMT_8BPP:
+				bmw = 3;
+				break;
+			default:
+				break;
+			}
+			winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
+		}
+
+		winmd_mask |= OSD_OSDWIN1MD_OFF1;
+		if (lconfig->interlaced)
+			winmd |= OSD_OSDWIN1MD_OFF1;
+
+		osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
+		osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
+		osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
+		osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
+		if (lconfig->interlaced) {
+			osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
+			osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
+		} else {
+			osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
+			osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
+		}
+		break;
+	case WIN_VID1:
+		winmd_mask |= OSD_VIDWINMD_VFF1;
+		if (lconfig->interlaced)
+			winmd |= OSD_VIDWINMD_VFF1;
+
+		osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+		osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
+		osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+		osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+		/*
+		 * For YUV420P format the register contents are
+		 * duplicated in both VID registers
+		 */
+		if (sd->vpbe_type == VPBE_VERSION_2) {
+			if (lconfig->pixfmt == PIXFMT_NV12) {
+				/* other window also */
+				if (lconfig->interlaced) {
+					winmd_mask |= OSD_VIDWINMD_VFF0;
+					winmd |= OSD_VIDWINMD_VFF0;
+					osd_modify(sd, winmd_mask, winmd,
+						  OSD_VIDWINMD);
+				}
+				osd_modify(sd, OSD_MISCCTL_S420D,
+					   OSD_MISCCTL_S420D, OSD_MISCCTL);
+				osd_write(sd, lconfig->line_length >> 5,
+					  OSD_VIDWIN0OFST);
+				osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+				osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+			} else {
+				osd_modify(sd, OSD_MISCCTL_S420D,
+					   ~OSD_MISCCTL_S420D, OSD_MISCCTL);
+			}
+		}
+
+		if (lconfig->interlaced) {
+			osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
+			osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
+			if ((sd->vpbe_type == VPBE_VERSION_2) &&
+				lconfig->pixfmt == PIXFMT_NV12) {
+				osd_write(sd, lconfig->ypos >> 1,
+					  OSD_VIDWIN0YP);
+				osd_write(sd, lconfig->ysize >> 1,
+					  OSD_VIDWIN0YL);
+			}
+		} else {
+			osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+			osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+			if ((sd->vpbe_type == VPBE_VERSION_2) &&
+				lconfig->pixfmt == PIXFMT_NV12) {
+				osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+				osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+			}
+		}
+		break;
+	}
+}
+
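+/*
+ * osd_set_layer_config - validate and apply a new window configuration.
+ * Runs under the OSD spinlock; handles OSD1 normal/attribute mode
+ * transitions and installs a default palette map for bitmap pixel formats.
+ */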
+static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+				struct osd_layer_config *lconfig)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	struct osd_layer_config *cfg = &win->lconfig;
+	unsigned long flags;
+	int reject_config;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	reject_config = try_layer_config(sd, layer, lconfig);
+	if (reject_config) {
+		spin_unlock_irqrestore(&osd->lock, flags);
+		return reject_config;
+	}
+
+	/* update the current Cb/Cr order */
+	if (is_yc_pixfmt(lconfig->pixfmt))
+		osd->yc_pixfmt = lconfig->pixfmt;
+
+	/*
+	 * If we are switching OSD1 from normal mode to attribute mode or from
+	 * attribute mode to normal mode, then we must disable the window.
+	 */
+	if (layer == WIN_OSD1) {
+		if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+		  (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
+		  ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+		  (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
+			win->is_enabled = 0;
+			_osd_disable_layer(sd, layer);
+		}
+	}
+
+	_osd_set_layer_config(sd, layer, lconfig);
+
+	if (layer == WIN_OSD1) {
+		struct osd_osdwin_state *osdwin_state =
+		    &osd->osdwin[OSDWIN_OSD1];
+
+		if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+		  (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
+			/*
+			 * We just switched OSD1 from attribute mode to normal
+			 * mode, so we must initialize the CLUT select, the
+			 * blend factor, transparency colorkey enable, and
+			 * attenuation enable (DM6446 only) bits in the
+			 * OSDWIN1MD register.
+			 */
+			_osd_set_osd_clut(sd, OSDWIN_OSD1,
+					  osdwin_state->clut);
+			_osd_set_blending_factor(sd, OSDWIN_OSD1,
+						 osdwin_state->blend);
+			if (osdwin_state->colorkey_blending) {
+				_osd_enable_color_key(sd, OSDWIN_OSD1,
+						      osdwin_state->colorkey,
+						      lconfig->pixfmt);
+			} else {
+				_osd_disable_color_key(sd, OSDWIN_OSD1);
+			}
+			_osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
+					osdwin_state->rec601_attenuation);
+		} else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+		  (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
+			/*
+			 * We just switched OSD1 from normal mode to attribute
+			 * mode, so we must initialize the blink enable and
+			 * blink interval bits in the OSDATRMD register.
+			 */
+			_osd_set_blink_attribute(sd, osd->is_blinking,
+						 osd->blink);
+		}
+	}
+
+	/*
+	 * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
+	 * then configure a default palette map.
+	 */
+	if ((lconfig->pixfmt != cfg->pixfmt) &&
+	  ((lconfig->pixfmt == PIXFMT_1BPP) ||
+	  (lconfig->pixfmt == PIXFMT_2BPP) ||
+	  (lconfig->pixfmt == PIXFMT_4BPP))) {
+		enum osd_win_layer osdwin =
+		    ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
+		struct osd_osdwin_state *osdwin_state =
+		    &osd->osdwin[osdwin];
+		unsigned char clut_index;
+		unsigned char clut_entries = 0;
+
+		switch (lconfig->pixfmt) {
+		case PIXFMT_1BPP:
+			clut_entries = 2;
+			break;
+		case PIXFMT_2BPP:
+			clut_entries = 4;
+			break;
+		case PIXFMT_4BPP:
+			clut_entries = 16;
+			break;
+		default:
+			break;
+		}
+		/*
+		 * The default palette map maps the pixel value to the clut
+		 * index, i.e. pixel value 0 maps to clut entry 0, pixel value
+		 * 1 maps to clut entry 1, etc.
+		 */
+		for (clut_index = 0; clut_index < 16; clut_index++) {
+			osdwin_state->palette_map[clut_index] = clut_index;
+			if (clut_index < clut_entries) {
+				_osd_set_palette_map(sd, osdwin, clut_index,
+						     clut_index,
+						     lconfig->pixfmt);
+			}
+		}
+	}
+
+	*cfg = *lconfig;
+	/* DM6446: configure the RGB888 enable and window selection */
+	if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
+		_osd_enable_vid_rgb888(sd, WIN_VID0);
+	else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
+		_osd_enable_vid_rgb888(sd, WIN_VID1);
+	else
+		_osd_disable_vid_rgb888(sd);
+
+	if (layer == WIN_VID0) {
+		osd->pingpong =
+			_osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+						  win->fb_base_phys, cfg);
+	}
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+
+	return 0;
+}
+
+static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	enum osd_win_layer osdwin;
+	struct osd_osdwin_state *osdwin_state;
+	struct osd_layer_config *cfg = &win->lconfig;
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	win->is_enabled = 0;
+	_osd_disable_layer(sd, layer);
+
+	win->h_zoom = ZOOM_X1;
+	win->v_zoom = ZOOM_X1;
+	_osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);
+
+	win->fb_base_phys = 0;
+	_osd_start_layer(sd, layer, win->fb_base_phys, 0);
+
+	cfg->line_length = 0;
+	cfg->xsize = 0;
+	cfg->ysize = 0;
+	cfg->xpos = 0;
+	cfg->ypos = 0;
+	cfg->interlaced = 0;
+	switch (layer) {
+	case WIN_OSD0:
+	case WIN_OSD1:
+		osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
+		osdwin_state = &osd->osdwin[osdwin];
+		/*
+		 * Other code relies on the fact that OSD windows default to a
+		 * bitmap pixel format when they are deallocated, so don't
+		 * change this default pixel format.
+		 */
+		cfg->pixfmt = PIXFMT_8BPP;
+		_osd_set_layer_config(sd, layer, cfg);
+		osdwin_state->clut = RAM_CLUT;
+		_osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
+		osdwin_state->colorkey_blending = 0;
+		_osd_disable_color_key(sd, osdwin);
+		osdwin_state->blend = OSD_8_VID_0;
+		_osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
+		osdwin_state->rec601_attenuation = 0;
+		_osd_set_rec601_attenuation(sd, osdwin,
+					    osdwin_state->rec601_attenuation);
+		if (osdwin == OSDWIN_OSD1) {
+			osd->is_blinking = 0;
+			osd->blink = BLINK_X1;
+		}
+		break;
+	case WIN_VID0:
+	case WIN_VID1:
+		cfg->pixfmt = osd->yc_pixfmt;
+		_osd_set_layer_config(sd, layer, cfg);
+		break;
+	}
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	if (!win->is_allocated) {
+		spin_unlock_irqrestore(&osd->lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+	osd_init_layer(sd, layer);
+	spin_lock_irqsave(&osd->lock, flags);
+
+	win->is_allocated = 0;
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
+{
+	struct osd_state *osd = sd;
+	struct osd_window_state *win = &osd->win[layer];
+	unsigned long flags;
+
+	spin_lock_irqsave(&osd->lock, flags);
+
+	if (win->is_allocated) {
+		spin_unlock_irqrestore(&osd->lock, flags);
+		return -1;
+	}
+	win->is_allocated = 1;
+
+	spin_unlock_irqrestore(&osd->lock, flags);
+
+	return 0;
+}
+
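+/* put the OSD registers into a known default state */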
+static void _osd_init(struct osd_state *sd)
+{
+	osd_write(sd, 0, OSD_MODE);
+	osd_write(sd, 0, OSD_VIDWINMD);
+	osd_write(sd, 0, OSD_OSDWIN0MD);
+	osd_write(sd, 0, OSD_OSDWIN1MD);
+	osd_write(sd, 0, OSD_RECTCUR);
+	osd_write(sd, 0, OSD_MISCCTL);
+	if (sd->vpbe_type == VPBE_VERSION_3) {
+		osd_write(sd, 0, OSD_VBNDRY);
+		osd_write(sd, 0, OSD_EXTMODE);
+		osd_write(sd, OSD_MISCCTL_DMANG, OSD_MISCCTL);
+	}
+}
+
+static void osd_set_left_margin(struct osd_state *sd, u32 val)
+{
+	osd_write(sd, val, OSD_BASEPX);
+}
+
+static void osd_set_top_margin(struct osd_state *sd, u32 val)
+{
+	osd_write(sd, val, OSD_BASEPY);
+}
+
+static int osd_initialize(struct osd_state *osd)
+{
+	if (osd == NULL)
+		return -ENODEV;
+	_osd_init(osd);
+
+	/* set default Cb/Cr order */
+	osd->yc_pixfmt = PIXFMT_YCbCrI;
+
+	if (osd->vpbe_type == VPBE_VERSION_3) {
+		/*
+		 * ROM CLUT1 on the DM355 is similar (identical?) to ROM CLUT0
+		 * on the DM6446, so make ROM_CLUT1 the default on the DM355.
+		 */
+		osd->rom_clut = ROM_CLUT1;
+	}
+
+	_osd_set_field_inversion(osd, osd->field_inversion);
+	_osd_set_rom_clut(osd, osd->rom_clut);
+
+	osd_init_layer(osd, WIN_OSD0);
+	osd_init_layer(osd, WIN_VID0);
+	osd_init_layer(osd, WIN_OSD1);
+	osd_init_layer(osd, WIN_VID1);
+
+	return 0;
+}
+
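+/* sub-device operations, exported through osd_state->ops in osd_probe() */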
+static const struct vpbe_osd_ops osd_ops = {
+	.initialize = osd_initialize,
+	.request_layer = osd_request_layer,
+	.release_layer = osd_release_layer,
+	.enable_layer = osd_enable_layer,
+	.disable_layer = osd_disable_layer,
+	.set_layer_config = osd_set_layer_config,
+	.get_layer_config = osd_get_layer_config,
+	.start_layer = osd_start_layer,
+	.set_left_margin = osd_set_left_margin,
+	.set_top_margin = osd_set_top_margin,
+};
+
+static int osd_probe(struct platform_device *pdev)
+{
+	struct osd_platform_data *pdata;
+	struct osd_state *osd;
+	struct resource *res;
+	int ret = 0;
+
+	osd = kzalloc(sizeof(struct osd_state), GFP_KERNEL);
+	if (osd == NULL)
+		return -ENOMEM;
+
+	osd->dev = &pdev->dev;
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(osd->dev,
+			"No platform data defined for OSD sub device\n");
+		ret = -ENOENT;
+		goto free_mem;
+	}
+	osd->vpbe_type = (enum vpbe_version)pdata->vpbe_type;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(osd->dev, "Unable to get OSD register address map\n");
+		ret = -ENODEV;
+		goto free_mem;
+	}
+	osd->osd_base_phys = res->start;
+	osd->osd_size = resource_size(res);
+	if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
+				MODULE_NAME)) {
+		dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
+		ret = -ENODEV;
+		goto free_mem;
+	}
+	osd->osd_base = (unsigned long)ioremap_nocache(res->start,
+							osd->osd_size);
+	if (!osd->osd_base) {
+		dev_err(osd->dev, "Unable to map the OSD region\n");
+		ret = -ENODEV;
+		goto release_mem_region;
+	}
+	spin_lock_init(&osd->lock);
+	osd->ops = osd_ops;
+	platform_set_drvdata(pdev, osd);
+	dev_notice(osd->dev, "OSD sub device probe success\n");
+	return ret;
+
+release_mem_region:
+	release_mem_region(osd->osd_base_phys, osd->osd_size);
+free_mem:
+	kfree(osd);
+	return ret;
+}
+
+static int osd_remove(struct platform_device *pdev)
+{
+	struct osd_state *osd = platform_get_drvdata(pdev);
+
+	iounmap((void *)osd->osd_base);
+	release_mem_region(osd->osd_base_phys, osd->osd_size);
+	kfree(osd);
+	return 0;
+}
+
+static struct platform_driver osd_driver = {
+	.probe		= osd_probe,
+	.remove		= osd_remove,
+	.driver		= {
+		.name	= MODULE_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(osd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_osd_regs.h b/drivers/media/platform/davinci/vpbe_osd_regs.h
new file mode 100644
index 0000000..584520f
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_osd_regs.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_OSD_REGS_H
+#define _VPBE_OSD_REGS_H
+
+/* VPBE Global Registers */
+#define VPBE_PID				0x0
+#define VPBE_PCR				0x4
+
+/* VPSS CLock Registers */
+#define VPSSCLK_PID				0x00
+#define VPSSCLK_CLKCTRL				0x04
+
+/* VPSS Buffer Logic Registers */
+#define VPSSBL_PID				0x00
+#define VPSSBL_PCR				0x04
+#define VPSSBL_BCR				0x08
+#define VPSSBL_INTSTAT				0x0C
+#define VPSSBL_INTSEL				0x10
+#define VPSSBL_EVTSEL				0x14
+#define VPSSBL_MEMCTRL				0x18
+#define VPSSBL_CCDCMUX				0x1C
+
+/* DM365 ISP5 system configuration */
+#define ISP5_PID				0x0
+#define ISP5_PCCR				0x4
+#define ISP5_BCR				0x8
+#define ISP5_INTSTAT				0xC
+#define ISP5_INTSEL1				0x10
+#define ISP5_INTSEL2				0x14
+#define ISP5_INTSEL3				0x18
+#define ISP5_EVTSEL				0x1c
+#define ISP5_CCDCMUX				0x20
+
+/* VPBE On-Screen Display Subsystem Registers (OSD) */
+#define OSD_MODE				0x00
+#define OSD_VIDWINMD				0x04
+#define OSD_OSDWIN0MD				0x08
+#define OSD_OSDWIN1MD				0x0C
+#define OSD_OSDATRMD				0x0C
+#define OSD_RECTCUR				0x10
+#define OSD_VIDWIN0OFST				0x18
+#define OSD_VIDWIN1OFST				0x1C
+#define OSD_OSDWIN0OFST				0x20
+#define OSD_OSDWIN1OFST				0x24
+#define OSD_VIDWINADH				0x28
+#define OSD_VIDWIN0ADL				0x2C
+#define OSD_VIDWIN0ADR				0x2C
+#define OSD_VIDWIN1ADL				0x30
+#define OSD_VIDWIN1ADR				0x30
+#define OSD_OSDWINADH				0x34
+#define OSD_OSDWIN0ADL				0x38
+#define OSD_OSDWIN0ADR				0x38
+#define OSD_OSDWIN1ADL				0x3C
+#define OSD_OSDWIN1ADR				0x3C
+#define OSD_BASEPX				0x40
+#define OSD_BASEPY				0x44
+#define OSD_VIDWIN0XP				0x48
+#define OSD_VIDWIN0YP				0x4C
+#define OSD_VIDWIN0XL				0x50
+#define OSD_VIDWIN0YL				0x54
+#define OSD_VIDWIN1XP				0x58
+#define OSD_VIDWIN1YP				0x5C
+#define OSD_VIDWIN1XL				0x60
+#define OSD_VIDWIN1YL				0x64
+#define OSD_OSDWIN0XP				0x68
+#define OSD_OSDWIN0YP				0x6C
+#define OSD_OSDWIN0XL				0x70
+#define OSD_OSDWIN0YL				0x74
+#define OSD_OSDWIN1XP				0x78
+#define OSD_OSDWIN1YP				0x7C
+#define OSD_OSDWIN1XL				0x80
+#define OSD_OSDWIN1YL				0x84
+#define OSD_CURXP				0x88
+#define OSD_CURYP				0x8C
+#define OSD_CURXL				0x90
+#define OSD_CURYL				0x94
+#define OSD_W0BMP01				0xA0
+#define OSD_W0BMP23				0xA4
+#define OSD_W0BMP45				0xA8
+#define OSD_W0BMP67				0xAC
+#define OSD_W0BMP89				0xB0
+#define OSD_W0BMPAB				0xB4
+#define OSD_W0BMPCD				0xB8
+#define OSD_W0BMPEF				0xBC
+#define OSD_W1BMP01				0xC0
+#define OSD_W1BMP23				0xC4
+#define OSD_W1BMP45				0xC8
+#define OSD_W1BMP67				0xCC
+#define OSD_W1BMP89				0xD0
+#define OSD_W1BMPAB				0xD4
+#define OSD_W1BMPCD				0xD8
+#define OSD_W1BMPEF				0xDC
+#define OSD_VBNDRY				0xE0
+#define OSD_EXTMODE				0xE4
+#define OSD_MISCCTL				0xE8
+#define OSD_CLUTRAMYCB				0xEC
+#define OSD_CLUTRAMCR				0xF0
+#define OSD_TRANSPVAL				0xF4
+#define OSD_TRANSPVALL				0xF4
+#define OSD_TRANSPVALU				0xF8
+#define OSD_TRANSPBMPIDX			0xFC
+#define OSD_PPVWIN0ADR				0xFC
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV			(1 << 1)
+#define VPBE_PCR_CLK_OFF			(1 << 0)
+
+#define VPSSBL_INTSTAT_HSSIINT			(1 << 14)
+#define VPSSBL_INTSTAT_CFALDINT			(1 << 13)
+#define VPSSBL_INTSTAT_IPIPE_INT5		(1 << 12)
+#define VPSSBL_INTSTAT_IPIPE_INT4		(1 << 11)
+#define VPSSBL_INTSTAT_IPIPE_INT3		(1 << 10)
+#define VPSSBL_INTSTAT_IPIPE_INT2		(1 << 9)
+#define VPSSBL_INTSTAT_IPIPE_INT1		(1 << 8)
+#define VPSSBL_INTSTAT_IPIPE_INT0		(1 << 7)
+#define VPSSBL_INTSTAT_IPIPEIFINT		(1 << 6)
+#define VPSSBL_INTSTAT_OSDINT			(1 << 5)
+#define VPSSBL_INTSTAT_VENCINT			(1 << 4)
+#define VPSSBL_INTSTAT_H3AINT			(1 << 3)
+#define VPSSBL_INTSTAT_CCDC_VDINT2		(1 << 2)
+#define VPSSBL_INTSTAT_CCDC_VDINT1		(1 << 1)
+#define VPSSBL_INTSTAT_CCDC_VDINT0		(1 << 0)
+
+/* DM365 ISP5 bit definitions */
+#define ISP5_INTSTAT_VENCINT			(1 << 21)
+#define ISP5_INTSTAT_OSDINT			(1 << 20)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC				0
+#define SDTV_PAL				1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P				0
+#define HDTV_625P				1
+#define HDTV_1080I				2
+#define HDTV_720P				3
+
+#define OSD_MODE_CS				(1 << 15)
+#define OSD_MODE_OVRSZ				(1 << 14)
+#define OSD_MODE_OHRSZ				(1 << 13)
+#define OSD_MODE_EF				(1 << 12)
+#define OSD_MODE_VVRSZ				(1 << 11)
+#define OSD_MODE_VHRSZ				(1 << 10)
+#define OSD_MODE_FSINV				(1 << 9)
+#define OSD_MODE_BCLUT				(1 << 8)
+#define OSD_MODE_CABG_SHIFT			0
+#define OSD_MODE_CABG				(0xff << 0)
+
+#define OSD_VIDWINMD_VFINV			(1 << 15)
+#define OSD_VIDWINMD_V1EFC			(1 << 14)
+#define OSD_VIDWINMD_VHZ1_SHIFT			12
+#define OSD_VIDWINMD_VHZ1			(3 << 12)
+#define OSD_VIDWINMD_VVZ1_SHIFT			10
+#define OSD_VIDWINMD_VVZ1			(3 << 10)
+#define OSD_VIDWINMD_VFF1			(1 << 9)
+#define OSD_VIDWINMD_ACT1			(1 << 8)
+#define OSD_VIDWINMD_V0EFC			(1 << 6)
+#define OSD_VIDWINMD_VHZ0_SHIFT			4
+#define OSD_VIDWINMD_VHZ0			(3 << 4)
+#define OSD_VIDWINMD_VVZ0_SHIFT			2
+#define OSD_VIDWINMD_VVZ0			(3 << 2)
+#define OSD_VIDWINMD_VFF0			(1 << 1)
+#define OSD_VIDWINMD_ACT0			(1 << 0)
+
+#define OSD_OSDWIN0MD_ATN0E			(1 << 14)
+#define OSD_OSDWIN0MD_RGB0E			(1 << 13)
+#define OSD_OSDWIN0MD_BMP0MD_SHIFT		13
+#define OSD_OSDWIN0MD_BMP0MD			(3 << 13)
+#define OSD_OSDWIN0MD_CLUTS0			(1 << 12)
+#define OSD_OSDWIN0MD_OHZ0_SHIFT		10
+#define OSD_OSDWIN0MD_OHZ0			(3 << 10)
+#define OSD_OSDWIN0MD_OVZ0_SHIFT		8
+#define OSD_OSDWIN0MD_OVZ0			(3 << 8)
+#define OSD_OSDWIN0MD_BMW0_SHIFT		6
+#define OSD_OSDWIN0MD_BMW0			(3 << 6)
+#define OSD_OSDWIN0MD_BLND0_SHIFT		3
+#define OSD_OSDWIN0MD_BLND0			(7 << 3)
+#define OSD_OSDWIN0MD_TE0			(1 << 2)
+#define OSD_OSDWIN0MD_OFF0			(1 << 1)
+#define OSD_OSDWIN0MD_OACT0			(1 << 0)
+
+#define OSD_OSDWIN1MD_OASW			(1 << 15)
+#define OSD_OSDWIN1MD_ATN1E			(1 << 14)
+#define OSD_OSDWIN1MD_RGB1E			(1 << 13)
+#define OSD_OSDWIN1MD_BMP1MD_SHIFT		13
+#define OSD_OSDWIN1MD_BMP1MD			(3 << 13)
+#define OSD_OSDWIN1MD_CLUTS1			(1 << 12)
+#define OSD_OSDWIN1MD_OHZ1_SHIFT		10
+#define OSD_OSDWIN1MD_OHZ1			(3 << 10)
+#define OSD_OSDWIN1MD_OVZ1_SHIFT		8
+#define OSD_OSDWIN1MD_OVZ1			(3 << 8)
+#define OSD_OSDWIN1MD_BMW1_SHIFT		6
+#define OSD_OSDWIN1MD_BMW1			(3 << 6)
+#define OSD_OSDWIN1MD_BLND1_SHIFT		3
+#define OSD_OSDWIN1MD_BLND1			(7 << 3)
+#define OSD_OSDWIN1MD_TE1			(1 << 2)
+#define OSD_OSDWIN1MD_OFF1			(1 << 1)
+#define OSD_OSDWIN1MD_OACT1			(1 << 0)
+
+#define OSD_OSDATRMD_OASW			(1 << 15)
+#define OSD_OSDATRMD_OHZA_SHIFT			10
+#define OSD_OSDATRMD_OHZA			(3 << 10)
+#define OSD_OSDATRMD_OVZA_SHIFT			8
+#define OSD_OSDATRMD_OVZA			(3 << 8)
+#define OSD_OSDATRMD_BLNKINT_SHIFT		6
+#define OSD_OSDATRMD_BLNKINT			(3 << 6)
+#define OSD_OSDATRMD_OFFA			(1 << 1)
+#define OSD_OSDATRMD_BLNK			(1 << 0)
+
+#define OSD_RECTCUR_RCAD_SHIFT			8
+#define OSD_RECTCUR_RCAD			(0xff << 8)
+#define OSD_RECTCUR_CLUTSR			(1 << 7)
+#define OSD_RECTCUR_RCHW_SHIFT			4
+#define OSD_RECTCUR_RCHW			(7 << 4)
+#define OSD_RECTCUR_RCVW_SHIFT			1
+#define OSD_RECTCUR_RCVW			(7 << 1)
+#define OSD_RECTCUR_RCACT			(1 << 0)
+
+#define OSD_VIDWIN0OFST_V0LO			(0x1ff << 0)
+
+#define OSD_VIDWIN1OFST_V1LO			(0x1ff << 0)
+
+#define OSD_OSDWIN0OFST_O0LO			(0x1ff << 0)
+
+#define OSD_OSDWIN1OFST_O1LO			(0x1ff << 0)
+
+#define OSD_WINOFST_AH_SHIFT			9
+
+#define OSD_VIDWIN0OFST_V0AH			(0xf << 9)
+#define OSD_VIDWIN1OFST_V1AH			(0xf << 9)
+#define OSD_OSDWIN0OFST_O0AH			(0xf << 9)
+#define OSD_OSDWIN1OFST_O1AH			(0xf << 9)
+
+#define OSD_VIDWINADH_V1AH_SHIFT		8
+#define OSD_VIDWINADH_V1AH			(0x7f << 8)
+#define OSD_VIDWINADH_V0AH_SHIFT		0
+#define OSD_VIDWINADH_V0AH			(0x7f << 0)
+
+#define OSD_VIDWIN0ADL_V0AL			(0xffff << 0)
+
+#define OSD_VIDWIN1ADL_V1AL			(0xffff << 0)
+
+#define OSD_OSDWINADH_O1AH_SHIFT		8
+#define OSD_OSDWINADH_O1AH			(0x7f << 8)
+#define OSD_OSDWINADH_O0AH_SHIFT		0
+#define OSD_OSDWINADH_O0AH			(0x7f << 0)
+
+#define OSD_OSDWIN0ADL_O0AL			(0xffff << 0)
+
+#define OSD_OSDWIN1ADL_O1AL			(0xffff << 0)
+
+#define OSD_BASEPX_BPX				(0x3ff << 0)
+
+#define OSD_BASEPY_BPY				(0x1ff << 0)
+
+#define OSD_VIDWIN0XP_V0X			(0x7ff << 0)
+
+#define OSD_VIDWIN0YP_V0Y			(0x7ff << 0)
+
+#define OSD_VIDWIN0XL_V0W			(0x7ff << 0)
+
+#define OSD_VIDWIN0YL_V0H			(0x7ff << 0)
+
+#define OSD_VIDWIN1XP_V1X			(0x7ff << 0)
+
+#define OSD_VIDWIN1YP_V1Y			(0x7ff << 0)
+
+#define OSD_VIDWIN1XL_V1W			(0x7ff << 0)
+
+#define OSD_VIDWIN1YL_V1H			(0x7ff << 0)
+
+#define OSD_OSDWIN0XP_W0X			(0x7ff << 0)
+
+#define OSD_OSDWIN0YP_W0Y			(0x7ff << 0)
+
+#define OSD_OSDWIN0XL_W0W			(0x7ff << 0)
+
+#define OSD_OSDWIN0YL_W0H			(0x7ff << 0)
+
+#define OSD_OSDWIN1XP_W1X			(0x7ff << 0)
+
+#define OSD_OSDWIN1YP_W1Y			(0x7ff << 0)
+
+#define OSD_OSDWIN1XL_W1W			(0x7ff << 0)
+
+#define OSD_OSDWIN1YL_W1H			(0x7ff << 0)
+
+#define OSD_CURXP_RCSX				(0x7ff << 0)
+
+#define OSD_CURYP_RCSY				(0x7ff << 0)
+
+#define OSD_CURXL_RCSW				(0x7ff << 0)
+
+#define OSD_CURYL_RCSH				(0x7ff << 0)
+
+#define OSD_EXTMODE_EXPMDSEL			(1 << 15)
+#define OSD_EXTMODE_SCRNHEXP_SHIFT		13
+#define OSD_EXTMODE_SCRNHEXP			(3 << 13)
+#define OSD_EXTMODE_SCRNVEXP			(1 << 12)
+#define OSD_EXTMODE_OSD1BLDCHR			(1 << 11)
+#define OSD_EXTMODE_OSD0BLDCHR			(1 << 10)
+#define OSD_EXTMODE_ATNOSD1EN			(1 << 9)
+#define OSD_EXTMODE_ATNOSD0EN			(1 << 8)
+#define OSD_EXTMODE_OSDHRSZ15			(1 << 7)
+#define OSD_EXTMODE_VIDHRSZ15			(1 << 6)
+#define OSD_EXTMODE_ZMFILV1HEN			(1 << 5)
+#define OSD_EXTMODE_ZMFILV1VEN			(1 << 4)
+#define OSD_EXTMODE_ZMFILV0HEN			(1 << 3)
+#define OSD_EXTMODE_ZMFILV0VEN			(1 << 2)
+#define OSD_EXTMODE_EXPFILHEN			(1 << 1)
+#define OSD_EXTMODE_EXPFILVEN			(1 << 0)
+
+#define OSD_MISCCTL_BLDSEL			(1 << 15)
+#define OSD_MISCCTL_S420D			(1 << 14)
+#define OSD_MISCCTL_BMAPT			(1 << 13)
+#define OSD_MISCCTL_DM365M			(1 << 12)
+#define OSD_MISCCTL_RGBEN			(1 << 7)
+#define OSD_MISCCTL_RGBWIN			(1 << 6)
+#define OSD_MISCCTL_DMANG			(1 << 6)
+#define OSD_MISCCTL_TMON			(1 << 5)
+#define OSD_MISCCTL_RSEL			(1 << 4)
+#define OSD_MISCCTL_CPBSY			(1 << 3)
+#define OSD_MISCCTL_PPSW			(1 << 2)
+#define OSD_MISCCTL_PPRV			(1 << 1)
+
+#define OSD_CLUTRAMYCB_Y_SHIFT			8
+#define OSD_CLUTRAMYCB_Y			(0xff << 8)
+#define OSD_CLUTRAMYCB_CB_SHIFT			0
+#define OSD_CLUTRAMYCB_CB			(0xff << 0)
+
+#define OSD_CLUTRAMCR_CR_SHIFT			8
+#define OSD_CLUTRAMCR_CR			(0xff << 8)
+#define OSD_CLUTRAMCR_CADDR_SHIFT		0
+#define OSD_CLUTRAMCR_CADDR			(0xff << 0)
+
+#define OSD_TRANSPVAL_RGBTRANS			(0xffff << 0)
+
+#define OSD_TRANSPVALL_RGBL			(0xffff << 0)
+
+#define OSD_TRANSPVALU_Y_SHIFT			8
+#define OSD_TRANSPVALU_Y			(0xff << 8)
+#define OSD_TRANSPVALU_RGBU_SHIFT		0
+#define OSD_TRANSPVALU_RGBU			(0xff << 0)
+
+#define OSD_TRANSPBMPIDX_BMP1_SHIFT		8
+#define OSD_TRANSPBMPIDX_BMP1			(0xff << 8)
+#define OSD_TRANSPBMPIDX_BMP0_SHIFT		0
+#define OSD_TRANSPBMPIDX_BMP0			0xff
+
+#endif				/* _VPBE_OSD_REGS_H */
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
new file mode 100644
index 0000000..b21ecc8
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -0,0 +1,706 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <mach/hardware.h>
+#include <mach/mux.h>
+#include <mach/i2c.h>
+
+#include <linux/io.h>
+
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+
+#include "vpbe_venc_regs.h"
+
+#define MODULE_NAME	VPBE_VENC_SUBDEV_NAME
+
+static int debug = 2;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-2");
+
+struct venc_state {
+	struct v4l2_subdev sd;
+	struct venc_callback *callback;
+	struct venc_platform_data *pdata;
+	struct device *pdev;
+	u32 output;
+	v4l2_std_id std;
+	spinlock_t lock;
+	void __iomem *venc_base;
+	void __iomem *vdaccfg_reg;
+};
+
+static inline struct venc_state *to_state(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct venc_state, sd);
+}
+
+static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
+{
+	struct venc_state *venc = to_state(sd);
+
+	return readl(venc->venc_base + offset);
+}
+
+static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
+{
+	struct venc_state *venc = to_state(sd);
+
+	writel(val, (venc->venc_base + offset));
+
+	return val;
+}
+
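+/* read-modify-write helper: update only the register bits selected by mask */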
+static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
+				 u32 val, u32 mask)
+{
+	u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
+
+	venc_write(sd, offset, new_val);
+
+	return new_val;
+}
+
+static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
+{
+	struct venc_state *venc = to_state(sd);
+
+	writel(val, venc->vdaccfg_reg);
+
+	val = readl(venc->vdaccfg_reg);
+
+	return val;
+}
+
+#define VDAC_COMPONENT	0x543
+#define VDAC_S_VIDEO	0x210
+/*
+ * This function sets up the VPBE DAC routing for the selected output
+ * (composite, component or S-Video).
+ */
+static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
+{
+	switch (out_index) {
+	case 0:
+		v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
+		venc_write(sd, VENC_DACSEL, 0);
+		break;
+	case 1:
+		v4l2_dbg(debug, 1, sd, "Setting output to Component\n");
+		venc_write(sd, VENC_DACSEL, VDAC_COMPONENT);
+		break;
+	case 2:
+		v4l2_dbg(debug, 1, sd, "Setting output to S-video\n");
+		venc_write(sd, VENC_DACSEL, VDAC_S_VIDEO);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
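+/*
+ * The analog mode setup routines below call this with benable == 0 to bring
+ * the VENC timing and output registers to a known state before programming
+ * a new video mode.
+ */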
+static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
+{
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+	v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
+
+	if (benable) {
+		venc_write(sd, VENC_VMOD, 0);
+		venc_write(sd, VENC_CVBS, 0);
+		venc_write(sd, VENC_LCDOUT, 0);
+		venc_write(sd, VENC_HSPLS, 0);
+		venc_write(sd, VENC_HSTART, 0);
+		venc_write(sd, VENC_HVALID, 0);
+		venc_write(sd, VENC_HINT, 0);
+		venc_write(sd, VENC_VSPLS, 0);
+		venc_write(sd, VENC_VSTART, 0);
+		venc_write(sd, VENC_VVALID, 0);
+		venc_write(sd, VENC_VINT, 0);
+		venc_write(sd, VENC_YCCCTL, 0);
+		venc_write(sd, VENC_DACSEL, 0);
+
+	} else {
+		venc_write(sd, VENC_VMOD, 0);
+		/* disable VCLK output pin enable */
+		venc_write(sd, VENC_VIDCTL, 0x141);
+
+		/* Disable output sync pins */
+		venc_write(sd, VENC_SYNCCTL, 0);
+
+		/* Disable DCLOCK */
+		venc_write(sd, VENC_DCLKCTL, 0);
+		venc_write(sd, VENC_DRGBX1, 0x0000057C);
+
+		/* Disable LCD output control (accepting default polarity) */
+		venc_write(sd, VENC_LCDOUT, 0);
+		if (pdata->venc_type != VPBE_VERSION_3)
+			venc_write(sd, VENC_CMPNT, 0x100);
+		venc_write(sd, VENC_HSPLS, 0);
+		venc_write(sd, VENC_HINT, 0);
+		venc_write(sd, VENC_HSTART, 0);
+		venc_write(sd, VENC_HVALID, 0);
+
+		venc_write(sd, VENC_VSPLS, 0);
+		venc_write(sd, VENC_VINT, 0);
+		venc_write(sd, VENC_VSTART, 0);
+		venc_write(sd, VENC_VVALID, 0);
+
+		venc_write(sd, VENC_HSDLY, 0);
+		venc_write(sd, VENC_VSDLY, 0);
+
+		venc_write(sd, VENC_YCCCTL, 0);
+		venc_write(sd, VENC_VSTARTA, 0);
+
+		/* Set OSD clock and OSD Sync Advance registers */
+		venc_write(sd, VENC_OSDCLK0, 1);
+		venc_write(sd, VENC_OSDCLK1, 2);
+	}
+}
+
+#define VDAC_CONFIG_SD_V3	0x0E21A6B6
+#define VDAC_CONFIG_SD_V2	0x081141CF
+/*
+ * setting NTSC mode
+ */
+static int venc_set_ntsc(struct v4l2_subdev *sd)
+{
+	u32 val;
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+
+	v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");
+
+	/* Setup clock at VPSS & VENC for SD */
+	vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+	if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
+		return -EINVAL;
+
+	venc_enabledigitaloutput(sd, 0);
+
+	if (pdata->venc_type == VPBE_VERSION_3) {
+		venc_write(sd, VENC_CLKCTL, 0x01);
+		venc_write(sd, VENC_VIDCTL, 0);
+		val = vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+	} else if (pdata->venc_type == VPBE_VERSION_2) {
+		venc_write(sd, VENC_CLKCTL, 0x01);
+		venc_write(sd, VENC_VIDCTL, 0);
+		vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+	} else {
+		/* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+		venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+		/* Set REC656 Mode */
+		venc_write(sd, VENC_YCCCTL, 0x1);
+		venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
+		venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+	}
+
+	venc_write(sd, VENC_VMOD, 0);
+	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+			VENC_VMOD_VIE);
+	venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+	venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
+			VENC_VMOD_TVTYP);
+	venc_write(sd, VENC_DACTST, 0x0);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+	return 0;
+}
+
+/*
+ * setting PAL mode
+ */
+static int venc_set_pal(struct v4l2_subdev *sd)
+{
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+
+	v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
+
+	/* Setup clock at VPSS & VENC for SD */
+	vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+	if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
+		return -EINVAL;
+
+	venc_enabledigitaloutput(sd, 0);
+
+	if (pdata->venc_type == VPBE_VERSION_3) {
+		venc_write(sd, VENC_CLKCTL, 0x1);
+		venc_write(sd, VENC_VIDCTL, 0);
+		vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+	} else if (pdata->venc_type == VPBE_VERSION_2) {
+		venc_write(sd, VENC_CLKCTL, 0x1);
+		venc_write(sd, VENC_VIDCTL, 0);
+		vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+	} else {
+		/* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+		venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+		/* Set REC656 Mode */
+		venc_write(sd, VENC_YCCCTL, 0x1);
+	}
+
+	venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
+			VENC_SYNCCTL_OVD);
+	venc_write(sd, VENC_VMOD, 0);
+	venc_modify(sd, VENC_VMOD,
+			(1 << VENC_VMOD_VIE_SHIFT),
+			VENC_VMOD_VIE);
+	venc_modify(sd, VENC_VMOD,
+			(0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+	venc_modify(sd, VENC_VMOD,
+			(1 << VENC_VMOD_TVTYP_SHIFT),
+			VENC_VMOD_TVTYP);
+	venc_write(sd, VENC_DACTST, 0x0);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+	return 0;
+}
+
+#define VDAC_CONFIG_HD_V2	0x081141EF
+/*
+ * venc_set_480p59_94
+ *
+ * This function configures the video encoder for the EDTV (525p) component output.
+ */
+static int venc_set_480p59_94(struct v4l2_subdev *sd)
+{
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+
+	v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
+	if ((pdata->venc_type != VPBE_VERSION_1) &&
+	    (pdata->venc_type != VPBE_VERSION_2))
+		return -EINVAL;
+
+	/* Setup clock at VPSS & VENC for SD */
+	if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_480P59_94) < 0)
+		return -EINVAL;
+
+	venc_enabledigitaloutput(sd, 0);
+
+	if (pdata->venc_type == VPBE_VERSION_2)
+		vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+	venc_write(sd, VENC_OSDCLK0, 0);
+	venc_write(sd, VENC_OSDCLK1, 1);
+
+	if (pdata->venc_type == VPBE_VERSION_1) {
+		venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+			    VENC_VDPRO_DAFRQ);
+		venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+			    VENC_VDPRO_DAUPS);
+	}
+
+	venc_write(sd, VENC_VMOD, 0);
+	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+		    VENC_VMOD_VIE);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+	venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
+		    VENC_VMOD_TVTYP);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+		    VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+	return 0;
+}
+
+/*
+ * venc_set_576p50
+ *
+ * This function configures the video encoder for the EDTV (625p/576p50)
+ * component output setting.
+ */
+static int venc_set_576p50(struct v4l2_subdev *sd)
+{
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+
+	v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
+
+	if ((pdata->venc_type != VPBE_VERSION_1) &&
+	  (pdata->venc_type != VPBE_VERSION_2))
+		return -EINVAL;
+	/* Setup clock at VPSS & VENC for SD */
+	if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_576P50) < 0)
+		return -EINVAL;
+
+	venc_enabledigitaloutput(sd, 0);
+
+	if (pdata->venc_type == VPBE_VERSION_2)
+		vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+
+	venc_write(sd, VENC_OSDCLK0, 0);
+	venc_write(sd, VENC_OSDCLK1, 1);
+
+	if (pdata->venc_type == VPBE_VERSION_1) {
+		venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+			    VENC_VDPRO_DAFRQ);
+		venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+			    VENC_VDPRO_DAUPS);
+	}
+
+	venc_write(sd, VENC_VMOD, 0);
+	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+		    VENC_VMOD_VIE);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+	venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
+		    VENC_VMOD_TVTYP);
+
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+		    VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+	return 0;
+}
+
+/*
+ * venc_set_720p60_internal - Setup 720p60 in venc for dm365 only
+ */
+static int venc_set_720p60_internal(struct v4l2_subdev *sd)
+{
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+
+	if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_720P60) < 0)
+		return -EINVAL;
+
+	venc_enabledigitaloutput(sd, 0);
+
+	venc_write(sd, VENC_OSDCLK0, 0);
+	venc_write(sd, VENC_OSDCLK1, 1);
+
+	venc_write(sd, VENC_VMOD, 0);
+	/* DM365 component HD mode */
+	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+	    VENC_VMOD_VIE);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+	venc_modify(sd, VENC_VMOD, (HDTV_720P << VENC_VMOD_TVTYP_SHIFT),
+		    VENC_VMOD_TVTYP);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+	venc_write(sd, VENC_XHINTVL, 0);
+	return 0;
+}
+
+/*
+ * venc_set_1080i30_internal - Setup 1080i30 in venc for dm365 only
+ */
+static int venc_set_1080i30_internal(struct v4l2_subdev *sd)
+{
+	struct venc_state *venc = to_state(sd);
+	struct venc_platform_data *pdata = venc->pdata;
+
+	if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_1080P30) < 0)
+		return -EINVAL;
+
+	venc_enabledigitaloutput(sd, 0);
+
+	venc_write(sd, VENC_OSDCLK0, 0);
+	venc_write(sd, VENC_OSDCLK1, 1);
+
+
+	venc_write(sd, VENC_VMOD, 0);
+	/* DM365 component HD mode */
+	venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+		    VENC_VMOD_VIE);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+	venc_modify(sd, VENC_VMOD, (HDTV_1080I << VENC_VMOD_TVTYP_SHIFT),
+		    VENC_VMOD_TVTYP);
+	venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+	venc_write(sd, VENC_XHINTVL, 0);
+	return 0;
+}
+
+static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+	v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
+
+	if (norm & V4L2_STD_525_60)
+		return venc_set_ntsc(sd);
+	else if (norm & V4L2_STD_625_50)
+		return venc_set_pal(sd);
+
+	return -EINVAL;
+}
+
+static int venc_s_dv_preset(struct v4l2_subdev *sd,
+			    struct v4l2_dv_preset *dv_preset)
+{
+	struct venc_state *venc = to_state(sd);
+	int ret;
+
+	v4l2_dbg(debug, 1, sd, "venc_s_dv_preset\n");
+
+	if (dv_preset->preset == V4L2_DV_576P50)
+		return venc_set_576p50(sd);
+	else if (dv_preset->preset == V4L2_DV_480P59_94)
+		return venc_set_480p59_94(sd);
+	else if ((dv_preset->preset == V4L2_DV_720P60) &&
+			(venc->pdata->venc_type == VPBE_VERSION_2)) {
+		/* set up the internal 720p60 mode */
+		ret = venc_set_720p60_internal(sd);
+		/* the DM365 VPBE has an internal DAC; configure it for HD */
+		vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+		return ret;
+	} else if ((dv_preset->preset == V4L2_DV_1080I30) &&
+		(venc->pdata->venc_type == VPBE_VERSION_2)) {
+		/* set up the internal 1080i30 mode */
+		ret = venc_set_1080i30_internal(sd);
+		/* the DM365 VPBE has an internal DAC; configure it for HD */
+		vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+		return ret;
+	}
+	return -EINVAL;
+}
+
+static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+			  u32 config)
+{
+	struct venc_state *venc = to_state(sd);
+	int ret;
+
+	v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
+
+	ret = venc_set_dac(sd, output);
+	if (!ret)
+		venc->output = output;
+
+	return ret;
+}
+
+static long venc_ioctl(struct v4l2_subdev *sd,
+			unsigned int cmd,
+			void *arg)
+{
+	u32 val;
+
+	switch (cmd) {
+	case VENC_GET_FLD:
+		val = venc_read(sd, VENC_VSTAT);
+		*((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
+				 VENC_VSTAT_FIDST);
+		break;
+	default:
+		v4l2_err(sd, "Invalid ioctl command\n");
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_subdev_core_ops venc_core_ops = {
+	.ioctl      = venc_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops venc_video_ops = {
+	.s_routing = venc_s_routing,
+	.s_std_output = venc_s_std_output,
+	.s_dv_preset = venc_s_dv_preset,
+};
+
+static const struct v4l2_subdev_ops venc_ops = {
+	.core = &venc_core_ops,
+	.video = &venc_video_ops,
+};
+
+static int venc_initialize(struct v4l2_subdev *sd)
+{
+	struct venc_state *venc = to_state(sd);
+	int ret;
+
+	/* Set default to output to composite and std to NTSC */
+	venc->output = 0;
+	venc->std = V4L2_STD_525_60;
+
+	ret = venc_s_routing(sd, 0, venc->output, 0);
+	if (ret < 0) {
+		v4l2_err(sd, "Error setting output during init\n");
+		return -EINVAL;
+	}
+
+	ret = venc_s_std_output(sd, venc->std);
+	if (ret < 0) {
+		v4l2_err(sd, "Error setting std during init\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int venc_device_get(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct venc_state **venc = data;
+
+	if (strcmp(MODULE_NAME, pdev->name) == 0)
+		*venc = platform_get_drvdata(pdev);
+
+	return 0;
+}
+
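+/*
+ * Look up the VENC platform device, register it as a V4L2 sub-device and
+ * program the default output (composite, NTSC).
+ */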
+struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
+		const char *venc_name)
+{
+	struct venc_state *venc = NULL;
+	int err;
+
+	err = bus_for_each_dev(&platform_bus_type, NULL, &venc,
+			       venc_device_get);
+	if (err < 0 || venc == NULL)
+		return NULL;
+
+	v4l2_subdev_init(&venc->sd, &venc_ops);
+
+	strcpy(venc->sd.name, venc_name);
+	if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
+		v4l2_err(v4l2_dev,
+			"vpbe unable to register venc sub device\n");
+		return NULL;
+	}
+	if (venc_initialize(&venc->sd)) {
+		v4l2_err(v4l2_dev,
+			"vpbe venc initialization failed\n");
+		return NULL;
+	}
+
+	return &venc->sd;
+}
+EXPORT_SYMBOL(venc_sub_dev_init);
+
+static int venc_probe(struct platform_device *pdev)
+{
+	struct venc_state *venc;
+	struct resource *res;
+	int ret;
+
+	venc = kzalloc(sizeof(struct venc_state), GFP_KERNEL);
+	if (venc == NULL)
+		return -ENOMEM;
+
+	venc->pdev = &pdev->dev;
+	venc->pdata = pdev->dev.platform_data;
+	if (venc->pdata == NULL) {
+		dev_err(venc->pdev,
+			"Unable to get platform data for VENC sub device\n");
+		ret = -ENOENT;
+		goto free_mem;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(venc->pdev,
+			"Unable to get VENC register address map\n");
+		ret = -ENODEV;
+		goto free_mem;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), "venc")) {
+		dev_err(venc->pdev, "Unable to reserve VENC MMIO region\n");
+		ret = -ENODEV;
+		goto free_mem;
+	}
+
+	venc->venc_base = ioremap_nocache(res->start, resource_size(res));
+	if (!venc->venc_base) {
+		dev_err(venc->pdev, "Unable to map VENC IO space\n");
+		ret = -ENODEV;
+		goto release_venc_mem_region;
+	}
+
+	if (venc->pdata->venc_type != VPBE_VERSION_1) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!res) {
+			dev_err(venc->pdev,
+				"Unable to get VDAC_CONFIG address map\n");
+			ret = -ENODEV;
+			goto unmap_venc_io;
+		}
+
+		if (!request_mem_region(res->start,
+					resource_size(res), "venc")) {
+			dev_err(venc->pdev,
+				"Unable to reserve VDAC_CONFIG MMIO region\n");
+			ret = -ENODEV;
+			goto unmap_venc_io;
+		}
+
+		venc->vdaccfg_reg = ioremap_nocache(res->start,
+						    resource_size(res));
+		if (!venc->vdaccfg_reg) {
+			dev_err(venc->pdev,
+				"Unable to map VDAC_CONFIG IO space\n");
+			ret = -ENODEV;
+			goto release_vdaccfg_mem_region;
+		}
+	}
+	spin_lock_init(&venc->lock);
+	platform_set_drvdata(pdev, venc);
+	dev_notice(venc->pdev, "VENC sub device probe success\n");
+	return 0;
+
+release_vdaccfg_mem_region:
+	release_mem_region(res->start, resource_size(res));
+unmap_venc_io:
+	iounmap(venc->venc_base);
+release_venc_mem_region:
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+free_mem:
+	kfree(venc);
+	return ret;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+	struct venc_state *venc = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	iounmap((void *)venc->venc_base);
+	release_mem_region(res->start, resource_size(res));
+	if (venc->pdata->venc_type != VPBE_VERSION_1) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		iounmap((void *)venc->vdaccfg_reg);
+		release_mem_region(res->start, resource_size(res));
+	}
+	kfree(venc);
+
+	return 0;
+}
+
+static struct platform_driver venc_driver = {
+	.probe		= venc_probe,
+	.remove		= venc_remove,
+	.driver		= {
+		.name	= MODULE_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+module_platform_driver(venc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPBE VENC Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/davinci/vpbe_venc_regs.h b/drivers/media/platform/davinci/vpbe_venc_regs.h
new file mode 100644
index 0000000..947cb15
--- /dev/null
+++ b/drivers/media/platform/davinci/vpbe_venc_regs.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_VENC_REGS_H
+#define _VPBE_VENC_REGS_H
+
+/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
+#define VENC_VMOD				0x00
+#define VENC_VIDCTL				0x04
+#define VENC_VDPRO				0x08
+#define VENC_SYNCCTL				0x0C
+#define VENC_HSPLS				0x10
+#define VENC_VSPLS				0x14
+#define VENC_HINT				0x18
+#define VENC_HSTART				0x1C
+#define VENC_HVALID				0x20
+#define VENC_VINT				0x24
+#define VENC_VSTART				0x28
+#define VENC_VVALID				0x2C
+#define VENC_HSDLY				0x30
+#define VENC_VSDLY				0x34
+#define VENC_YCCCTL				0x38
+#define VENC_RGBCTL				0x3C
+#define VENC_RGBCLP				0x40
+#define VENC_LINECTL				0x44
+#define VENC_CULLLINE				0x48
+#define VENC_LCDOUT				0x4C
+#define VENC_BRTS				0x50
+#define VENC_BRTW				0x54
+#define VENC_ACCTL				0x58
+#define VENC_PWMP				0x5C
+#define VENC_PWMW				0x60
+#define VENC_DCLKCTL				0x64
+#define VENC_DCLKPTN0				0x68
+#define VENC_DCLKPTN1				0x6C
+#define VENC_DCLKPTN2				0x70
+#define VENC_DCLKPTN3				0x74
+#define VENC_DCLKPTN0A				0x78
+#define VENC_DCLKPTN1A				0x7C
+#define VENC_DCLKPTN2A				0x80
+#define VENC_DCLKPTN3A				0x84
+#define VENC_DCLKHS				0x88
+#define VENC_DCLKHSA				0x8C
+#define VENC_DCLKHR				0x90
+#define VENC_DCLKVS				0x94
+#define VENC_DCLKVR				0x98
+#define VENC_CAPCTL				0x9C
+#define VENC_CAPDO				0xA0
+#define VENC_CAPDE				0xA4
+#define VENC_ATR0				0xA8
+#define VENC_ATR1				0xAC
+#define VENC_ATR2				0xB0
+#define VENC_VSTAT				0xB8
+#define VENC_RAMADR				0xBC
+#define VENC_RAMPORT				0xC0
+#define VENC_DACTST				0xC4
+#define VENC_YCOLVL				0xC8
+#define VENC_SCPROG				0xCC
+#define VENC_CVBS				0xDC
+#define VENC_CMPNT				0xE0
+#define VENC_ETMG0				0xE4
+#define VENC_ETMG1				0xE8
+#define VENC_ETMG2				0xEC
+#define VENC_ETMG3				0xF0
+#define VENC_DACSEL				0xF4
+#define VENC_ARGBX0				0x100
+#define VENC_ARGBX1				0x104
+#define VENC_ARGBX2				0x108
+#define VENC_ARGBX3				0x10C
+#define VENC_ARGBX4				0x110
+#define VENC_DRGBX0				0x114
+#define VENC_DRGBX1				0x118
+#define VENC_DRGBX2				0x11C
+#define VENC_DRGBX3				0x120
+#define VENC_DRGBX4				0x124
+#define VENC_VSTARTA				0x128
+#define VENC_OSDCLK0				0x12C
+#define VENC_OSDCLK1				0x130
+#define VENC_HVLDCL0				0x134
+#define VENC_HVLDCL1				0x138
+#define VENC_OSDHADV				0x13C
+#define VENC_CLKCTL				0x140
+#define VENC_GAMCTL				0x144
+#define VENC_XHINTVL				0x174
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV			(1 << 1)
+#define VPBE_PCR_CLK_OFF			(1 << 0)
+
+#define VENC_VMOD_VDMD_SHIFT			12
+#define VENC_VMOD_VDMD_YCBCR16			0
+#define VENC_VMOD_VDMD_YCBCR8			1
+#define VENC_VMOD_VDMD_RGB666			2
+#define VENC_VMOD_VDMD_RGB8			3
+#define VENC_VMOD_VDMD_EPSON			4
+#define VENC_VMOD_VDMD_CASIO			5
+#define VENC_VMOD_VDMD_UDISPQVGA		6
+#define VENC_VMOD_VDMD_STNLCD			7
+#define VENC_VMOD_VIE_SHIFT			1
+#define VENC_VMOD_VDMD				(7 << 12)
+#define VENC_VMOD_ITLCL				(1 << 11)
+#define VENC_VMOD_ITLC				(1 << 10)
+#define VENC_VMOD_NSIT				(1 << 9)
+#define VENC_VMOD_HDMD				(1 << 8)
+#define VENC_VMOD_TVTYP_SHIFT			6
+#define VENC_VMOD_TVTYP				(3 << 6)
+#define VENC_VMOD_SLAVE				(1 << 5)
+#define VENC_VMOD_VMD				(1 << 4)
+#define VENC_VMOD_BLNK				(1 << 3)
+#define VENC_VMOD_VIE				(1 << 1)
+#define VENC_VMOD_VENC				(1 << 0)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC				0
+#define SDTV_PAL				1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P				0
+#define HDTV_625P				1
+#define HDTV_1080I				2
+#define HDTV_720P				3
+
+#define VENC_VIDCTL_VCLKP			(1 << 14)
+#define VENC_VIDCTL_VCLKE_SHIFT			13
+#define VENC_VIDCTL_VCLKE			(1 << 13)
+#define VENC_VIDCTL_VCLKZ_SHIFT			12
+#define VENC_VIDCTL_VCLKZ			(1 << 12)
+#define VENC_VIDCTL_SYDIR_SHIFT			8
+#define VENC_VIDCTL_SYDIR			(1 << 8)
+#define VENC_VIDCTL_DOMD_SHIFT			4
+#define VENC_VIDCTL_DOMD			(3 << 4)
+#define VENC_VIDCTL_YCDIR_SHIFT			0
+#define VENC_VIDCTL_YCDIR			(1 << 0)
+
+#define VENC_VDPRO_ATYCC_SHIFT			5
+#define VENC_VDPRO_ATYCC			(1 << 5)
+#define VENC_VDPRO_ATCOM_SHIFT			4
+#define VENC_VDPRO_ATCOM			(1 << 4)
+#define VENC_VDPRO_DAFRQ			(1 << 3)
+#define VENC_VDPRO_DAUPS			(1 << 2)
+#define VENC_VDPRO_CUPS				(1 << 1)
+#define VENC_VDPRO_YUPS				(1 << 0)
+
+#define VENC_SYNCCTL_VPL_SHIFT			3
+#define VENC_SYNCCTL_VPL			(1 << 3)
+#define VENC_SYNCCTL_HPL_SHIFT			2
+#define VENC_SYNCCTL_HPL			(1 << 2)
+#define VENC_SYNCCTL_SYEV_SHIFT			1
+#define VENC_SYNCCTL_SYEV			(1 << 1)
+#define VENC_SYNCCTL_SYEH_SHIFT			0
+#define VENC_SYNCCTL_SYEH			(1 << 0)
+#define VENC_SYNCCTL_OVD_SHIFT			14
+#define VENC_SYNCCTL_OVD			(1 << 14)
+
+#define VENC_DCLKCTL_DCKEC_SHIFT		11
+#define VENC_DCLKCTL_DCKEC			(1 << 11)
+#define VENC_DCLKCTL_DCKPW_SHIFT		0
+#define VENC_DCLKCTL_DCKPW			(0x3f << 0)
+
+#define VENC_VSTAT_FIDST			(1 << 4)
+
+#define VENC_CMPNT_MRGB_SHIFT			14
+#define VENC_CMPNT_MRGB				(1 << 14)
+
+#endif				/* _VPBE_VENC_REGS_H */
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
new file mode 100644
index 0000000..843b138
--- /dev/null
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -0,0 +1,2079 @@
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Driver name : VPFE Capture driver
+ *    VPFE Capture driver allows applications to capture and stream video
+ *    frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
+ *    TVP5146 or  Raw Bayer RGB image data from an image sensor
+ *    such as Microns' MT9T001, MT9T031 etc.
+ *
+ *    These SoCs have, in common, a Video Processing Subsystem (VPSS) that
+ *    consists of a Video Processing Front End (VPFE) for capturing
+ *    video/raw image data and Video Processing Back End (VPBE) for displaying
+ *    YUV data through an in-built analog encoder or Digital LCD port. This
+ *    driver is for capture through VPFE. A typical EVM using these SoCs have
+ *    following high level configuration.
+ *
+ *
+ *    decoder(TVP5146/		YUV/
+ * 	     MT9T001)   -->  Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
+ *    				data input              |      |
+ *							V      |
+ *						      SDRAM    |
+ *							       V
+ *							   Image Processor
+ *							       |
+ *							       V
+ *							     SDRAM
+ *    The data flow happens from a decoder connected to the VPFE over a
+ *    YUV embedded (BT.656/BT.1120) or separate sync or raw bayer rgb interface
+ *    and to the input of VPFE through an optional MUX (if more inputs are
+ *    to be interfaced on the EVM). The input data is first passed through
+ *    CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
+ *    performs little or no processing on YUV data and pre-processes Raw
+ *    Bayer RGB data through modules such as Defect Pixel Correction (DFC),
+ *    Color Space Conversion (CSC), data gain/offset etc. After this, data
+ *    can be written to SDRAM or can be connected to the image processing
+ *    block such as IPIPE (on DM355 only).
+ *
+ *    Features supported
+ *  		- MMAP IO
+ *		- Capture using TVP5146 over BT.656
+ *		- support for interfacing decoders using sub device model
+ *		- Work with DM355 or DM6446 CCDC to do Raw Bayer RGB/YUV
+ *		  data capture to SDRAM.
+ *    TODO list
+ *		- Support multiple REQBUF after open
+ *		- Support for de-allocating buffers through REQBUF
+ *		- Support for Raw Bayer RGB capture
+ *		- Support for chaining Image Processor
+ *		- Support for static allocation of buffers
+ *		- Support for USERPTR IO
+ *		- Support for STREAMON before QBUF
+ *		- Support for control ioctls
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-common.h>
+#include <linux/io.h>
+#include <media/davinci/vpfe_capture.h>
+#include "ccdc_hw_device.h"
+
+static int debug;
+static u32 numbuffers = 3;
+static u32 bufsize = (720 * 576 * 2);
+
+module_param(numbuffers, uint, S_IRUGO);
+module_param(bufsize, uint, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(numbuffers, "buffer count (default:3)");
+MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
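+
+/*
+ * Example (hypothetical) of overriding the defaults when the driver is built
+ * as a module; the exact module name and values are only an illustration:
+ *
+ *	modprobe vpfe_capture numbuffers=4 bufsize=829440 debug=1
+ *
+ * Out-of-range values are clamped to the configured minimums in
+ * vpfe_initialize().
+ */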
+
+MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/* standard information */
+struct vpfe_standard {
+	v4l2_std_id std_id;
+	unsigned int width;
+	unsigned int height;
+	struct v4l2_fract pixelaspect;
+	/* 0 - progressive, 1 - interlaced */
+	int frame_format;
+};
+
+/* ccdc configuration */
+struct ccdc_config {
+	/* This makes sure vpfe is probed and ready to go */
+	int vpfe_probed;
+	/* name of ccdc device */
+	char name[32];
+};
+
+/* data structures */
+static struct vpfe_config_params config_params = {
+	.min_numbuffers = 3,
+	.numbuffers = 3,
+	.min_bufsize = 720 * 480 * 2,
+	.device_bufsize = 720 * 576 * 2,
+};
+
+/* ccdc device registered */
+static struct ccdc_hw_device *ccdc_dev;
+/* lock for accessing ccdc information */
+static DEFINE_MUTEX(ccdc_lock);
+/* ccdc configuration */
+static struct ccdc_config *ccdc_cfg;
+
+const struct vpfe_standard vpfe_standards[] = {
+	{V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+	{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
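+
+/*
+ * Note: the pixelaspect fractions above (11/10 for 525/60 and 54/59 for
+ * 625/50) are the usual Rec. 601 pixel aspect ratios reported through
+ * VIDIOC_CROPCAP for 720-pixel-wide sampling.
+ */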
+
+/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
+static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
+	{
+		.fmtdesc = {
+			.index = 0,
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.description = "Bayer GrRBGb 8bit A-Law compr.",
+			.pixelformat = V4L2_PIX_FMT_SBGGR8,
+		},
+		.bpp = 1,
+	},
+	{
+		.fmtdesc = {
+			.index = 1,
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.description = "Bayer GrRBGb - 16bit",
+			.pixelformat = V4L2_PIX_FMT_SBGGR16,
+		},
+		.bpp = 2,
+	},
+	{
+		.fmtdesc = {
+			.index = 2,
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.description = "Bayer GrRBGb 8bit DPCM compr.",
+			.pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+		},
+		.bpp = 1,
+	},
+	{
+		.fmtdesc = {
+			.index = 3,
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.description = "YCbCr 4:2:2 Interleaved UYVY",
+			.pixelformat = V4L2_PIX_FMT_UYVY,
+		},
+		.bpp = 2,
+	},
+	{
+		.fmtdesc = {
+			.index = 4,
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.description = "YCbCr 4:2:2 Interleaved YUYV",
+			.pixelformat = V4L2_PIX_FMT_YUYV,
+		},
+		.bpp = 2,
+	},
+	{
+		.fmtdesc = {
+			.index = 5,
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.description = "Y/CbCr 4:2:0 - Semi planar",
+			.pixelformat = V4L2_PIX_FMT_NV12,
+		},
+		.bpp = 1,
+	},
+};
+
+/*
+ * vpfe_lookup_pix_format()
+ * lookup an entry in the vpfe pix format table based on pix_format
+ */
+static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
+		if (pix_format == vpfe_pix_fmts[i].fmtdesc.pixelformat)
+			return &vpfe_pix_fmts[i];
+	}
+	return NULL;
+}
+
+/*
+ * vpfe_register_ccdc_device. CCDC module calls this to
+ * register with vpfe capture
+ */
+int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
+{
+	int ret = 0;
+	printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
+
+	BUG_ON(!dev->hw_ops.open);
+	BUG_ON(!dev->hw_ops.enable);
+	BUG_ON(!dev->hw_ops.set_hw_if_params);
+	BUG_ON(!dev->hw_ops.configure);
+	BUG_ON(!dev->hw_ops.set_buftype);
+	BUG_ON(!dev->hw_ops.get_buftype);
+	BUG_ON(!dev->hw_ops.enum_pix);
+	BUG_ON(!dev->hw_ops.set_frame_format);
+	BUG_ON(!dev->hw_ops.get_frame_format);
+	BUG_ON(!dev->hw_ops.get_pixel_format);
+	BUG_ON(!dev->hw_ops.set_pixel_format);
+	BUG_ON(!dev->hw_ops.set_image_window);
+	BUG_ON(!dev->hw_ops.get_image_window);
+	BUG_ON(!dev->hw_ops.get_line_length);
+	BUG_ON(!dev->hw_ops.getfid);
+
+	mutex_lock(&ccdc_lock);
+	if (NULL == ccdc_cfg) {
+		/*
+		 * TODO. Will this ever happen? If so, we need to fix it.
+		 * Probably we need to add the request to a linked list and
+		 * walk through it during vpfe probe
+		 */
+		printk(KERN_ERR "vpfe capture not initialized\n");
+		ret = -EFAULT;
+		goto unlock;
+	}
+
+	if (strcmp(dev->name, ccdc_cfg->name)) {
+		/* ignore this ccdc */
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (ccdc_dev) {
+		printk(KERN_ERR "ccdc already registered\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ccdc_dev = dev;
+unlock:
+	mutex_unlock(&ccdc_lock);
+	return ret;
+}
+EXPORT_SYMBOL(vpfe_register_ccdc_device);
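+
+/*
+ * A minimal sketch (illustration only; names such as my_ccdc_hw_dev and
+ * my_ccdc_open are hypothetical) of how a CCDC hardware module is expected
+ * to hook into the capture driver. A real module must provide every hw_ops
+ * callback checked above, and its name must match the ccdc name passed in
+ * the vpfe platform data:
+ *
+ *	static struct ccdc_hw_device my_ccdc_hw_dev = {
+ *		.name	= "my_ccdc",
+ *		.owner	= THIS_MODULE,
+ *		.hw_ops	= {
+ *			.open	= my_ccdc_open,
+ *			.enable	= my_ccdc_enable,
+ *			...		(all remaining mandatory ops)
+ *		},
+ *	};
+ *
+ *	ret = vpfe_register_ccdc_device(&my_ccdc_hw_dev);	at init
+ *	vpfe_unregister_ccdc_device(&my_ccdc_hw_dev);		at exit
+ */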
+
+/*
+ * vpfe_unregister_ccdc_device. CCDC module calls this to
+ * unregister with vpfe capture
+ */
+void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
+{
+	if (NULL == dev) {
+		printk(KERN_ERR "invalid ccdc device ptr\n");
+		return;
+	}
+
+	printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n",
+		dev->name);
+
+	if (strcmp(dev->name, ccdc_cfg->name)) {
+		/* ignore this ccdc */
+		return;
+	}
+
+	mutex_lock(&ccdc_lock);
+	ccdc_dev = NULL;
+	mutex_unlock(&ccdc_lock);
+	return;
+}
+EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
+
+/*
+ * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
+ */
+static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev,
+				 struct v4l2_format *f)
+{
+	struct v4l2_rect image_win;
+	enum ccdc_buftype buf_type;
+	enum ccdc_frmfmt frm_fmt;
+
+	memset(f, 0, sizeof(*f));
+	f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	ccdc_dev->hw_ops.get_image_window(&image_win);
+	f->fmt.pix.width = image_win.width;
+	f->fmt.pix.height = image_win.height;
+	f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length();
+	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+				f->fmt.pix.height;
+	buf_type = ccdc_dev->hw_ops.get_buftype();
+	f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format();
+	frm_fmt = ccdc_dev->hw_ops.get_frame_format();
+	if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
+		f->fmt.pix.field = V4L2_FIELD_NONE;
+	else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+		if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+			f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+		else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED)
+			f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+		else {
+			v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n");
+			return -EINVAL;
+		}
+	} else {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * vpfe_config_ccdc_image_format()
+ * For a pix format, configure ccdc to setup the capture
+ */
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
+{
+	enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+	int ret = 0;
+
+	if (ccdc_dev->hw_ops.set_pixel_format(
+			vpfe_dev->fmt.fmt.pix.pixelformat) < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"couldn't set pix format in ccdc\n");
+		return -EINVAL;
+	}
+	/* configure the image window */
+	ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop);
+
+	switch (vpfe_dev->fmt.fmt.pix.field) {
+	case V4L2_FIELD_INTERLACED:
+		/* do nothing, since it is default */
+		ret = ccdc_dev->hw_ops.set_buftype(
+				CCDC_BUFTYPE_FLD_INTERLEAVED);
+		break;
+	case V4L2_FIELD_NONE:
+		frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+		/* buffer type only applicable for interlaced scan */
+		break;
+	case V4L2_FIELD_SEQ_TB:
+		ret = ccdc_dev->hw_ops.set_buftype(
+				CCDC_BUFTYPE_FLD_SEPARATED);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set the frame format */
+	if (!ret)
+		ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt);
+	return ret;
+}
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default
+ * pix format & crop values in the vpfe device and ccdc. It first
+ * starts with default values from the standard table.
+ * It then checks if the sub device supports g_mbus_fmt and overrides the
+ * values based on that. It sets crop values to match the scan resolution,
+ * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
+ * values in the ccdc.
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
+				    const v4l2_std_id *std_id)
+{
+	struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
+	struct v4l2_mbus_framefmt mbus_fmt;
+	struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
+	int i, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+		if (vpfe_standards[i].std_id & *std_id) {
+			vpfe_dev->std_info.active_pixels =
+					vpfe_standards[i].width;
+			vpfe_dev->std_info.active_lines =
+					vpfe_standards[i].height;
+			vpfe_dev->std_info.frame_format =
+					vpfe_standards[i].frame_format;
+			vpfe_dev->std_index = i;
+			break;
+		}
+	}
+
+	if (i ==  ARRAY_SIZE(vpfe_standards)) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n");
+		return -EINVAL;
+	}
+
+	vpfe_dev->crop.top = 0;
+	vpfe_dev->crop.left = 0;
+	vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
+	vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
+	pix->width = vpfe_dev->crop.width;
+	pix->height = vpfe_dev->crop.height;
+
+	/* first field and frame format based on standard frame format */
+	if (vpfe_dev->std_info.frame_format) {
+		pix->field = V4L2_FIELD_INTERLACED;
+		/* assume V4L2_PIX_FMT_UYVY as default */
+		pix->pixelformat = V4L2_PIX_FMT_UYVY;
+		v4l2_fill_mbus_format(&mbus_fmt, pix,
+				V4L2_MBUS_FMT_YUYV10_2X10);
+	} else {
+		pix->field = V4L2_FIELD_NONE;
+		/* assume V4L2_PIX_FMT_SBGGR8 */
+		pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
+		v4l2_fill_mbus_format(&mbus_fmt, pix,
+				V4L2_MBUS_FMT_SBGGR8_1X8);
+	}
+
+	/* if sub device supports g_mbus_fmt, override the defaults */
+	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+			sdinfo->grp_id, video, g_mbus_fmt, &mbus_fmt);
+
+	if (ret && ret != -ENOIOCTLCMD) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"error in getting g_mbus_fmt from sub device\n");
+		return ret;
+	}
+	v4l2_fill_pix_format(pix, &mbus_fmt);
+	pix->bytesperline = pix->width * 2;
+	pix->sizeimage = pix->bytesperline * pix->height;
+
+	/* Sets the values in CCDC */
+	ret = vpfe_config_ccdc_image_format(vpfe_dev);
+	if (ret)
+		return ret;
+
+	/* Update the values of sizeimage and bytesperline */
+	if (!ret) {
+		pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
+		pix->sizeimage = pix->bytesperline * pix->height;
+	}
+	return ret;
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
+{
+	int ret = 0;
+
+	/* set first input of current subdevice as the current input */
+	vpfe_dev->current_input = 0;
+
+	/* set default standard */
+	vpfe_dev->std_index = 0;
+
+	/* Configure the default format information */
+	ret = vpfe_config_image_format(vpfe_dev,
+				&vpfe_standards[vpfe_dev->std_index].std_id);
+	if (ret)
+		return ret;
+
+	/* now open the ccdc device to initialize it */
+	mutex_lock(&ccdc_lock);
+	if (NULL == ccdc_dev) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	if (!try_module_get(ccdc_dev->owner)) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n");
+		ret = -ENODEV;
+		goto unlock;
+	}
+	ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
+	if (!ret)
+		vpfe_dev->initialized = 1;
+
+	/* Clear all VPFE/CCDC interrupts */
+	if (vpfe_dev->cfg->clr_intr)
+		vpfe_dev->cfg->clr_intr(-1);
+
+unlock:
+	mutex_unlock(&ccdc_lock);
+	return ret;
+}
+
+/*
+ * vpfe_open : It creates an object of the file handle structure and
+ * stores it in the private_data member of the file pointer
+ */
+static int vpfe_open(struct file *file)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_fh *fh;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n");
+
+	if (!vpfe_dev->cfg->num_subdevs) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n");
+		return -ENODEV;
+	}
+
+	/* Allocate memory for the file handle object */
+	fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
+	if (NULL == fh) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"unable to allocate memory for file handle object\n");
+		return -ENOMEM;
+	}
+	/* store pointer to fh in private_data member of file */
+	file->private_data = fh;
+	fh->vpfe_dev = vpfe_dev;
+	mutex_lock(&vpfe_dev->lock);
+	/* If decoder is not initialized, initialize it */
+	if (!vpfe_dev->initialized) {
+		if (vpfe_initialize_device(vpfe_dev)) {
+			mutex_unlock(&vpfe_dev->lock);
+			return -ENODEV;
+		}
+	}
+	/* Increment device usrs counter */
+	vpfe_dev->usrs++;
+	/* Set io_allowed member to false */
+	fh->io_allowed = 0;
+	/* Initialize priority of this instance to default priority */
+	fh->prio = V4L2_PRIORITY_UNSET;
+	v4l2_prio_open(&vpfe_dev->prio, &fh->prio);
+	mutex_unlock(&vpfe_dev->lock);
+	return 0;
+}
+
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
+{
+	unsigned long addr;
+
+	vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+					struct videobuf_buffer, queue);
+	list_del(&vpfe_dev->next_frm->queue);
+	vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
+	addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+
+	ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
+{
+	unsigned long addr;
+
+	addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+	addr += vpfe_dev->field_off;
+	ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
+{
+	struct timeval timevalue;
+
+	do_gettimeofday(&timevalue);
+	vpfe_dev->cur_frm->ts = timevalue;
+	vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
+	vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+	wake_up_interruptible(&vpfe_dev->cur_frm->done);
+	vpfe_dev->cur_frm = vpfe_dev->next_frm;
+}
+
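+/*
+ * Note on the interrupt handling below: for progressive capture a VINT0
+ * interrupt marks a completed frame. For interlaced capture VINT0 fires per
+ * field; the software counter vpfe_dev->field_id is toggled on every
+ * interrupt and compared against the field id read from the CCDC, so that a
+ * field slip can be detected and recovered from (possibly losing one frame).
+ */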
+/* ISR for VINT0*/
+static irqreturn_t vpfe_isr(int irq, void *dev_id)
+{
+	struct vpfe_device *vpfe_dev = dev_id;
+	enum v4l2_field field;
+	int fid;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
+	field = vpfe_dev->fmt.fmt.pix.field;
+
+	/* if streaming not started, don't do anything */
+	if (!vpfe_dev->started)
+		goto clear_intr;
+
+	/* this is applicable only for the DM6446 */
+	if (NULL != ccdc_dev->hw_ops.reset)
+		ccdc_dev->hw_ops.reset();
+
+	if (field == V4L2_FIELD_NONE) {
+		/* handle progressive frame capture */
+		v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+			"frame format is progressive...\n");
+		if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+			vpfe_process_buffer_complete(vpfe_dev);
+		goto clear_intr;
+	}
+
+	/* interlaced or TB capture check which field we are in hardware */
+	fid = ccdc_dev->hw_ops.getfid();
+
+	/* switch the software maintained field id */
+	vpfe_dev->field_id ^= 1;
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n",
+		fid, vpfe_dev->field_id);
+	if (fid == vpfe_dev->field_id) {
+		/* we are in sync here, continue */
+		if (fid == 0) {
+			/*
+			 * One frame is just being captured. If the next frame
+			 * is available, release the current frame and move on
+			 */
+			if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
+				vpfe_process_buffer_complete(vpfe_dev);
+			/*
+			 * based on whether the two fields are stored
+			 * interleaved or separately in memory, reconfigure
+			 * the CCDC memory address
+			 */
+			if (field == V4L2_FIELD_SEQ_TB) {
+				vpfe_schedule_bottom_field(vpfe_dev);
+			}
+			goto clear_intr;
+		}
+		/*
+		 * If one field has just been captured, configure
+		 * the next frame: get the next frame from the empty
+		 * buffer queue; if no frame is available, hold on to the
+		 * current buffer.
+		 */
+		spin_lock(&vpfe_dev->dma_queue_lock);
+		if (!list_empty(&vpfe_dev->dma_queue) &&
+		    vpfe_dev->cur_frm == vpfe_dev->next_frm)
+			vpfe_schedule_next_buffer(vpfe_dev);
+		spin_unlock(&vpfe_dev->dma_queue_lock);
+	} else if (fid == 0) {
+		/*
+		 * Out of sync. Recover from any hardware out-of-sync
+		 * condition; we may lose one frame.
+		 */
+		vpfe_dev->field_id = fid;
+	}
+clear_intr:
+	if (vpfe_dev->cfg->clr_intr)
+		vpfe_dev->cfg->clr_intr(irq);
+
+	return IRQ_HANDLED;
+}
+
+/* vdint1_isr - isr handler for VINT1 interrupt */
+static irqreturn_t vdint1_isr(int irq, void *dev_id)
+{
+	struct vpfe_device *vpfe_dev = dev_id;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
+
+	/* if streaming not started, don't do anything */
+	if (!vpfe_dev->started) {
+		if (vpfe_dev->cfg->clr_intr)
+			vpfe_dev->cfg->clr_intr(irq);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&vpfe_dev->dma_queue_lock);
+	if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
+	    !list_empty(&vpfe_dev->dma_queue) &&
+	    vpfe_dev->cur_frm == vpfe_dev->next_frm)
+		vpfe_schedule_next_buffer(vpfe_dev);
+	spin_unlock(&vpfe_dev->dma_queue_lock);
+
+	if (vpfe_dev->cfg->clr_intr)
+		vpfe_dev->cfg->clr_intr(irq);
+
+	return IRQ_HANDLED;
+}
+
+static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
+{
+	enum ccdc_frmfmt frame_format;
+
+	frame_format = ccdc_dev->hw_ops.get_frame_format();
+	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+		free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
+}
+
+static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
+{
+	enum ccdc_frmfmt frame_format;
+
+	frame_format = ccdc_dev->hw_ops.get_frame_format();
+	if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
+		return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
+				    IRQF_DISABLED, "vpfe_capture1",
+				    vpfe_dev);
+	}
+	return 0;
+}
+
+/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */
+static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+	vpfe_dev->started = 0;
+	ccdc_dev->hw_ops.enable(0);
+	if (ccdc_dev->hw_ops.enable_out_to_sdram)
+		ccdc_dev->hw_ops.enable_out_to_sdram(0);
+}
+
+/*
+ * vpfe_release : This function deletes the buffer queue, frees the
+ * buffers and the vpfe file handle
+ */
+static int vpfe_release(struct file *file)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_fh *fh = file->private_data;
+	struct vpfe_subdev_info *sdinfo;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
+
+	/* Get the device lock */
+	mutex_lock(&vpfe_dev->lock);
+	/* if this instance is doing IO */
+	if (fh->io_allowed) {
+		if (vpfe_dev->started) {
+			sdinfo = vpfe_dev->current_subdev;
+			ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
+							 sdinfo->grp_id,
+							 video, s_stream, 0);
+			if (ret && (ret != -ENOIOCTLCMD))
+				v4l2_err(&vpfe_dev->v4l2_dev,
+				"stream off failed in subdev\n");
+			vpfe_stop_ccdc_capture(vpfe_dev);
+			vpfe_detach_irq(vpfe_dev);
+			videobuf_streamoff(&vpfe_dev->buffer_queue);
+		}
+		vpfe_dev->io_usrs = 0;
+		vpfe_dev->numbuffers = config_params.numbuffers;
+	}
+
+	/* Decrement device usrs counter */
+	vpfe_dev->usrs--;
+	/* Close the priority */
+	v4l2_prio_close(&vpfe_dev->prio, fh->prio);
+	/* If this is the last file handle */
+	if (!vpfe_dev->usrs) {
+		vpfe_dev->initialized = 0;
+		if (ccdc_dev->hw_ops.close)
+			ccdc_dev->hw_ops.close(vpfe_dev->pdev);
+		module_put(ccdc_dev->owner);
+	}
+	mutex_unlock(&vpfe_dev->lock);
+	file->private_data = NULL;
+	/* Free memory allocated to file handle object */
+	kfree(fh);
+	return 0;
+}
+
+/*
+ * vpfe_mmap : It is used to map kernel space buffers
+ * into user space
+ */
+static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	/* Get the device object and file handle object */
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
+
+	return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
+}
+
+/*
+ * vpfe_poll: It is used for select/poll system call
+ */
+static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
+
+	if (vpfe_dev->started)
+		return videobuf_poll_stream(file,
+					    &vpfe_dev->buffer_queue, wait);
+	return 0;
+}
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+	.owner = THIS_MODULE,
+	.open = vpfe_open,
+	.release = vpfe_release,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap = vpfe_mmap,
+	.poll = vpfe_poll
+};
+
+/*
+ * vpfe_check_format()
+ * This function adjusts the input pixel format as per hardware
+ * capabilities and updates the same in pixfmt.
+ * The following algorithm is used:
+ *
+ *	If the given pixformat is not in the vpfe list of pix formats or not
+ *	supported by the hardware, the current value of pixformat in the
+ *	device is used.
+ *	If the given field is not supported, the current field is used. If the
+ *	field is different from the current one, it is matched with that from
+ *	the sub device.
+ *	Minimum height is 2 lines for an interlaced or TB field and 1 line for
+ *	progressive. Maximum height is clamped to the active lines of scan.
+ *	Minimum width is 32 bytes in memory and width is clamped to the active
+ *	pixels of scan.
+ *	bytesperline is a multiple of 32.
+ */
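+/*
+ * Worked example of the size math below (illustrative only): for 720x480
+ * UYVY (bpp = 2), bytesperline = ((720 * 2) + 31) & ~31 = 1440 and
+ * sizeimage = 1440 * 480 = 691200; for 720x480 NV12 (bpp = 1),
+ * bytesperline = ((720 * 1) + 31) & ~31 = 736 and
+ * sizeimage = 736 * 480 + (736 * 480) / 2 = 529920.
+ */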
+static const struct vpfe_pixel_format *
+	vpfe_check_format(struct vpfe_device *vpfe_dev,
+			  struct v4l2_pix_format *pixfmt)
+{
+	u32 min_height = 1, min_width = 32, max_width, max_height;
+	const struct vpfe_pixel_format *vpfe_pix_fmt;
+	u32 pix;
+	int temp, found;
+
+	vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+	if (NULL == vpfe_pix_fmt) {
+		/*
+		 * use current pixel format in the vpfe device. We
+		 * will find this pix format in the table
+		 */
+		pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+		vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+	}
+
+	/* check if hw supports it */
+	temp = 0;
+	found = 0;
+	while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
+		if (vpfe_pix_fmt->fmtdesc.pixelformat == pix) {
+			found = 1;
+			break;
+		}
+		temp++;
+	}
+
+	if (!found) {
+		/* use current pixel format */
+		pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
+		/*
+		 * Since this is currently used in the vpfe device, we
+		 * will find this pix format in the table
+		 */
+		vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
+	}
+
+	/* check what field format is supported */
+	if (pixfmt->field == V4L2_FIELD_ANY) {
+		/* if field is any, use current value as default */
+		pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+	}
+
+	/*
+	 * If the field is not the same as the current field in the vpfe
+	 * device, try matching the field with the sub device field
+	 */
+	if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) {
+		/*
+		 * If field value is not in the supported fields, use current
+		 * field used in the device as default
+		 */
+		switch (pixfmt->field) {
+		case V4L2_FIELD_INTERLACED:
+		case V4L2_FIELD_SEQ_TB:
+			/* if sub device is supporting progressive, use that */
+			if (!vpfe_dev->std_info.frame_format)
+				pixfmt->field = V4L2_FIELD_NONE;
+			break;
+		case V4L2_FIELD_NONE:
+			if (vpfe_dev->std_info.frame_format)
+				pixfmt->field = V4L2_FIELD_INTERLACED;
+			break;
+
+		default:
+			/* use current field as default */
+			pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
+			break;
+		}
+	}
+
+	/* Now adjust image resolutions supported */
+	if (pixfmt->field == V4L2_FIELD_INTERLACED ||
+	    pixfmt->field == V4L2_FIELD_SEQ_TB)
+		min_height = 2;
+
+	max_width = vpfe_dev->std_info.active_pixels;
+	max_height = vpfe_dev->std_info.active_lines;
+	min_width /= vpfe_pix_fmt->bpp;
+
+	v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n",
+		  pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp);
+
+	pixfmt->width = clamp((pixfmt->width), min_width, max_width);
+	pixfmt->height = clamp((pixfmt->height), min_height, max_height);
+
+	/* If interlaced, adjust height to be a multiple of 2 */
+	if (pixfmt->field == V4L2_FIELD_INTERLACED)
+		pixfmt->height &= (~1);
+	/*
+	 * recalculate bytesperline and sizeimage since width
+	 * and height might have changed
+	 */
+	pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31)
+				& ~31);
+	if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+		pixfmt->sizeimage =
+			pixfmt->bytesperline * pixfmt->height +
+			((pixfmt->bytesperline * pixfmt->height) >> 1);
+	else
+		pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+	v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height ="
+		 " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+		 pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
+		 pixfmt->bytesperline, pixfmt->sizeimage);
+	return vpfe_pix_fmt;
+}
+
+static int vpfe_querycap(struct file *file, void  *priv,
+			       struct v4l2_capability *cap)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
+
+	cap->version = VPFE_CAPTURE_VERSION_CODE;
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
+	strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
+	return 0;
+}
+
+static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
+	/* Fill in the information about format */
+	*fmt = vpfe_dev->fmt;
+	return ret;
+}
+
+static int vpfe_enum_fmt_vid_cap(struct file *file, void  *priv,
+				   struct v4l2_fmtdesc *fmt)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	const struct vpfe_pixel_format *pix_fmt;
+	int temp_index;
+	u32 pix;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
+
+	if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0)
+		return -EINVAL;
+
+	/* Fill in the information about format */
+	pix_fmt = vpfe_lookup_pix_format(pix);
+	if (NULL != pix_fmt) {
+		temp_index = fmt->index;
+		*fmt = pix_fmt->fmtdesc;
+		fmt->index = temp_index;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	const struct vpfe_pixel_format *pix_fmts;
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
+
+	/* If streaming is started, return error */
+	if (vpfe_dev->started) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
+		return -EBUSY;
+	}
+
+	/* Check for valid frame format */
+	pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
+
+	if (NULL == pix_fmts)
+		return -EINVAL;
+
+	/* store the pixel format in the device  object */
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	/* First detach any IRQ if currently attached */
+	vpfe_detach_irq(vpfe_dev);
+	vpfe_dev->fmt = *fmt;
+	/* set image capture parameters in the ccdc */
+	ret = vpfe_config_ccdc_image_format(vpfe_dev);
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	const struct vpfe_pixel_format *pix_fmts;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
+
+	pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
+	if (NULL == pix_fmts)
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
+ */
+static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev,
+					int *subdev_index,
+					int *subdev_input_index,
+					int app_input_index)
+{
+	struct vpfe_config *cfg = vpfe_dev->cfg;
+	struct vpfe_subdev_info *sdinfo;
+	int i, j = 0;
+
+	for (i = 0; i < cfg->num_subdevs; i++) {
+		sdinfo = &cfg->sub_devs[i];
+		if (app_input_index < (j + sdinfo->num_inputs)) {
+			*subdev_index = i;
+			*subdev_input_index = app_input_index - j;
+			return 0;
+		}
+		j += sdinfo->num_inputs;
+	}
+	return -EINVAL;
+}
+
+/*
+ * vpfe_get_app_input_index - Get app input index for a given subdev input
+ * index. The driver stores the input index of the current sub device and
+ * translates it when the application requests the current input
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev,
+				    int *app_input_index)
+{
+	struct vpfe_config *cfg = vpfe_dev->cfg;
+	struct vpfe_subdev_info *sdinfo;
+	int i, j = 0;
+
+	for (i = 0; i < cfg->num_subdevs; i++) {
+		sdinfo = &cfg->sub_devs[i];
+		if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) {
+			if (vpfe_dev->current_input >= sdinfo->num_inputs)
+				return -1;
+			*app_input_index = j + vpfe_dev->current_input;
+			return 0;
+		}
+		j += sdinfo->num_inputs;
+	}
+	return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+				 struct v4l2_input *inp)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+	int subdev, index;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
+
+	if (vpfe_get_subdev_input_index(vpfe_dev,
+					&subdev,
+					&index,
+					inp->index) < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "input information not found"
+			 " for the subdev\n");
+		return -EINVAL;
+	}
+	sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
+	memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input));
+	return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
+
+	return vpfe_get_app_input_index(vpfe_dev, index);
+}
+
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+	int subdev_index, inp_index;
+	struct vpfe_route *route;
+	u32 input = 0, output = 0;
+	int ret = -EINVAL;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * If streaming is started return device busy
+	 * error
+	 */
+	if (vpfe_dev->started) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
+		ret = -EBUSY;
+		goto unlock_out;
+	}
+
+	if (vpfe_get_subdev_input_index(vpfe_dev,
+					&subdev_index,
+					&inp_index,
+					index) < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "invalid input index\n");
+		goto unlock_out;
+	}
+
+	sdinfo = &vpfe_dev->cfg->sub_devs[subdev_index];
+	route = &sdinfo->routes[inp_index];
+	if (route && sdinfo->can_route) {
+		input = route->input;
+		output = route->output;
+	}
+
+	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					 video, s_routing, input, output, 0);
+
+	if (ret) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"vpfe_doioctl:error in setting input in decoder\n");
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+	vpfe_dev->current_subdev = sdinfo;
+	vpfe_dev->current_input = index;
+	vpfe_dev->std_index = 0;
+
+	/* set the bus/interface parameter for the sub device in ccdc */
+	ret = ccdc_dev->hw_ops.set_hw_if_params(&sdinfo->ccdc_if_params);
+	if (ret)
+		goto unlock_out;
+
+	/* set the default image parameters in the device */
+	ret = vpfe_config_image_format(vpfe_dev,
+				&vpfe_standards[vpfe_dev->std_index].std_id);
+unlock_out:
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	sdinfo = vpfe_dev->current_subdev;
+	if (ret)
+		return ret;
+	/* Call querystd function of decoder device */
+	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					 video, querystd, std_id);
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
+
+	/* Call decoder driver function to set the standard */
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	sdinfo = vpfe_dev->current_subdev;
+	/* If streaming is started, return device busy error */
+	if (vpfe_dev->started) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
+		ret = -EBUSY;
+		goto unlock_out;
+	}
+
+	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					 core, s_std, *std_id);
+	if (ret < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
+		goto unlock_out;
+	}
+	ret = vpfe_config_image_format(vpfe_dev, std_id);
+
+unlock_out:
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
+
+	*std_id = vpfe_standards[vpfe_dev->std_index].std_id;
+	return 0;
+}
+/*
+ *  Videobuf operations
+ */
+static int vpfe_videobuf_setup(struct videobuf_queue *vq,
+				unsigned int *count,
+				unsigned int *size)
+{
+	struct vpfe_fh *fh = vq->priv_data;
+	struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
+	*size = vpfe_dev->fmt.fmt.pix.sizeimage;
+	if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
+		vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
+		*size = config_params.device_bufsize;
+
+	if (*count < config_params.min_numbuffers)
+		*count = config_params.min_numbuffers;
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+		"count=%d, size=%d\n", *count, *size);
+	return 0;
+}
+
+static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
+				struct videobuf_buffer *vb,
+				enum v4l2_field field)
+{
+	struct vpfe_fh *fh = vq->priv_data;
+	struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+	unsigned long addr;
+	int ret;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
+
+	/* If buffer is not initialized, initialize it */
+	if (VIDEOBUF_NEEDS_INIT == vb->state) {
+		vb->width = vpfe_dev->fmt.fmt.pix.width;
+		vb->height = vpfe_dev->fmt.fmt.pix.height;
+		vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
+		vb->field = field;
+
+		ret = videobuf_iolock(vq, vb, NULL);
+		if (ret < 0)
+			return ret;
+
+		addr = videobuf_to_dma_contig(vb);
+		/* Make sure user addresses are aligned to 32 bytes */
+		if (!IS_ALIGNED(addr, 32))
+			return -EINVAL;
+
+		vb->state = VIDEOBUF_PREPARED;
+	}
+	return 0;
+}
+
+static void vpfe_videobuf_queue(struct videobuf_queue *vq,
+				struct videobuf_buffer *vb)
+{
+	/* Get the file handle object and device object */
+	struct vpfe_fh *fh = vq->priv_data;
+	struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+	unsigned long flags;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue\n");
+
+	/* add the buffer to the DMA queue */
+	spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
+	list_add_tail(&vb->queue, &vpfe_dev->dma_queue);
+	spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
+
+	/* Change state of the buffer */
+	vb->state = VIDEOBUF_QUEUED;
+}
+
+static void vpfe_videobuf_release(struct videobuf_queue *vq,
+				  struct videobuf_buffer *vb)
+{
+	struct vpfe_fh *fh = vq->priv_data;
+	struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+	unsigned long flags;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_videobuf_release\n");
+
+	/*
+	 * We need to flush the buffers from the dma queue since
+	 * they are being de-allocated
+	 */
+	spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
+	INIT_LIST_HEAD(&vpfe_dev->dma_queue);
+	spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
+	videobuf_dma_contig_free(vq, vb);
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops vpfe_videobuf_qops = {
+	.buf_setup      = vpfe_videobuf_setup,
+	.buf_prepare    = vpfe_videobuf_prepare,
+	.buf_queue      = vpfe_videobuf_queue,
+	.buf_release    = vpfe_videobuf_release,
+};
+
+/*
+ * vpfe_reqbufs. Currently supports REQBUF only once after opening
+ * the device.
+ */
+static int vpfe_reqbufs(struct file *file, void *priv,
+			struct v4l2_requestbuffers *req_buf)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_fh *fh = file->private_data;
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	if (vpfe_dev->io_usrs != 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
+		ret = -EBUSY;
+		goto unlock_out;
+	}
+
+	vpfe_dev->memory = req_buf->memory;
+	videobuf_queue_dma_contig_init(&vpfe_dev->buffer_queue,
+				&vpfe_videobuf_qops,
+				vpfe_dev->pdev,
+				&vpfe_dev->irqlock,
+				req_buf->type,
+				vpfe_dev->fmt.fmt.pix.field,
+				sizeof(struct videobuf_buffer),
+				fh, NULL);
+
+	fh->io_allowed = 1;
+	vpfe_dev->io_usrs = 1;
+	INIT_LIST_HEAD(&vpfe_dev->dma_queue);
+	ret = videobuf_reqbufs(&vpfe_dev->buffer_queue, req_buf);
+unlock_out:
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+static int vpfe_querybuf(struct file *file, void *priv,
+			 struct v4l2_buffer *buf)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+		return  -EINVAL;
+	}
+
+	if (vpfe_dev->memory != V4L2_MEMORY_MMAP) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
+		return -EINVAL;
+	}
+	/* Call videobuf_querybuf to get information */
+	return videobuf_querybuf(&vpfe_dev->buffer_queue, buf);
+}
+
+static int vpfe_qbuf(struct file *file, void *priv,
+		     struct v4l2_buffer *p)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_fh *fh = file->private_data;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If this file handle is not allowed to do IO,
+	 * return error
+	 */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+		return -EACCES;
+	}
+	return videobuf_qbuf(&vpfe_dev->buffer_queue, p);
+}
+
+static int vpfe_dqbuf(struct file *file, void *priv,
+		      struct v4l2_buffer *buf)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+		return -EINVAL;
+	}
+	return videobuf_dqbuf(&vpfe_dev->buffer_queue,
+				      buf, file->f_flags & O_NONBLOCK);
+}
+
+static int vpfe_queryctrl(struct file *file, void *priv,
+		struct v4l2_queryctrl *qctrl)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+
+	sdinfo = vpfe_dev->current_subdev;
+
+	return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					 core, queryctrl, qctrl);
+
+}
+
+static int vpfe_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+
+	sdinfo = vpfe_dev->current_subdev;
+
+	return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					 core, g_ctrl, ctrl);
+}
+
+static int vpfe_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_subdev_info *sdinfo;
+
+	sdinfo = vpfe_dev->current_subdev;
+
+	return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					 core, s_ctrl, ctrl);
+}
+
+/*
+ * vpfe_calculate_offsets : This function calculates the buffer offsets
+ * for the top and bottom fields
+ */
+static void vpfe_calculate_offsets(struct vpfe_device *vpfe_dev)
+{
+	struct v4l2_rect image_win;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_calculate_offsets\n");
+
+	ccdc_dev->hw_ops.get_image_window(&image_win);
+	vpfe_dev->field_off = image_win.height * image_win.width;
+}
+
+/* vpfe_start_ccdc_capture: start streaming in ccdc/isif */
+static void vpfe_start_ccdc_capture(struct vpfe_device *vpfe_dev)
+{
+	ccdc_dev->hw_ops.enable(1);
+	if (ccdc_dev->hw_ops.enable_out_to_sdram)
+		ccdc_dev->hw_ops.enable_out_to_sdram(1);
+	vpfe_dev->started = 1;
+}
+
+/*
+ * vpfe_streamon. Assumes the DMA queue is not empty.
+ * The application is expected to call QBUF before calling
+ * this ioctl. If not, the driver returns an error
+ */
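+/*
+ * For reference, the userspace call sequence this driver expects (a generic
+ * V4L2 MMAP capture flow, shown only as an illustration):
+ *
+ *	VIDIOC_REQBUFS (V4L2_MEMORY_MMAP)
+ *	mmap() and VIDIOC_QBUF each buffer
+ *	VIDIOC_STREAMON
+ *	loop: VIDIOC_DQBUF -> process -> VIDIOC_QBUF
+ *	VIDIOC_STREAMOFF
+ *
+ * STREAMON before any QBUF fails below with -EIO because the buffer queue
+ * is still empty.
+ */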
+static int vpfe_streamon(struct file *file, void *priv,
+			 enum v4l2_buf_type buf_type)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_fh *fh = file->private_data;
+	struct vpfe_subdev_info *sdinfo;
+	unsigned long addr;
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+		return -EINVAL;
+	}
+
+	/* If file handle is not allowed IO, return error */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+		return -EACCES;
+	}
+
+	sdinfo = vpfe_dev->current_subdev;
+	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					video, s_stream, 1);
+
+	if (ret && (ret != -ENOIOCTLCMD)) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "stream on failed in subdev\n");
+		return -EINVAL;
+	}
+
+	/* If buffer queue is empty, return error */
+	if (list_empty(&vpfe_dev->buffer_queue.stream)) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
+		return -EIO;
+	}
+
+	/* Call videobuf_streamon to start streaming in videobuf */
+	ret = videobuf_streamon(&vpfe_dev->buffer_queue);
+	if (ret)
+		return ret;
+
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		goto streamoff;
+	/* Get the next frame from the buffer queue */
+	vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
+					struct videobuf_buffer, queue);
+	vpfe_dev->cur_frm = vpfe_dev->next_frm;
+	/* Remove buffer from the buffer queue */
+	list_del(&vpfe_dev->cur_frm->queue);
+	/* Mark state of the current frame to active */
+	vpfe_dev->cur_frm->state = VIDEOBUF_ACTIVE;
+	/* Initialize field_id and started member */
+	vpfe_dev->field_id = 0;
+	addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+
+	/* Calculate field offset */
+	vpfe_calculate_offsets(vpfe_dev);
+
+	if (vpfe_attach_irq(vpfe_dev) < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			 "Error in attaching interrupt handle\n");
+		ret = -EFAULT;
+		goto unlock_out;
+	}
+	if (ccdc_dev->hw_ops.configure() < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			 "Error in configuring ccdc\n");
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+	ccdc_dev->hw_ops.setfbaddr((unsigned long)(addr));
+	vpfe_start_ccdc_capture(vpfe_dev);
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+unlock_out:
+	mutex_unlock(&vpfe_dev->lock);
+streamoff:
+	ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
+	return ret;
+}
+
+static int vpfe_streamoff(struct file *file, void *priv,
+			  enum v4l2_buf_type buf_type)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	struct vpfe_fh *fh = file->private_data;
+	struct vpfe_subdev_info *sdinfo;
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
+		return -EINVAL;
+	}
+
+	/* If io is not allowed for this file handle, return error */
+	if (!fh->io_allowed) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
+		return -EACCES;
+	}
+
+	/* If streaming is not started, return error */
+	if (!vpfe_dev->started) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "streaming not started\n");
+		return -EINVAL;
+	}
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	vpfe_stop_ccdc_capture(vpfe_dev);
+	vpfe_detach_irq(vpfe_dev);
+
+	sdinfo = vpfe_dev->current_subdev;
+	ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
+					video, s_stream, 0);
+
+	if (ret && (ret != -ENOIOCTLCMD))
+		v4l2_err(&vpfe_dev->v4l2_dev, "stream off failed in subdev\n");
+	ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+static int vpfe_cropcap(struct file *file, void *priv,
+			      struct v4l2_cropcap *crop)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n");
+
+	if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+		return -EINVAL;
+
+	memset(crop, 0, sizeof(struct v4l2_cropcap));
+	crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	crop->bounds.width = crop->defrect.width =
+		vpfe_standards[vpfe_dev->std_index].width;
+	crop->bounds.height = crop->defrect.height =
+		vpfe_standards[vpfe_dev->std_index].height;
+	crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect;
+	return 0;
+}
+
+static int vpfe_g_crop(struct file *file, void *priv,
+			     struct v4l2_crop *crop)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_crop\n");
+
+	crop->c = vpfe_dev->crop;
+	return 0;
+}
+
+static int vpfe_s_crop(struct file *file, void *priv,
+			     struct v4l2_crop *crop)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	int ret = 0;
+
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n");
+
+	if (vpfe_dev->started) {
+		/* make sure streaming is not started */
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"Cannot change crop when streaming is ON\n");
+		return -EBUSY;
+	}
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	if (crop->c.top < 0 || crop->c.left < 0) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"doesn't support negative values for top & left\n");
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	/* adjust the width to 16 pixel boundary */
+	crop->c.width = ((crop->c.width + 15) & ~0xf);
+
+	/* make sure parameters are valid */
+	if ((crop->c.left + crop->c.width >
+		vpfe_dev->std_info.active_pixels) ||
+	    (crop->c.top + crop->c.height >
+		vpfe_dev->std_info.active_lines)) {
+		v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n");
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+	ccdc_dev->hw_ops.set_image_window(&crop->c);
+	vpfe_dev->fmt.fmt.pix.width = crop->c.width;
+	vpfe_dev->fmt.fmt.pix.height = crop->c.height;
+	vpfe_dev->fmt.fmt.pix.bytesperline =
+		ccdc_dev->hw_ops.get_line_length();
+	vpfe_dev->fmt.fmt.pix.sizeimage =
+		vpfe_dev->fmt.fmt.pix.bytesperline *
+		vpfe_dev->fmt.fmt.pix.height;
+	vpfe_dev->crop = crop->c;
+unlock_out:
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+
+static long vpfe_param_handler(struct file *file, void *priv,
+		bool valid_prio, int cmd, void *param)
+{
+	struct vpfe_device *vpfe_dev = video_drvdata(file);
+	int ret = 0;
+
+	v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
+
+	if (vpfe_dev->started) {
+		/* only allowed if streaming is not started */
+		v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+			"device already started\n");
+		return -EBUSY;
+	}
+
+	ret = mutex_lock_interruptible(&vpfe_dev->lock);
+	if (ret)
+		return ret;
+
+	switch (cmd) {
+	case VPFE_CMD_S_CCDC_RAW_PARAMS:
+		v4l2_warn(&vpfe_dev->v4l2_dev,
+			  "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+		if (ccdc_dev->hw_ops.set_params) {
+			ret = ccdc_dev->hw_ops.set_params(param);
+			if (ret) {
+				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+					"Error setting parameters in CCDC\n");
+				goto unlock_out;
+			}
+			if (vpfe_get_ccdc_image_format(vpfe_dev,
+						       &vpfe_dev->fmt) < 0) {
+				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+					"Invalid image format at CCDC\n");
+				goto unlock_out;
+			}
+		} else {
+			ret = -EINVAL;
+			v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+				"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+		}
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+unlock_out:
+	mutex_unlock(&vpfe_dev->lock);
+	return ret;
+}
+
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+	.vidioc_querycap	 = vpfe_querycap,
+	.vidioc_g_fmt_vid_cap    = vpfe_g_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_cap = vpfe_enum_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap    = vpfe_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap  = vpfe_try_fmt_vid_cap,
+	.vidioc_enum_input	 = vpfe_enum_input,
+	.vidioc_g_input		 = vpfe_g_input,
+	.vidioc_s_input		 = vpfe_s_input,
+	.vidioc_querystd	 = vpfe_querystd,
+	.vidioc_s_std		 = vpfe_s_std,
+	.vidioc_g_std		 = vpfe_g_std,
+	.vidioc_queryctrl	 = vpfe_queryctrl,
+	.vidioc_g_ctrl		 = vpfe_g_ctrl,
+	.vidioc_s_ctrl		 = vpfe_s_ctrl,
+	.vidioc_reqbufs		 = vpfe_reqbufs,
+	.vidioc_querybuf	 = vpfe_querybuf,
+	.vidioc_qbuf		 = vpfe_qbuf,
+	.vidioc_dqbuf		 = vpfe_dqbuf,
+	.vidioc_streamon	 = vpfe_streamon,
+	.vidioc_streamoff	 = vpfe_streamoff,
+	.vidioc_cropcap		 = vpfe_cropcap,
+	.vidioc_g_crop		 = vpfe_g_crop,
+	.vidioc_s_crop		 = vpfe_s_crop,
+	.vidioc_default		 = vpfe_param_handler,
+};
+
+static struct vpfe_device *vpfe_initialize(void)
+{
+	struct vpfe_device *vpfe_dev;
+
+	/* Default number of buffers should be 3 */
+	if ((numbuffers > 0) &&
+	    (numbuffers < config_params.min_numbuffers))
+		numbuffers = config_params.min_numbuffers;
+
+	/*
+	 * Set the buffer size to the minimum buffer size if an invalid
+	 * buffer size is given
+	 */
+	if (bufsize < config_params.min_bufsize)
+		bufsize = config_params.min_bufsize;
+
+	config_params.numbuffers = numbuffers;
+
+	if (numbuffers)
+		config_params.device_bufsize = bufsize;
+
+	/* Allocate memory for device objects */
+	vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
+
+	return vpfe_dev;
+}
+
+/*
+ * vpfe_probe : This function creates device entries by registering
+ * itself with the V4L2 driver and initializes the fields of each
+ * device object
+ */
+static __devinit int vpfe_probe(struct platform_device *pdev)
+{
+	struct vpfe_subdev_info *sdinfo;
+	struct vpfe_config *vpfe_cfg;
+	struct resource *res1;
+	struct vpfe_device *vpfe_dev;
+	struct i2c_adapter *i2c_adap;
+	struct video_device *vfd;
+	int ret = -ENOMEM, i, j;
+	int num_subdevs = 0;
+
+	/* Get the pointer to the device object */
+	vpfe_dev = vpfe_initialize();
+
+	if (!vpfe_dev) {
+		v4l2_err(pdev->dev.driver,
+			"Failed to allocate memory for vpfe_dev\n");
+		return ret;
+	}
+
+	vpfe_dev->pdev = &pdev->dev;
+
+	if (NULL == pdev->dev.platform_data) {
+		v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
+		ret = -ENODEV;
+		goto probe_free_dev_mem;
+	}
+
+	vpfe_cfg = pdev->dev.platform_data;
+	vpfe_dev->cfg = vpfe_cfg;
+	if (NULL == vpfe_cfg->ccdc ||
+	    NULL == vpfe_cfg->card_name ||
+	    NULL == vpfe_cfg->sub_devs) {
+		v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
+		ret = -ENOENT;
+		goto probe_free_dev_mem;
+	}
+
+	/* Allocate memory for ccdc configuration */
+	ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
+	if (NULL == ccdc_cfg) {
+		v4l2_err(pdev->dev.driver,
+			 "Memory allocation failed for ccdc_cfg\n");
+		goto probe_free_lock;
+	}
+
+	mutex_lock(&ccdc_lock);
+
+	strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
+	/* Get VINT0 irq resource */
+	res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res1) {
+		v4l2_err(pdev->dev.driver,
+			 "Unable to get interrupt for VINT0\n");
+		ret = -ENODEV;
+		goto probe_free_ccdc_cfg_mem;
+	}
+	vpfe_dev->ccdc_irq0 = res1->start;
+
+	/* Get VINT1 irq resource */
+	res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!res1) {
+		v4l2_err(pdev->dev.driver,
+			 "Unable to get interrupt for VINT1\n");
+		ret = -ENODEV;
+		goto probe_free_ccdc_cfg_mem;
+	}
+	vpfe_dev->ccdc_irq1 = res1->start;
+
+	ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED,
+			  "vpfe_capture0", vpfe_dev);
+
+	if (0 != ret) {
+		v4l2_err(pdev->dev.driver, "Unable to request interrupt\n");
+		goto probe_free_ccdc_cfg_mem;
+	}
+
+	/* Allocate memory for video device */
+	vfd = video_device_alloc();
+	if (NULL == vfd) {
+		ret = -ENOMEM;
+		v4l2_err(pdev->dev.driver, "Unable to alloc video device\n");
+		goto probe_out_release_irq;
+	}
+
+	/* Initialize field of video device */
+	vfd->release		= video_device_release;
+	vfd->fops		= &vpfe_fops;
+	vfd->ioctl_ops		= &vpfe_ioctl_ops;
+	vfd->tvnorms		= 0;
+	vfd->current_norm	= V4L2_STD_PAL;
+	vfd->v4l2_dev 		= &vpfe_dev->v4l2_dev;
+	snprintf(vfd->name, sizeof(vfd->name),
+		 "%s_V%d.%d.%d",
+		 CAPTURE_DRV_NAME,
+		 (VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff,
+		 (VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff,
+		 (VPFE_CAPTURE_VERSION_CODE) & 0xff);
+	/* Set video_dev to the video device */
+	vpfe_dev->video_dev	= vfd;
+
+	ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
+	if (ret) {
+		v4l2_err(pdev->dev.driver,
+			"Unable to register v4l2 device.\n");
+		goto probe_out_video_release;
+	}
+	v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
+	spin_lock_init(&vpfe_dev->irqlock);
+	spin_lock_init(&vpfe_dev->dma_queue_lock);
+	mutex_init(&vpfe_dev->lock);
+
+	/* Initialize field of the device objects */
+	vpfe_dev->numbuffers = config_params.numbuffers;
+
+	/* Initialize prio member of device object */
+	v4l2_prio_init(&vpfe_dev->prio);
+	/* register video device */
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+		"trying to register vpfe device.\n");
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+		"video_dev=%x\n", (int)&vpfe_dev->video_dev);
+	vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	ret = video_register_device(vpfe_dev->video_dev,
+				    VFL_TYPE_GRABBER, -1);
+
+	if (ret) {
+		v4l2_err(pdev->dev.driver,
+			"Unable to register video device.\n");
+		goto probe_out_v4l2_unregister;
+	}
+
+	v4l2_info(&vpfe_dev->v4l2_dev, "video device registered\n");
+	/* set the driver data in platform device */
+	platform_set_drvdata(pdev, vpfe_dev);
+	/* set driver private data */
+	video_set_drvdata(vpfe_dev->video_dev, vpfe_dev);
+	i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
+	num_subdevs = vpfe_cfg->num_subdevs;
+	vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs,
+				GFP_KERNEL);
+	if (NULL == vpfe_dev->sd) {
+		v4l2_err(&vpfe_dev->v4l2_dev,
+			"unable to allocate memory for subdevice pointers\n");
+		ret = -ENOMEM;
+		goto probe_out_video_unregister;
+	}
+
+	for (i = 0; i < num_subdevs; i++) {
+		struct v4l2_input *inps;
+
+		sdinfo = &vpfe_cfg->sub_devs[i];
+
+		/* Load up the subdevice */
+		vpfe_dev->sd[i] =
+			v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
+						  i2c_adap,
+						  &sdinfo->board_info,
+						  NULL);
+		if (vpfe_dev->sd[i]) {
+			v4l2_info(&vpfe_dev->v4l2_dev,
+				  "v4l2 sub device %s registered\n",
+				  sdinfo->name);
+			vpfe_dev->sd[i]->grp_id = sdinfo->grp_id;
+			/* update tvnorms from the sub devices */
+			for (j = 0; j < sdinfo->num_inputs; j++) {
+				inps = &sdinfo->inputs[j];
+				vfd->tvnorms |= inps->std;
+			}
+		} else {
+			v4l2_info(&vpfe_dev->v4l2_dev,
+				  "v4l2 sub device %s register fails\n",
+				  sdinfo->name);
+			goto probe_sd_out;
+		}
+	}
+
+	/* set first sub device as current one */
+	vpfe_dev->current_subdev = &vpfe_cfg->sub_devs[0];
+
+	/* We have at least one sub device to work with */
+	mutex_unlock(&ccdc_lock);
+	return 0;
+
+probe_sd_out:
+	kfree(vpfe_dev->sd);
+probe_out_video_unregister:
+	video_unregister_device(vpfe_dev->video_dev);
+probe_out_v4l2_unregister:
+	v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+probe_out_video_release:
+	if (!video_is_registered(vpfe_dev->video_dev))
+		video_device_release(vpfe_dev->video_dev);
+probe_out_release_irq:
+	free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+probe_free_ccdc_cfg_mem:
+	kfree(ccdc_cfg);
+probe_free_lock:
+	mutex_unlock(&ccdc_lock);
+probe_free_dev_mem:
+	kfree(vpfe_dev);
+	return ret;
+}
+
+/*
+ * vpfe_remove : It un-registers the device from the V4L2 driver
+ */
+static int __devexit vpfe_remove(struct platform_device *pdev)
+{
+	struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
+
+	v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+
+	free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
+	kfree(vpfe_dev->sd);
+	v4l2_device_unregister(&vpfe_dev->v4l2_dev);
+	video_unregister_device(vpfe_dev->video_dev);
+	kfree(vpfe_dev);
+	kfree(ccdc_cfg);
+	return 0;
+}
+
+static int vpfe_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int vpfe_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops vpfe_dev_pm_ops = {
+	.suspend = vpfe_suspend,
+	.resume = vpfe_resume,
+};
+
+static struct platform_driver vpfe_driver = {
+	.driver = {
+		.name = CAPTURE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &vpfe_dev_pm_ops,
+	},
+	.probe = vpfe_probe,
+	.remove = __devexit_p(vpfe_remove),
+};
+
+module_platform_driver(vpfe_driver);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
new file mode 100644
index 0000000..9bd3caa
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif.c
@@ -0,0 +1,514 @@
+/*
+ * vpif - Video Port Interface driver
+ * VPIF is a receiver and transmitter for video data. It has two channels
+ * (0, 1) that receive the incoming video byte stream and two channels (2, 3)
+ * for video output.
+ * The hardware supports SDTV, HDTV formats, raw data capture.
+ * Currently, the driver supports NTSC and PAL standards.
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <mach/hardware.h>
+
+#include "vpif.h"
+
+MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver");
+MODULE_LICENSE("GPL");
+
+#define VPIF_CH0_MAX_MODES	(22)
+#define VPIF_CH1_MAX_MODES	(02)
+#define VPIF_CH2_MAX_MODES	(15)
+#define VPIF_CH3_MAX_MODES	(02)
+
+static resource_size_t	res_len;
+static struct resource	*res;
+spinlock_t vpif_lock;
+
+void __iomem *vpif_base;
+struct clk *vpif_clk;
+
+/**
+ * ch_params: video standard configuration parameters for vpif
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params ch_params[] = {
+	/* HDTV formats */
+	{
+		.name = "480p59_94",
+		.width = 720,
+		.height = 480,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 138-8,
+		.sav2eav = 720,
+		.l1 = 1,
+		.l3 = 43,
+		.l5 = 523,
+		.vsize = 525,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_480P59_94,
+	},
+	{
+		.name = "576p50",
+		.width = 720,
+		.height = 576,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 144-8,
+		.sav2eav = 720,
+		.l1 = 1,
+		.l3 = 45,
+		.l5 = 621,
+		.vsize = 625,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_576P50,
+	},
+	{
+		.name = "720p50",
+		.width = 1280,
+		.height = 720,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 700-8,
+		.sav2eav = 1280,
+		.l1 = 1,
+		.l3 = 26,
+		.l5 = 746,
+		.vsize = 750,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_720P50,
+	},
+	{
+		.name = "720p60",
+		.width = 1280,
+		.height = 720,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 370 - 8,
+		.sav2eav = 1280,
+		.l1 = 1,
+		.l3 = 26,
+		.l5 = 746,
+		.vsize = 750,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_720P60,
+	},
+	{
+		.name = "1080I50",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 0,
+		.ycmux_mode = 0,
+		.eav2sav = 720 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 21,
+		.l5 = 561,
+		.l7 = 563,
+		.l9 = 584,
+		.l11 = 1124,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080I50,
+	},
+	{
+		.name = "1080I60",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 0,
+		.ycmux_mode = 0,
+		.eav2sav = 280 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 21,
+		.l5 = 561,
+		.l7 = 563,
+		.l9 = 584,
+		.l11 = 1124,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080I60,
+	},
+	{
+		.name = "1080p60",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 280 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 42,
+		.l5 = 1122,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080P60,
+	},
+
+	/* SDTV formats */
+	{
+		.name = "NTSC_M",
+		.width = 720,
+		.height = 480,
+		.frm_fmt = 0,
+		.ycmux_mode = 1,
+		.eav2sav = 268,
+		.sav2eav = 1440,
+		.l1 = 1,
+		.l3 = 23,
+		.l5 = 263,
+		.l7 = 266,
+		.l9 = 286,
+		.l11 = 525,
+		.vsize = 525,
+		.capture_format = 0,
+		.vbi_supported = 1,
+		.hd_sd = 0,
+		.stdid = V4L2_STD_525_60,
+	},
+	{
+		.name = "PAL_BDGHIK",
+		.width = 720,
+		.height = 576,
+		.frm_fmt = 0,
+		.ycmux_mode = 1,
+		.eav2sav = 280,
+		.sav2eav = 1440,
+		.l1 = 1,
+		.l3 = 23,
+		.l5 = 311,
+		.l7 = 313,
+		.l9 = 336,
+		.l11 = 624,
+		.vsize = 625,
+		.capture_format = 0,
+		.vbi_supported = 1,
+		.hd_sd = 0,
+		.stdid = V4L2_STD_625_50,
+	},
+};
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params);
+
+static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
+{
+	if (val)
+		vpif_set_bit(reg, bit);
+	else
+		vpif_clr_bit(reg, bit);
+}
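+
+/*
+ * Usage sketch (illustrative only): config_vpif_params() below uses this
+ * helper to mirror a boolean configuration flag into a single control bit,
+ * for example:
+ *
+ *	vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode);
+ */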
+
+/* This structure is used to keep track of the VPIF size registers' offsets */
+struct vpif_registers {
+	u32 h_cfg, v_cfg_00, v_cfg_01, v_cfg_02, v_cfg, ch_ctrl;
+	u32 line_offset, vanc0_strt, vanc0_size, vanc1_strt;
+	u32 vanc1_size, width_mask, len_mask;
+	u8 max_modes;
+};
+
+static const struct vpif_registers vpifregs[VPIF_NUM_CHANNELS] = {
+	/* Channel0 */
+	{
+		VPIF_CH0_H_CFG, VPIF_CH0_V_CFG_00, VPIF_CH0_V_CFG_01,
+		VPIF_CH0_V_CFG_02, VPIF_CH0_V_CFG_03, VPIF_CH0_CTRL,
+		VPIF_CH0_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
+		VPIF_CH0_MAX_MODES,
+	},
+	/* Channel1 */
+	{
+		VPIF_CH1_H_CFG, VPIF_CH1_V_CFG_00, VPIF_CH1_V_CFG_01,
+		VPIF_CH1_V_CFG_02, VPIF_CH1_V_CFG_03, VPIF_CH1_CTRL,
+		VPIF_CH1_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
+		VPIF_CH1_MAX_MODES,
+	},
+	/* Channel2 */
+	{
+		VPIF_CH2_H_CFG, VPIF_CH2_V_CFG_00, VPIF_CH2_V_CFG_01,
+		VPIF_CH2_V_CFG_02, VPIF_CH2_V_CFG_03, VPIF_CH2_CTRL,
+		VPIF_CH2_IMG_ADD_OFST, VPIF_CH2_VANC0_STRT, VPIF_CH2_VANC0_SIZE,
+		VPIF_CH2_VANC1_STRT, VPIF_CH2_VANC1_SIZE, 0x7FF, 0x7FF,
+		VPIF_CH2_MAX_MODES
+	},
+	/* Channel3 */
+	{
+		VPIF_CH3_H_CFG, VPIF_CH3_V_CFG_00, VPIF_CH3_V_CFG_01,
+		VPIF_CH3_V_CFG_02, VPIF_CH3_V_CFG_03, VPIF_CH3_CTRL,
+		VPIF_CH3_IMG_ADD_OFST, VPIF_CH3_VANC0_STRT, VPIF_CH3_VANC0_SIZE,
+		VPIF_CH3_VANC1_STRT, VPIF_CH3_VANC1_SIZE, 0x7FF, 0x7FF,
+		VPIF_CH3_MAX_MODES
+	},
+};
+
+/* vpif_set_mode_info:
+ * This function is used to set the horizontal and vertical config parameters.
+ * As per the standard selected for the channel, it configures the values of
+ * L1, L3, L5, L7, L9 and L11 in the VPIF registers, and also writes the width
+ * and height.
+ */
+static void vpif_set_mode_info(const struct vpif_channel_config_params *config,
+				u8 channel_id, u8 config_channel_id)
+{
+	u32 value;
+
+	value = (config->eav2sav & vpifregs[config_channel_id].width_mask);
+	value <<= VPIF_CH_LEN_SHIFT;
+	value |= (config->sav2eav & vpifregs[config_channel_id].width_mask);
+	regw(value, vpifregs[channel_id].h_cfg);
+
+	value = (config->l1 & vpifregs[config_channel_id].len_mask);
+	value <<= VPIF_CH_LEN_SHIFT;
+	value |= (config->l3 & vpifregs[config_channel_id].len_mask);
+	regw(value, vpifregs[channel_id].v_cfg_00);
+
+	value = (config->l5 & vpifregs[config_channel_id].len_mask);
+	value <<= VPIF_CH_LEN_SHIFT;
+	value |= (config->l7 & vpifregs[config_channel_id].len_mask);
+	regw(value, vpifregs[channel_id].v_cfg_01);
+
+	value = (config->l9 & vpifregs[config_channel_id].len_mask);
+	value <<= VPIF_CH_LEN_SHIFT;
+	value |= (config->l11 & vpifregs[config_channel_id].len_mask);
+	regw(value, vpifregs[channel_id].v_cfg_02);
+
+	value = (config->vsize & vpifregs[config_channel_id].len_mask);
+	regw(value, vpifregs[channel_id].v_cfg);
+}
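+
+/*
+ * Worked example (for illustration): for the "576p50" entry above,
+ * eav2sav = 144 - 8 = 136 (0x88) and sav2eav = 720 (0x2D0), so with the
+ * 0x1FFF width mask and VPIF_CH_LEN_SHIFT = 16 the H_CFG register gets
+ * (0x88 << 16) | 0x2D0 = 0x008802D0.  Likewise l1 = 1 and l3 = 45 pack
+ * into V_CFG_00 as (1 << 16) | 45 = 0x0001002D.
+ */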
+
+/* config_vpif_params
+ * Function to set the parameters of a channel.
+ * Mainly modifies the channel control register;
+ * it sets the frame format and YC mux mode.
+ */
+static void config_vpif_params(struct vpif_params *vpifparams,
+				u8 channel_id, u8 found)
+{
+	const struct vpif_channel_config_params *config = &vpifparams->std_info;
+	u32 value, ch_nip, reg;
+	u8 start, end;
+	int i;
+
+	start = channel_id;
+	end = channel_id + found;
+
+	for (i = start; i < end; i++) {
+		reg = vpifregs[i].ch_ctrl;
+		if (channel_id < 2)
+			ch_nip = VPIF_CAPTURE_CH_NIP;
+		else
+			ch_nip = VPIF_DISPLAY_CH_NIP;
+
+		vpif_wr_bit(reg, ch_nip, config->frm_fmt);
+		vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode);
+		vpif_wr_bit(reg, VPIF_CH_INPUT_FIELD_FRAME_BIT,
+					vpifparams->video_params.storage_mode);
+
+		/* Set raster scanning SDR Format */
+		vpif_clr_bit(reg, VPIF_CH_SDR_FMT_BIT);
+		vpif_wr_bit(reg, VPIF_CH_DATA_MODE_BIT, config->capture_format);
+
+		if (channel_id > 1)	/* Set the Pixel enable bit */
+			vpif_set_bit(reg, VPIF_DISPLAY_PIX_EN_BIT);
+		else if (config->capture_format) {
+			/* Set the polarity of various pins */
+			vpif_wr_bit(reg, VPIF_CH_FID_POLARITY_BIT,
+					vpifparams->iface.fid_pol);
+			vpif_wr_bit(reg, VPIF_CH_V_VALID_POLARITY_BIT,
+					vpifparams->iface.vd_pol);
+			vpif_wr_bit(reg, VPIF_CH_H_VALID_POLARITY_BIT,
+					vpifparams->iface.hd_pol);
+
+			value = regr(reg);
+			/* Set data width */
+			value &= ~(0x3u <<
+					VPIF_CH_DATA_WIDTH_BIT);
+			value |= ((vpifparams->params.data_sz) <<
+						     VPIF_CH_DATA_WIDTH_BIT);
+			regw(value, reg);
+		}
+
+		/* Write the pitch in the driver */
+		regw((vpifparams->video_params.hpitch),
+						vpifregs[i].line_offset);
+	}
+}
+
+/* vpif_set_video_params
+ * This function is used to set video parameters in the VPIF registers
+ */
+int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id)
+{
+	const struct vpif_channel_config_params *config = &vpifparams->std_info;
+	int found = 1;
+
+	vpif_set_mode_info(config, channel_id, channel_id);
+	if (!config->ycmux_mode) {
+		/* YC are on separate channels (HDTV formats) */
+		vpif_set_mode_info(config, channel_id + 1, channel_id);
+		found = 2;
+	}
+
+	config_vpif_params(vpifparams, channel_id, found);
+
+	regw(0x80, VPIF_REQ_SIZE);
+	regw(0x01, VPIF_EMULATION_CTRL);
+
+	return found;
+}
+EXPORT_SYMBOL(vpif_set_video_params);
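+
+/*
+ * Usage sketch (illustrative): a capture or display driver configures a
+ * channel and uses the return value to decide whether a second channel was
+ * claimed for Y/C non-mux (HDTV) operation, e.g.:
+ *
+ *	int channels = vpif_set_video_params(&ch->vpifparams, ch->channel_id);
+ *	if (channels == 2)
+ *		... enable the paired channel as well ...
+ */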
+
+void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
+				u8 channel_id)
+{
+	u32 value;
+
+	value = 0x3F8 & (vbiparams->hstart0);
+	value |= 0x3FFFFFF & ((vbiparams->vstart0) << 16);
+	regw(value, vpifregs[channel_id].vanc0_strt);
+
+	value = 0x3F8 & (vbiparams->hstart1);
+	value |= 0x3FFFFFF & ((vbiparams->vstart1) << 16);
+	regw(value, vpifregs[channel_id].vanc1_strt);
+
+	value = 0x3F8 & (vbiparams->hsize0);
+	value |= 0x3FFFFFF & ((vbiparams->vsize0) << 16);
+	regw(value, vpifregs[channel_id].vanc0_size);
+
+	value = 0x3F8 & (vbiparams->hsize1);
+	value |= 0x3FFFFFF & ((vbiparams->vsize1) << 16);
+	regw(value, vpifregs[channel_id].vanc1_size);
+
+}
+EXPORT_SYMBOL(vpif_set_vbi_display_params);
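+
+/*
+ * Illustrative note: each VANC start/size register packs the vertical value
+ * into the upper half-word and keeps the horizontal value on an 8-pixel
+ * boundary via the 0x3F8 mask.  For example, hstart0 = 720 (0x2D0) and
+ * vstart0 = 1 would be written out as (1 << 16) | 0x2D0 = 0x000102D0.
+ */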
+
+int vpif_channel_getfid(u8 channel_id)
+{
+	return (regr(vpifregs[channel_id].ch_ctrl) & VPIF_CH_FID_MASK)
+					>> VPIF_CH_FID_SHIFT;
+}
+EXPORT_SYMBOL(vpif_channel_getfid);
+
+static int __devinit vpif_probe(struct platform_device *pdev)
+{
+	int status = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENOENT;
+
+	res_len = resource_size(res);
+
+	res = request_mem_region(res->start, res_len, res->name);
+	if (!res)
+		return -EBUSY;
+
+	vpif_base = ioremap(res->start, res_len);
+	if (!vpif_base) {
+		status = -EBUSY;
+		goto fail;
+	}
+
+	vpif_clk = clk_get(&pdev->dev, "vpif");
+	if (IS_ERR(vpif_clk)) {
+		status = PTR_ERR(vpif_clk);
+		goto clk_fail;
+	}
+	clk_enable(vpif_clk);
+
+	spin_lock_init(&vpif_lock);
+	dev_info(&pdev->dev, "vpif probe success\n");
+	return 0;
+
+clk_fail:
+	iounmap(vpif_base);
+fail:
+	release_mem_region(res->start, res_len);
+	return status;
+}
+
+static int __devexit vpif_remove(struct platform_device *pdev)
+{
+	if (vpif_clk) {
+		clk_disable(vpif_clk);
+		clk_put(vpif_clk);
+	}
+
+	iounmap(vpif_base);
+	release_mem_region(res->start, res_len);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int vpif_suspend(struct device *dev)
+{
+	clk_disable(vpif_clk);
+	return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+	clk_enable(vpif_clk);
+	return 0;
+}
+
+static const struct dev_pm_ops vpif_pm = {
+	.suspend        = vpif_suspend,
+	.resume         = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_pm)
+#else
+#define vpif_pm_ops NULL
+#endif
+
+static struct platform_driver vpif_driver = {
+	.driver = {
+		.name	= "vpif",
+		.owner = THIS_MODULE,
+		.pm	= vpif_pm_ops,
+	},
+	.remove = __devexit_p(vpif_remove),
+	.probe = vpif_probe,
+};
+
+static void vpif_exit(void)
+{
+	platform_driver_unregister(&vpif_driver);
+}
+
+static int __init vpif_init(void)
+{
+	return platform_driver_register(&vpif_driver);
+}
+subsys_initcall(vpif_init);
+module_exit(vpif_exit);
+
diff --git a/drivers/media/platform/davinci/vpif.h b/drivers/media/platform/davinci/vpif.h
new file mode 100644
index 0000000..c2ce4d9
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif.h
@@ -0,0 +1,688 @@
+/*
+ * VPIF header file
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef VPIF_H
+#define VPIF_H
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <media/davinci/vpif_types.h>
+
+/* Maximum channel allowed */
+#define VPIF_NUM_CHANNELS		(4)
+#define VPIF_CAPTURE_NUM_CHANNELS	(2)
+#define VPIF_DISPLAY_NUM_CHANNELS	(2)
+
+/* Macros to read/write registers */
+extern void __iomem *vpif_base;
+extern spinlock_t vpif_lock;
+
+#define regr(reg)               readl((reg) + vpif_base)
+#define regw(value, reg)        writel(value, (reg + vpif_base))
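+
+/*
+ * Usage sketch (illustrative): all register accesses in this header go
+ * through these helpers as read-modify-write sequences, e.g. enabling
+ * channel 0 amounts to
+ *
+ *	regw(regr(VPIF_CH0_CTRL) | VPIF_CH0_EN, VPIF_CH0_CTRL);
+ *
+ * which is exactly what enable_channel0() further down does.
+ */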
+
+/* Register Address Offsets */
+#define VPIF_PID			(0x0000)
+#define VPIF_CH0_CTRL			(0x0004)
+#define VPIF_CH1_CTRL			(0x0008)
+#define VPIF_CH2_CTRL			(0x000C)
+#define VPIF_CH3_CTRL			(0x0010)
+
+#define VPIF_INTEN			(0x0020)
+#define VPIF_INTEN_SET			(0x0024)
+#define VPIF_INTEN_CLR			(0x0028)
+#define VPIF_STATUS			(0x002C)
+#define VPIF_STATUS_CLR			(0x0030)
+#define VPIF_EMULATION_CTRL		(0x0034)
+#define VPIF_REQ_SIZE			(0x0038)
+
+#define VPIF_CH0_TOP_STRT_ADD_LUMA	(0x0040)
+#define VPIF_CH0_BTM_STRT_ADD_LUMA	(0x0044)
+#define VPIF_CH0_TOP_STRT_ADD_CHROMA	(0x0048)
+#define VPIF_CH0_BTM_STRT_ADD_CHROMA	(0x004c)
+#define VPIF_CH0_TOP_STRT_ADD_HANC	(0x0050)
+#define VPIF_CH0_BTM_STRT_ADD_HANC	(0x0054)
+#define VPIF_CH0_TOP_STRT_ADD_VANC	(0x0058)
+#define VPIF_CH0_BTM_STRT_ADD_VANC	(0x005c)
+#define VPIF_CH0_SP_CFG			(0x0060)
+#define VPIF_CH0_IMG_ADD_OFST		(0x0064)
+#define VPIF_CH0_HANC_ADD_OFST		(0x0068)
+#define VPIF_CH0_H_CFG			(0x006c)
+#define VPIF_CH0_V_CFG_00		(0x0070)
+#define VPIF_CH0_V_CFG_01		(0x0074)
+#define VPIF_CH0_V_CFG_02		(0x0078)
+#define VPIF_CH0_V_CFG_03		(0x007c)
+
+#define VPIF_CH1_TOP_STRT_ADD_LUMA	(0x0080)
+#define VPIF_CH1_BTM_STRT_ADD_LUMA	(0x0084)
+#define VPIF_CH1_TOP_STRT_ADD_CHROMA	(0x0088)
+#define VPIF_CH1_BTM_STRT_ADD_CHROMA	(0x008c)
+#define VPIF_CH1_TOP_STRT_ADD_HANC	(0x0090)
+#define VPIF_CH1_BTM_STRT_ADD_HANC	(0x0094)
+#define VPIF_CH1_TOP_STRT_ADD_VANC	(0x0098)
+#define VPIF_CH1_BTM_STRT_ADD_VANC	(0x009c)
+#define VPIF_CH1_SP_CFG			(0x00a0)
+#define VPIF_CH1_IMG_ADD_OFST		(0x00a4)
+#define VPIF_CH1_HANC_ADD_OFST		(0x00a8)
+#define VPIF_CH1_H_CFG			(0x00ac)
+#define VPIF_CH1_V_CFG_00		(0x00b0)
+#define VPIF_CH1_V_CFG_01		(0x00b4)
+#define VPIF_CH1_V_CFG_02		(0x00b8)
+#define VPIF_CH1_V_CFG_03		(0x00bc)
+
+#define VPIF_CH2_TOP_STRT_ADD_LUMA	(0x00c0)
+#define VPIF_CH2_BTM_STRT_ADD_LUMA	(0x00c4)
+#define VPIF_CH2_TOP_STRT_ADD_CHROMA	(0x00c8)
+#define VPIF_CH2_BTM_STRT_ADD_CHROMA	(0x00cc)
+#define VPIF_CH2_TOP_STRT_ADD_HANC	(0x00d0)
+#define VPIF_CH2_BTM_STRT_ADD_HANC	(0x00d4)
+#define VPIF_CH2_TOP_STRT_ADD_VANC	(0x00d8)
+#define VPIF_CH2_BTM_STRT_ADD_VANC	(0x00dc)
+#define VPIF_CH2_SP_CFG			(0x00e0)
+#define VPIF_CH2_IMG_ADD_OFST		(0x00e4)
+#define VPIF_CH2_HANC_ADD_OFST		(0x00e8)
+#define VPIF_CH2_H_CFG			(0x00ec)
+#define VPIF_CH2_V_CFG_00		(0x00f0)
+#define VPIF_CH2_V_CFG_01		(0x00f4)
+#define VPIF_CH2_V_CFG_02		(0x00f8)
+#define VPIF_CH2_V_CFG_03		(0x00fc)
+#define VPIF_CH2_HANC0_STRT		(0x0100)
+#define VPIF_CH2_HANC0_SIZE		(0x0104)
+#define VPIF_CH2_HANC1_STRT		(0x0108)
+#define VPIF_CH2_HANC1_SIZE		(0x010c)
+#define VPIF_CH2_VANC0_STRT		(0x0110)
+#define VPIF_CH2_VANC0_SIZE		(0x0114)
+#define VPIF_CH2_VANC1_STRT		(0x0118)
+#define VPIF_CH2_VANC1_SIZE		(0x011c)
+
+#define VPIF_CH3_TOP_STRT_ADD_LUMA	(0x0140)
+#define VPIF_CH3_BTM_STRT_ADD_LUMA	(0x0144)
+#define VPIF_CH3_TOP_STRT_ADD_CHROMA	(0x0148)
+#define VPIF_CH3_BTM_STRT_ADD_CHROMA	(0x014c)
+#define VPIF_CH3_TOP_STRT_ADD_HANC	(0x0150)
+#define VPIF_CH3_BTM_STRT_ADD_HANC	(0x0154)
+#define VPIF_CH3_TOP_STRT_ADD_VANC	(0x0158)
+#define VPIF_CH3_BTM_STRT_ADD_VANC	(0x015c)
+#define VPIF_CH3_SP_CFG			(0x0160)
+#define VPIF_CH3_IMG_ADD_OFST		(0x0164)
+#define VPIF_CH3_HANC_ADD_OFST		(0x0168)
+#define VPIF_CH3_H_CFG			(0x016c)
+#define VPIF_CH3_V_CFG_00		(0x0170)
+#define VPIF_CH3_V_CFG_01		(0x0174)
+#define VPIF_CH3_V_CFG_02		(0x0178)
+#define VPIF_CH3_V_CFG_03		(0x017c)
+#define VPIF_CH3_HANC0_STRT		(0x0180)
+#define VPIF_CH3_HANC0_SIZE		(0x0184)
+#define VPIF_CH3_HANC1_STRT		(0x0188)
+#define VPIF_CH3_HANC1_SIZE		(0x018c)
+#define VPIF_CH3_VANC0_STRT		(0x0190)
+#define VPIF_CH3_VANC0_SIZE		(0x0194)
+#define VPIF_CH3_VANC1_STRT		(0x0198)
+#define VPIF_CH3_VANC1_SIZE		(0x019c)
+
+#define VPIF_IODFT_CTRL			(0x01c0)
+
+/* Functions for bit Manipulation */
+static inline void vpif_set_bit(u32 reg, u32 bit)
+{
+	regw((regr(reg)) | (0x01 << bit), reg);
+}
+
+static inline void vpif_clr_bit(u32 reg, u32 bit)
+{
+	regw(((regr(reg)) & ~(0x01 << bit)), reg);
+}
+
+/* Macro for Generating mask */
+#ifdef GENERATE_MASK
+#undef GENERATE_MASK
+#endif
+
+#define GENERATE_MASK(bits, pos) \
+		((((0xFFFFFFFF) << (32 - bits)) >> (32 - bits)) << pos)
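+
+/*
+ * Worked example (for illustration): GENERATE_MASK(13, 0) evaluates to
+ * 0x1FFF and GENERATE_MASK(12, 0) to 0xFFF, matching the width and length
+ * masks used for channels 0 and 1 in the vpifregs table in vpif.c.
+ */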
+
+/* Bit positions in the channel control registers */
+#define VPIF_CH_DATA_MODE_BIT	(2)
+#define VPIF_CH_YC_MUX_BIT	(3)
+#define VPIF_CH_SDR_FMT_BIT	(4)
+#define VPIF_CH_HANC_EN_BIT	(8)
+#define VPIF_CH_VANC_EN_BIT	(9)
+
+#define VPIF_CAPTURE_CH_NIP	(10)
+#define VPIF_DISPLAY_CH_NIP	(11)
+
+#define VPIF_DISPLAY_PIX_EN_BIT	(10)
+
+#define VPIF_CH_INPUT_FIELD_FRAME_BIT	(12)
+
+#define VPIF_CH_FID_POLARITY_BIT	(15)
+#define VPIF_CH_V_VALID_POLARITY_BIT	(14)
+#define VPIF_CH_H_VALID_POLARITY_BIT	(13)
+#define VPIF_CH_DATA_WIDTH_BIT		(28)
+
+#define VPIF_CH_CLK_EDGE_CTRL_BIT	(31)
+
+/* Masks of various lengths */
+#define VPIF_CH_EAVSAV_MASK	GENERATE_MASK(13, 0)
+#define VPIF_CH_LEN_MASK	GENERATE_MASK(12, 0)
+#define VPIF_CH_WIDTH_MASK	GENERATE_MASK(13, 0)
+#define VPIF_CH_LEN_SHIFT	(16)
+
+/* VPIF masks for registers */
+#define VPIF_REQ_SIZE_MASK	(0x1ff)
+
+/* bit positions of the frame interrupts in the VPIF interrupt registers */
+#define VPIF_INTEN_FRAME_CH0	(0x00000001)
+#define VPIF_INTEN_FRAME_CH1	(0x00000002)
+#define VPIF_INTEN_FRAME_CH2	(0x00000004)
+#define VPIF_INTEN_FRAME_CH3	(0x00000008)
+
+/* bit position of clock and channel enable in vpif_chn_ctrl register */
+
+#define VPIF_CH0_CLK_EN		(0x00000002)
+#define VPIF_CH0_EN		(0x00000001)
+#define VPIF_CH1_CLK_EN		(0x00000002)
+#define VPIF_CH1_EN		(0x00000001)
+#define VPIF_CH2_CLK_EN		(0x00000002)
+#define VPIF_CH2_EN		(0x00000001)
+#define VPIF_CH3_CLK_EN		(0x00000002)
+#define VPIF_CH3_EN		(0x00000001)
+#define VPIF_CH_CLK_EN		(0x00000002)
+#define VPIF_CH_EN		(0x00000001)
+
+#define VPIF_INT_TOP	(0x00)
+#define VPIF_INT_BOTTOM	(0x01)
+#define VPIF_INT_BOTH	(0x02)
+
+#define VPIF_CH0_INT_CTRL_SHIFT	(6)
+#define VPIF_CH1_INT_CTRL_SHIFT	(6)
+#define VPIF_CH2_INT_CTRL_SHIFT	(6)
+#define VPIF_CH3_INT_CTRL_SHIFT	(6)
+#define VPIF_CH_INT_CTRL_SHIFT	(6)
+
+#define VPIF_CH2_CLIP_ANC_EN	14
+#define VPIF_CH2_CLIP_ACTIVE_EN	13
+
+#define VPIF_CH3_CLIP_ANC_EN	14
+#define VPIF_CH3_CLIP_ACTIVE_EN	13
+
+/* enable interrupts for both fields in the vpif_ch0_ctrl register */
+#define channel0_intr_assert()	(regw((regr(VPIF_CH0_CTRL)|\
+	(VPIF_INT_BOTH << VPIF_CH0_INT_CTRL_SHIFT)), VPIF_CH0_CTRL))
+
+/* enable interrupts for both fields in the vpif_ch1_ctrl register */
+#define channel1_intr_assert()	(regw((regr(VPIF_CH1_CTRL)|\
+	(VPIF_INT_BOTH << VPIF_CH1_INT_CTRL_SHIFT)), VPIF_CH1_CTRL))
+
+/* enable interrupts for both fields in the vpif_ch2_ctrl register */
+#define channel2_intr_assert() 	(regw((regr(VPIF_CH2_CTRL)|\
+	(VPIF_INT_BOTH << VPIF_CH2_INT_CTRL_SHIFT)), VPIF_CH2_CTRL))
+
+/* enable interrupts for both fields in the vpif_ch3_ctrl register */
+#define channel3_intr_assert() 	(regw((regr(VPIF_CH3_CTRL)|\
+	(VPIF_INT_BOTH << VPIF_CH3_INT_CTRL_SHIFT)), VPIF_CH3_CTRL))
+
+#define VPIF_CH_FID_MASK	(0x20)
+#define VPIF_CH_FID_SHIFT	(5)
+
+#define VPIF_NTSC_VBI_START_FIELD0	(1)
+#define VPIF_NTSC_VBI_START_FIELD1	(263)
+#define VPIF_PAL_VBI_START_FIELD0	(624)
+#define VPIF_PAL_VBI_START_FIELD1	(311)
+
+#define VPIF_NTSC_HBI_START_FIELD0	(1)
+#define VPIF_NTSC_HBI_START_FIELD1	(263)
+#define VPIF_PAL_HBI_START_FIELD0	(624)
+#define VPIF_PAL_HBI_START_FIELD1	(311)
+
+#define VPIF_NTSC_VBI_COUNT_FIELD0	(20)
+#define VPIF_NTSC_VBI_COUNT_FIELD1	(19)
+#define VPIF_PAL_VBI_COUNT_FIELD0	(24)
+#define VPIF_PAL_VBI_COUNT_FIELD1	(25)
+
+#define VPIF_NTSC_HBI_COUNT_FIELD0	(263)
+#define VPIF_NTSC_HBI_COUNT_FIELD1	(262)
+#define VPIF_PAL_HBI_COUNT_FIELD0	(312)
+#define VPIF_PAL_HBI_COUNT_FIELD1	(313)
+
+#define VPIF_NTSC_VBI_SAMPLES_PER_LINE	(720)
+#define VPIF_PAL_VBI_SAMPLES_PER_LINE	(720)
+#define VPIF_NTSC_HBI_SAMPLES_PER_LINE	(268)
+#define VPIF_PAL_HBI_SAMPLES_PER_LINE	(280)
+
+#define VPIF_CH_VANC_EN			(0x20)
+#define VPIF_DMA_REQ_SIZE		(0x080)
+#define VPIF_EMULATION_DISABLE		(0x01)
+
+extern u8 irq_vpif_capture_channel[VPIF_NUM_CHANNELS];
+
+/* inline function to enable/disable channel0 */
+static inline void enable_channel0(int enable)
+{
+	if (enable)
+		regw((regr(VPIF_CH0_CTRL) | (VPIF_CH0_EN)), VPIF_CH0_CTRL);
+	else
+		regw((regr(VPIF_CH0_CTRL) & (~VPIF_CH0_EN)), VPIF_CH0_CTRL);
+}
+
+/* inline function to enable/disable channel1 */
+static inline void enable_channel1(int enable)
+{
+	if (enable)
+		regw((regr(VPIF_CH1_CTRL) | (VPIF_CH1_EN)), VPIF_CH1_CTRL);
+	else
+		regw((regr(VPIF_CH1_CTRL) & (~VPIF_CH1_EN)), VPIF_CH1_CTRL);
+}
+
+/* inline function to enable interrupt for channel0 */
+static inline void channel0_intr_enable(int enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vpif_lock, flags);
+
+	if (enable) {
+		regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+		regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH0), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+							VPIF_INTEN_SET);
+	} else {
+		regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH0)), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+							VPIF_INTEN_SET);
+	}
+	spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel1 */
+static inline void channel1_intr_enable(int enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vpif_lock, flags);
+
+	if (enable) {
+		regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+		regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH1), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+							VPIF_INTEN_SET);
+	} else {
+		regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH1)), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+							VPIF_INTEN_SET);
+	}
+	spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
+						 unsigned long btm_strt_luma,
+						 unsigned long top_strt_chroma,
+						 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+	regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+	regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+	regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
+					 unsigned long btm_strt_luma,
+					 unsigned long top_strt_chroma,
+					 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+	regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+	regw(top_strt_chroma, VPIF_CH0_TOP_STRT_ADD_CHROMA);
+	regw(btm_strt_chroma, VPIF_CH0_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch1_set_videobuf_addr(unsigned long top_strt_luma,
+					 unsigned long btm_strt_luma,
+					 unsigned long top_strt_chroma,
+					 unsigned long btm_strt_chroma)
+{
+
+	regw(top_strt_luma, VPIF_CH1_TOP_STRT_ADD_LUMA);
+	regw(btm_strt_luma, VPIF_CH1_BTM_STRT_ADD_LUMA);
+	regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+	regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch0_set_vbi_addr(unsigned long top_vbi,
+	unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+	regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_VANC);
+	regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch0_set_hbi_addr(unsigned long top_vbi,
+	unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+	regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_HANC);
+	regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_HANC);
+}
+
+static inline void ch1_set_vbi_addr(unsigned long top_vbi,
+	unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+	regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_VANC);
+	regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch1_set_hbi_addr(unsigned long top_vbi,
+	unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+	regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_HANC);
+	regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_HANC);
+}
+
+/* Inline function to disable raw vbi in the given channel */
+static inline void disable_raw_feature(u8 channel_id, u8 index)
+{
+	u32 ctrl_reg;
+	if (0 == channel_id)
+		ctrl_reg = VPIF_CH0_CTRL;
+	else
+		ctrl_reg = VPIF_CH1_CTRL;
+
+	if (1 == index)
+		vpif_clr_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+	else
+		vpif_clr_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+static inline void enable_raw_feature(u8 channel_id, u8 index)
+{
+	u32 ctrl_reg;
+	if (0 == channel_id)
+		ctrl_reg = VPIF_CH0_CTRL;
+	else
+		ctrl_reg = VPIF_CH1_CTRL;
+
+	if (1 == index)
+		vpif_set_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+	else
+		vpif_set_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+/* inline function to enable/disable channel2 */
+static inline void enable_channel2(int enable)
+{
+	if (enable) {
+		regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+		regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_EN)), VPIF_CH2_CTRL);
+	} else {
+		regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+		regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_EN)), VPIF_CH2_CTRL);
+	}
+}
+
+/* inline function to enable/disable channel3 */
+static inline void enable_channel3(int enable)
+{
+	if (enable) {
+		regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL);
+		regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_EN)), VPIF_CH3_CTRL);
+	} else {
+		regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL);
+		regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_EN)), VPIF_CH3_CTRL);
+	}
+}
+
+/* inline function to enable interrupt for channel2 */
+static inline void channel2_intr_enable(int enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vpif_lock, flags);
+
+	if (enable) {
+		regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+		regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH2), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2),
+							VPIF_INTEN_SET);
+	} else {
+		regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH2)), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2),
+							VPIF_INTEN_SET);
+	}
+	spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel3 */
+static inline void channel3_intr_enable(int enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vpif_lock, flags);
+
+	if (enable) {
+		regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+		regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH3), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3),
+							VPIF_INTEN_SET);
+	} else {
+		regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH3)), VPIF_INTEN);
+		regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3),
+							VPIF_INTEN_SET);
+	}
+	spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable raw vbi data for channel2 */
+static inline void channel2_raw_enable(int enable, u8 index)
+{
+	u32 mask;
+
+	if (1 == index)
+		mask = VPIF_CH_VANC_EN_BIT;
+	else
+		mask = VPIF_CH_HANC_EN_BIT;
+
+	if (enable)
+		vpif_set_bit(VPIF_CH2_CTRL, mask);
+	else
+		vpif_clr_bit(VPIF_CH2_CTRL, mask);
+}
+
+/* inline function to enable raw vbi data for channel3 */
+static inline void channel3_raw_enable(int enable, u8 index)
+{
+	u32 mask;
+
+	if (1 == index)
+		mask = VPIF_CH_VANC_EN_BIT;
+	else
+		mask = VPIF_CH_HANC_EN_BIT;
+
+	if (enable)
+		vpif_set_bit(VPIF_CH3_CTRL, mask);
+	else
+		vpif_clr_bit(VPIF_CH3_CTRL, mask);
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 2 */
+static inline void channel2_clipping_enable(int enable)
+{
+	if (enable) {
+		vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+		vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+	} else {
+		vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+		vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+	}
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 3 */
+static inline void channel3_clipping_enable(int enable)
+{
+	if (enable) {
+		vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+		vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+	} else {
+		vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+		vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+	}
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
+						 unsigned long btm_strt_luma,
+						 unsigned long top_strt_chroma,
+						 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
+	regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
+	regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA);
+	regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
+					 unsigned long btm_strt_luma,
+					 unsigned long top_strt_chroma,
+					 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
+	regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
+	regw(top_strt_chroma, VPIF_CH2_TOP_STRT_ADD_CHROMA);
+	regw(btm_strt_chroma, VPIF_CH2_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
+					 unsigned long btm_strt_luma,
+					 unsigned long top_strt_chroma,
+					 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_LUMA);
+	regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_LUMA);
+	regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA);
+	regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for vbi data */
+static inline void ch2_set_vbi_addr(unsigned long top_strt_luma,
+					 unsigned long btm_strt_luma,
+					 unsigned long top_strt_chroma,
+					 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_VANC);
+	regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch3_set_vbi_addr(unsigned long top_strt_luma,
+					 unsigned long btm_strt_luma,
+					 unsigned long top_strt_chroma,
+					 unsigned long btm_strt_chroma)
+{
+	regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_VANC);
+	regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC);
+}
+
+static inline int vpif_intr_status(int channel)
+{
+	int status = 0;
+	int mask;
+
+	if (channel < 0 || channel > 3)
+		return 0;
+
+	mask = 1 << channel;
+	status = regr(VPIF_STATUS) & mask;
+	regw(status, VPIF_STATUS_CLR);
+
+	return status;
+}
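+
+/*
+ * Illustrative note: VPIF_STATUS holds one frame-interrupt bit per channel
+ * (bit 0 for channel 0, bit 1 for channel 1, ...), and writing the same bit
+ * to VPIF_STATUS_CLR acknowledges it.  A channel ISR typically starts with
+ *
+ *	if (!vpif_intr_status(channel_id))
+ *		return IRQ_NONE;
+ */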
+
+#define VPIF_MAX_NAME	(30)
+
+/* This structure will store size parameters as per the mode selected by user */
+struct vpif_channel_config_params {
+	char name[VPIF_MAX_NAME];	/* Name of the mode */
+	u16 width;			/* Indicates width of the image */
+	u16 height;			/* Indicates height of the image */
+	u8 frm_fmt;			/* Interlaced (0) or progressive (1) */
+	u8 ycmux_mode;			/* This mode requires one (0) or two (1)
+					   channels */
+	u16 eav2sav;			/* length of eav 2 sav */
+	u16 sav2eav;			/* length of sav 2 eav */
+	u16 l1, l3, l5, l7, l9, l11;	/* Other parameter configurations */
+	u16 vsize;			/* Vertical size of the image */
+	u8 capture_format;		/* Indicates whether capture format
+					 * is in BT or in CCD/CMOS */
+	u8  vbi_supported;		/* Indicates whether this mode
+					 * supports capturing vbi or not */
+	u8 hd_sd;			/* HDTV (1) or SDTV (0) format */
+	v4l2_std_id stdid;		/* SDTV format */
+	u32 dv_preset;			/* HDTV format */
+};
+
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params ch_params[];
+
+struct vpif_video_params;
+struct vpif_params;
+struct vpif_vbi_params;
+
+int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id);
+void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
+							u8 channel_id);
+int vpif_channel_getfid(u8 channel_id);
+
+enum data_size {
+	_8BITS = 0,
+	_10BITS,
+	_12BITS,
+};
+
+/* Structure for vpif parameters for raw vbi data */
+struct vpif_vbi_params {
+	__u32 hstart0;  /* Horizontal start of raw vbi data for first field */
+	__u32 vstart0;  /* Vertical start of raw vbi data for first field */
+	__u32 hsize0;   /* Horizontal size of raw vbi data for first field */
+	__u32 vsize0;   /* Vertical size of raw vbi data for first field */
+	__u32 hstart1;  /* Horizontal start of raw vbi data for second field */
+	__u32 vstart1;  /* Vertical start of raw vbi data for second field */
+	__u32 hsize1;   /* Horizontal size of raw vbi data for second field */
+	__u32 vsize1;   /* Vertical size of raw vbi data for second field */
+};
+
+/* structure for vpif parameters */
+struct vpif_video_params {
+	__u8 storage_mode;	/* Indicates field or frame mode */
+	unsigned long hpitch;
+	v4l2_std_id stdid;
+};
+
+struct vpif_params {
+	struct vpif_interface iface;
+	struct vpif_video_params video_params;
+	struct vpif_channel_config_params std_info;
+	union param {
+		struct vpif_vbi_params	vbi_params;
+		enum data_size data_sz;
+	} params;
+};
+
+#endif				/* End of #ifndef VPIF_H */
+
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
new file mode 100644
index 0000000..1b625b0
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -0,0 +1,2456 @@
+/*
+ * Copyright (C) 2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * TODO : add support for VBI & HBI data service
+ *	  add static buffer allocation
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+
+#include "vpif_capture.h"
+#include "vpif.h"
+
+MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_CAPTURE_VERSION);
+
+#define vpif_err(fmt, arg...)	v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
+#define vpif_dbg(level, debug, fmt, arg...)	\
+		v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
+
+static int debug = 1;
+static u32 ch0_numbuffers = 3;
+static u32 ch1_numbuffers = 3;
+static u32 ch0_bufsize = 1920 * 1080 * 2;
+static u32 ch1_bufsize = 720 * 576 * 2;
+
+module_param(debug, int, 0644);
+module_param(ch0_numbuffers, uint, S_IRUGO);
+module_param(ch1_numbuffers, uint, S_IRUGO);
+module_param(ch0_bufsize, uint, S_IRUGO);
+module_param(ch1_bufsize, uint, S_IRUGO);
+
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+MODULE_PARM_DESC(ch0_numbuffers, "Channel0 buffer count (default:3)");
+MODULE_PARM_DESC(ch1_numbuffers, "Channel1 buffer count (default:3)");
+MODULE_PARM_DESC(ch0_bufsize, "Channel0 buffer size (default:1920 x 1080 x 2)");
+MODULE_PARM_DESC(ch1_bufsize, "Channel1 buffer size (default:720 x 576 x 2)");
+
+static struct vpif_config_params config_params = {
+	.min_numbuffers = 3,
+	.numbuffers[0] = 3,
+	.numbuffers[1] = 3,
+	.min_bufsize[0] = 720 * 480 * 2,
+	.min_bufsize[1] = 720 * 480 * 2,
+	.channel_bufsize[0] = 1920 * 1080 * 2,
+	.channel_bufsize[1] = 720 * 576 * 2,
+};
+
+/* global variables */
+static struct vpif_device vpif_obj = { {NULL} };
+static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
+
+/**
+ * vpif_buffer_prepare :  callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare; it is called when
+ * vb2_qbuf() is invoked. The buffer is prepared and the user-space virtual
+ * address or user address is converted into a physical address
+ */
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
+{
+	/* Get the file handle object and channel object */
+	struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_queue *q = vb->vb2_queue;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+	unsigned long addr;
+
+	vpif_dbg(2, debug, "vpif_buffer_prepare\n");
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	if (vb->state != VB2_BUF_STATE_ACTIVE &&
+		vb->state != VB2_BUF_STATE_PREPARED) {
+		vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+		if (vb2_plane_vaddr(vb, 0) &&
+		vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+			goto exit;
+		addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+		if (q->streaming) {
+			if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
+				!IS_ALIGNED((addr + common->ybtm_off), 8) ||
+				!IS_ALIGNED((addr + common->ctop_off), 8) ||
+				!IS_ALIGNED((addr + common->cbtm_off), 8))
+				goto exit;
+		}
+	}
+	return 0;
+exit:
+	vpif_dbg(1, debug, "buffer_prepare:offset is not aligned to 8 bytes\n");
+	return -EINVAL;
+}
+
+/**
+ * vpif_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when reqbuf() is called to adjust
+ * the buffer count and buffer size
+ */
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	/* Get the file handle object and channel object */
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+	unsigned long size;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	vpif_dbg(2, debug, "vpif_buffer_setup\n");
+
+	/* If memory type is mmap, use the configured channel buffer size */
+	if (V4L2_MEMORY_MMAP == common->memory) {
+		/* Calculate the size of the buffer */
+		size = config_params.channel_bufsize[ch->channel_id];
+		/*
+		 * Check if the buffer size exceeds the available video memory:
+		 * ycmux_mode = 0 means 1-channel (HD) mode and
+		 * ycmux_mode = 1 means 2-channel (SD) mode
+		 */
+		if (ch->vpifparams.std_info.ycmux_mode == 0) {
+			if (config_params.video_limit[ch->channel_id])
+				while (size * *nbuffers >
+					(config_params.video_limit[0]
+						+ config_params.video_limit[1]))
+					(*nbuffers)--;
+		} else {
+			if (config_params.video_limit[ch->channel_id])
+				while (size * *nbuffers >
+				config_params.video_limit[ch->channel_id])
+					(*nbuffers)--;
+		}
+
+	} else {
+		size = common->fmt.fmt.pix.sizeimage;
+	}
+
+	if (*nbuffers < config_params.min_numbuffers)
+		*nbuffers = config_params.min_numbuffers;
+
+	*nplanes = 1;
+	sizes[0] = size;
+	alloc_ctxs[0] = common->alloc_ctx;
+
+	return 0;
+}
+
+/**
+ * vpif_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ */
+static void vpif_buffer_queue(struct vb2_buffer *vb)
+{
+	/* Get the file handle object and channel object */
+	struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+	struct channel_obj *ch = fh->channel;
+	struct vpif_cap_buffer *buf = container_of(vb,
+				struct vpif_cap_buffer, vb);
+	struct common_obj *common;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	vpif_dbg(2, debug, "vpif_buffer_queue\n");
+
+	/* add the buffer to the DMA queue */
+	list_add_tail(&buf->list, &common->dma_queue);
+}
+
+/**
+ * vpif_buf_cleanup : Callback function to free buffer
+ * @vb: ptr to vb2_buffer
+ *
+ * This function is called from the videobuf2 layer to free memory
+ * allocated to the buffers
+ */
+static void vpif_buf_cleanup(struct vb2_buffer *vb)
+{
+	/* Get the file handle object and channel object */
+	struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+	struct vpif_cap_buffer *buf = container_of(vb,
+					struct vpif_cap_buffer, vb);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+	unsigned long flags;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	spin_lock_irqsave(&common->irqlock, flags);
+	if (vb->state == VB2_BUF_STATE_ACTIVE)
+		list_del_init(&buf->list);
+	spin_unlock_irqrestore(&common->irqlock, flags);
+
+}
+
+static void vpif_wait_prepare(struct vb2_queue *vq)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+	mutex_unlock(&common->lock);
+}
+
+static void vpif_wait_finish(struct vb2_queue *vq)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+	mutex_lock(&common->lock);
+}
+
+static int vpif_buffer_init(struct vb2_buffer *vb)
+{
+	struct vpif_cap_buffer *buf = container_of(vb,
+					struct vpif_cap_buffer, vb);
+
+	INIT_LIST_HEAD(&buf->list);
+
+	return 0;
+}
+
+static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] =
+	{ {1, 1} };
+
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct vpif_capture_config *vpif_config_data =
+					vpif_dev->platform_data;
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct vpif_params *vpif = &ch->vpifparams;
+	unsigned long addr = 0;
+	int ret;
+
+	/* If buffer queue is empty, return error */
+	if (list_empty(&common->dma_queue)) {
+		vpif_dbg(1, debug, "buffer queue is empty\n");
+		return -EIO;
+	}
+
+	/* Get the next frame from the buffer queue */
+	common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
+				    struct vpif_cap_buffer, list);
+	/* Remove buffer from the buffer queue */
+	list_del(&common->cur_frm->list);
+	/* Mark state of the current frame to active */
+	common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+	/* Initialize field_id and started member */
+	ch->field_id = 0;
+	common->started = 1;
+	addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+
+	/* Calculate the offset for Y and C data in the buffer */
+	vpif_calculate_offsets(ch);
+
+	if ((vpif->std_info.frm_fmt &&
+	    ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) &&
+	     (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) ||
+	    (!vpif->std_info.frm_fmt &&
+	     (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
+		vpif_dbg(1, debug, "conflict in field format and std format\n");
+		return -EINVAL;
+	}
+
+	/* configure 1 or 2 channel mode */
+	ret = vpif_config_data->setup_input_channel_mode
+					(vpif->std_info.ycmux_mode);
+
+	if (ret < 0) {
+		vpif_dbg(1, debug, "can't set vpif channel mode\n");
+		return ret;
+	}
+
+	/* Call vpif_set_params function to set the parameters and addresses */
+	ret = vpif_set_video_params(vpif, ch->channel_id);
+
+	if (ret < 0) {
+		vpif_dbg(1, debug, "can't set video params\n");
+		return ret;
+	}
+
+	common->started = ret;
+	vpif_config_addr(ch, ret);
+
+	common->set_addr(addr + common->ytop_off,
+			 addr + common->ybtm_off,
+			 addr + common->ctop_off,
+			 addr + common->cbtm_off);
+
+	/**
+	 * Set interrupts for both fields and enable the channel in the
+	 * VPIF registers
+	 */
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) {
+		channel0_intr_assert();
+		channel0_intr_enable(1);
+		enable_channel0(1);
+	}
+	if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+	    (common->started == 2)) {
+		channel1_intr_assert();
+		channel1_intr_enable(1);
+		enable_channel1(1);
+	}
+	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+
+	return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int vpif_stop_streaming(struct vb2_queue *vq)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	if (!vb2_is_streaming(vq))
+		return 0;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	/* release all active buffers */
+	while (!list_empty(&common->dma_queue)) {
+		common->next_frm = list_entry(common->dma_queue.next,
+						struct vpif_cap_buffer, list);
+		list_del(&common->next_frm->list);
+		vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	return 0;
+}
+
+static struct vb2_ops video_qops = {
+	.queue_setup		= vpif_buffer_queue_setup,
+	.wait_prepare		= vpif_wait_prepare,
+	.wait_finish		= vpif_wait_finish,
+	.buf_init		= vpif_buffer_init,
+	.buf_prepare		= vpif_buffer_prepare,
+	.start_streaming	= vpif_start_streaming,
+	.stop_streaming		= vpif_stop_streaming,
+	.buf_cleanup		= vpif_buf_cleanup,
+	.buf_queue		= vpif_buffer_queue,
+};
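+
+/*
+ * Sketch (illustrative, not a definitive snippet of this driver): these
+ * callbacks are wired into videobuf2 when the application issues
+ * VIDIOC_REQBUFS, roughly as
+ *
+ *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *	q->io_modes = VB2_MMAP | VB2_USERPTR;
+ *	q->drv_priv = fh;
+ *	q->ops = &video_qops;
+ *	q->mem_ops = &vb2_dma_contig_memops;
+ *	q->buf_struct_size = sizeof(struct vpif_cap_buffer);
+ *	vb2_queue_init(q);
+ */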
+
+/**
+ * vpif_process_buffer_complete: process a completed buffer
+ * @common: ptr to common channel object
+ *
+ * This function time-stamps the buffer and marks it as DONE. It also
+ * wakes up any process waiting on the queue and sets the next buffer
+ * as the current one
+ */
+static void vpif_process_buffer_complete(struct common_obj *common)
+{
+	do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+	vb2_buffer_done(&common->cur_frm->vb,
+					    VB2_BUF_STATE_DONE);
+	/* Make cur_frm point to next_frm */
+	common->cur_frm = common->next_frm;
+}
+
+/**
+ * vpif_schedule_next_buffer: set next buffer address for capture
+ * @common : ptr to common channel object
+ *
+ * This function gets the next buffer from the DMA queue, sets its
+ * address in the VPIF registers for capture and marks the buffer
+ * as active.
+ */
+static void vpif_schedule_next_buffer(struct common_obj *common)
+{
+	unsigned long addr = 0;
+
+	common->next_frm = list_entry(common->dma_queue.next,
+				     struct vpif_cap_buffer, list);
+	/* Remove that buffer from the buffer queue */
+	list_del(&common->next_frm->list);
+	common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+	addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+
+	/* Set top and bottom field addresses in VPIF registers */
+	common->set_addr(addr + common->ytop_off,
+			 addr + common->ybtm_off,
+			 addr + common->ctop_off,
+			 addr + common->cbtm_off);
+}
+
+/**
+ * vpif_channel_isr : ISR handler for vpif capture
+ * @irq: irq number
+ * @dev_id: dev_id ptr
+ *
+ * It changes status of the captured buffer, takes next buffer from the queue
+ * and sets its address in VPIF registers
+ */
+static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
+{
+	struct vpif_device *dev = &vpif_obj;
+	struct common_obj *common;
+	struct channel_obj *ch;
+	enum v4l2_field field;
+	int channel_id = 0;
+	int fid = -1, i;
+
+	channel_id = *(int *)(dev_id);
+	if (!vpif_intr_status(channel_id))
+		return IRQ_NONE;
+
+	ch = dev->dev[channel_id];
+
+	field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
+
+	for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) {
+		common = &ch->common[i];
+		/* skip if streaming is not started on this channel */
+		if (0 == common->started)
+			continue;
+
+		/* Check the field format */
+		if (1 == ch->vpifparams.std_info.frm_fmt) {
+			/* Progressive mode */
+			if (list_empty(&common->dma_queue))
+				continue;
+
+			if (!channel_first_int[i][channel_id])
+				vpif_process_buffer_complete(common);
+
+			channel_first_int[i][channel_id] = 0;
+
+			vpif_schedule_next_buffer(common);
+		} else {
+			/**
+			 * Interlaced mode. If this is the first interrupt,
+			 * ignore it
+			 */
+			if (channel_first_int[i][channel_id]) {
+				channel_first_int[i][channel_id] = 0;
+				continue;
+			}
+			if (0 == i) {
+				ch->field_id ^= 1;
+				/* Get field id from VPIF registers */
+				fid = vpif_channel_getfid(ch->channel_id);
+				if (fid != ch->field_id) {
+					/**
+					 * If field id does not match stored
+					 * field id, make them in sync
+					 */
+					if (0 == fid)
+						ch->field_id = fid;
+					return IRQ_HANDLED;
+				}
+			}
+			/* device field id and local field id are in sync */
+			if (0 == fid) {
+				/* this is even field */
+				if (common->cur_frm == common->next_frm)
+					continue;
+
+				/* mark the current buffer as done */
+				vpif_process_buffer_complete(common);
+			} else if (1 == fid) {
+				/* odd field */
+				if (list_empty(&common->dma_queue) ||
+				    (common->cur_frm != common->next_frm))
+					continue;
+
+				vpif_schedule_next_buffer(common);
+			}
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * vpif_update_std_info() - update standard related info
+ * @ch: ptr to channel object
+ *
+ * For a given standard selected by application, update values
+ * in the device data structures
+ */
+static int vpif_update_std_info(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	const struct vpif_channel_config_params *config;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	int index;
+
+	vpif_dbg(2, debug, "vpif_update_std_info\n");
+
+	for (index = 0; index < vpif_ch_params_count; index++) {
+		config = &ch_params[index];
+		if (config->hd_sd == 0) {
+			vpif_dbg(2, debug, "SD format\n");
+			if (config->stdid & vid_ch->stdid) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		} else {
+			vpif_dbg(2, debug, "HD format\n");
+			if (config->dv_preset == vid_ch->dv_preset) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		}
+	}
+
+	/* standard not found */
+	if (index == vpif_ch_params_count)
+		return -EINVAL;
+
+	common->fmt.fmt.pix.width = std_info->width;
+	common->width = std_info->width;
+	common->fmt.fmt.pix.height = std_info->height;
+	common->height = std_info->height;
+	common->fmt.fmt.pix.bytesperline = std_info->width;
+	vpifparams->video_params.hpitch = std_info->width;
+	vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
+	return 0;
+}
+
+/**
+ * vpif_calculate_offsets : This function calculates buffer offsets
+ * @ch : ptr to channel object
+ *
+ * This function calculates buffer offsets for Y and C in the top and
+ * bottom fields
+ */
+static void vpif_calculate_offsets(struct channel_obj *ch)
+{
+	unsigned int hpitch, vpitch, sizeimage;
+	struct video_obj *vid_ch = &(ch->video);
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	enum v4l2_field field = common->fmt.fmt.pix.field;
+
+	vpif_dbg(2, debug, "vpif_calculate_offsets\n");
+
+	if (V4L2_FIELD_ANY == field) {
+		if (vpifparams->std_info.frm_fmt)
+			vid_ch->buf_field = V4L2_FIELD_NONE;
+		else
+			vid_ch->buf_field = V4L2_FIELD_INTERLACED;
+	} else
+		vid_ch->buf_field = common->fmt.fmt.pix.field;
+
+	sizeimage = common->fmt.fmt.pix.sizeimage;
+
+	hpitch = common->fmt.fmt.pix.bytesperline;
+	vpitch = sizeimage / (hpitch * 2);
+
+	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+		common->ytop_off = 0;
+		common->ybtm_off = hpitch;
+		common->ctop_off = sizeimage / 2;
+		common->cbtm_off = sizeimage / 2 + hpitch;
+	} else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
+		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+		common->ytop_off = 0;
+		common->ybtm_off = sizeimage / 4;
+		common->ctop_off = sizeimage / 2;
+		common->cbtm_off = common->ctop_off + sizeimage / 4;
+	} else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
+		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
+		common->ybtm_off = 0;
+		common->ytop_off = sizeimage / 4;
+		common->cbtm_off = sizeimage / 2;
+		common->ctop_off = common->cbtm_off + sizeimage / 4;
+	}
+	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
+		vpifparams->video_params.storage_mode = 1;
+	else
+		vpifparams->video_params.storage_mode = 0;
+
+	if (1 == vpifparams->std_info.frm_fmt)
+		vpifparams->video_params.hpitch =
+		    common->fmt.fmt.pix.bytesperline;
+	else {
+		if ((field == V4L2_FIELD_ANY)
+		    || (field == V4L2_FIELD_INTERLACED))
+			vpifparams->video_params.hpitch =
+			    common->fmt.fmt.pix.bytesperline * 2;
+		else
+			vpifparams->video_params.hpitch =
+			    common->fmt.fmt.pix.bytesperline;
+	}
+
+	ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
+}
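+
+/*
+ * Worked example (for illustration): for interlaced PAL capture with
+ * bytesperline = 720 and sizeimage = 720 * 576 * 2 = 829440, the offsets
+ * become ytop_off = 0, ybtm_off = 720, ctop_off = 414720 and
+ * cbtm_off = 415440, and hpitch is doubled to 1440 so each field skips
+ * the other field's lines.
+ */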
+
+/**
+ * vpif_config_format: configure default frame format in the device
+ * @ch : ptr to channel object
+ */
+static void vpif_config_format(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	vpif_dbg(2, debug, "vpif_config_format\n");
+
+	common->fmt.fmt.pix.field = V4L2_FIELD_ANY;
+	if (config_params.numbuffers[ch->channel_id] == 0)
+		common->memory = V4L2_MEMORY_USERPTR;
+	else
+		common->memory = V4L2_MEMORY_MMAP;
+
+	common->fmt.fmt.pix.sizeimage
+	    = config_params.channel_bufsize[ch->channel_id];
+
+	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
+		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
+	else
+		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+	common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+/**
+ * vpif_get_default_field() - Get default field type based on interface
+ * @iface - ptr to vpif interface
+ */
+static inline enum v4l2_field vpif_get_default_field(
+				struct vpif_interface *iface)
+{
+	return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE :
+						V4L2_FIELD_INTERLACED;
+}
+
+/**
+ * vpif_check_format()  - check given pixel format for compatibility
+ * @ch - channel ptr
+ * @pixfmt - Given pixel format
+ * @update - update the values as per hardware requirement
+ *
+ * Check the application pixel format for S_FMT and update the input
+ * values as per hardware limits for TRY_FMT. The default pixel and
+ * field format is selected based on interface type.
+ */
+static int vpif_check_format(struct channel_obj *ch,
+			     struct v4l2_pix_format *pixfmt,
+			     int update)
+{
+	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+	struct vpif_params *vpif_params = &ch->vpifparams;
+	enum v4l2_field field = pixfmt->field;
+	u32 sizeimage, hpitch, vpitch;
+	int ret = -EINVAL;
+
+	vpif_dbg(2, debug, "vpif_check_format\n");
+	/**
+	 * first check for the pixel format. If if_type is Raw bayer,
+	 * only V4L2_PIX_FMT_SBGGR8 format is supported. Otherwise only
+	 * V4L2_PIX_FMT_YUV422P is supported
+	 */
+	if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) {
+		if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) {
+			if (!update) {
+				vpif_dbg(2, debug, "invalid pix format\n");
+				goto exit;
+			}
+			pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
+		}
+	} else {
+		if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) {
+			if (!update) {
+				vpif_dbg(2, debug, "invalid pixel format\n");
+				goto exit;
+			}
+			pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+		}
+	}
+
+	if (!(VPIF_VALID_FIELD(field))) {
+		if (!update) {
+			vpif_dbg(2, debug, "invalid field format\n");
+			goto exit;
+		}
+		/**
+		 * By default use FIELD_NONE for RAW Bayer capture
+		 * and FIELD_INTERLACED for other interfaces
+		 */
+		field = vpif_get_default_field(&vpif_params->iface);
+	} else if (field == V4L2_FIELD_ANY)
+		/* V4L2_FIELD_ANY: use the default field */
+		field = vpif_get_default_field(&vpif_params->iface);
+
+	/* validate the hpitch */
+	hpitch = pixfmt->bytesperline;
+	if (hpitch < vpif_params->std_info.width) {
+		if (!update) {
+			vpif_dbg(2, debug, "invalid hpitch\n");
+			goto exit;
+		}
+		hpitch = vpif_params->std_info.width;
+	}
+
+	sizeimage = pixfmt->sizeimage;
+
+	vpitch = sizeimage / (hpitch * 2);
+
+	/* validate the vpitch */
+	if (vpitch < vpif_params->std_info.height) {
+		if (!update) {
+			vpif_dbg(2, debug, "Invalid vpitch\n");
+			goto exit;
+		}
+		vpitch = vpif_params->std_info.height;
+	}
+
+	/* Check for 8 byte alignment */
+	if (!IS_ALIGNED(hpitch, 8)) {
+		if (!update) {
+			vpif_dbg(2, debug, "invalid pitch alignment\n");
+			goto exit;
+		}
+		/* adjust to next 8 byte boundary */
+		hpitch = (((hpitch + 7) / 8) * 8);
+	}
+	/* if update is set, modify the bytesperline and sizeimage */
+	if (update) {
+		pixfmt->bytesperline = hpitch;
+		pixfmt->sizeimage = hpitch * vpitch * 2;
+	}
+	/**
+	 * Image width and height is always based on current standard width and
+	 * height
+	 */
+	pixfmt->width = common->fmt.fmt.pix.width;
+	pixfmt->height = common->fmt.fmt.pix.height;
+	return 0;
+exit:
+	return ret;
+}
+
+/**
+ * vpif_config_addr() - function to configure buffer address in vpif
+ * @ch - channel ptr
+ * @muxmode - channel mux mode
+ */
+static void vpif_config_addr(struct channel_obj *ch, int muxmode)
+{
+	struct common_obj *common;
+
+	vpif_dbg(2, debug, "vpif_config_addr\n");
+
+	common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+	if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
+		common->set_addr = ch1_set_videobuf_addr;
+	else if (2 == muxmode)
+		common->set_addr = ch0_set_videobuf_addr_yc_nmux;
+	else
+		common->set_addr = ch0_set_videobuf_addr;
+}
+
+/**
+ * vpif_mmap : It is used to map kernel space buffers into user space
+ * @filep: file pointer
+ * @vma: ptr to vm_area_struct
+ */
+static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+	/* Get the channel object and file handle object */
+	struct vpif_fh *fh = filep->private_data;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+	int ret;
+
+	vpif_dbg(2, debug, "vpif_mmap\n");
+
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+	ret = vb2_mmap(&common->buffer_queue, vma);
+	mutex_unlock(&common->lock);
+	return ret;
+}
+
+/**
+ * vpif_poll: It is used for select/poll system call
+ * @filep: file pointer
+ * @wait: poll table to wait
+ */
+static unsigned int vpif_poll(struct file *filep, poll_table * wait)
+{
+	struct vpif_fh *fh = filep->private_data;
+	struct channel_obj *channel = fh->channel;
+	struct common_obj *common = &(channel->common[VPIF_VIDEO_INDEX]);
+	unsigned int res = 0;
+
+	vpif_dbg(2, debug, "vpif_poll\n");
+
+	if (common->started) {
+		mutex_lock(&common->lock);
+		res = vb2_poll(&common->buffer_queue, filep, wait);
+		mutex_unlock(&common->lock);
+	}
+	return res;
+}
+
+/**
+ * vpif_open : vpif open handler
+ * @filep: file ptr
+ *
+ * It creates a file handle object and stores it in the private_data
+ * member of the file pointer
+ */
+static int vpif_open(struct file *filep)
+{
+	struct vpif_capture_config *config = vpif_dev->platform_data;
+	struct video_device *vdev = video_devdata(filep);
+	struct common_obj *common;
+	struct video_obj *vid_ch;
+	struct channel_obj *ch;
+	struct vpif_fh *fh;
+	int i;
+
+	vpif_dbg(2, debug, "vpif_open\n");
+
+	ch = video_get_drvdata(vdev);
+
+	vid_ch = &ch->video;
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	if (NULL == ch->curr_subdev_info) {
+		/**
+		 * search the sub device list for a registered sub device
+		 * and make it the current sub device
+		 */
+		for (i = 0; i < config->subdev_count; i++) {
+			if (vpif_obj.sd[i]) {
+				/* the sub device is registered */
+				ch->curr_subdev_info = &config->subdev_info[i];
+				/* make first input as the current input */
+				vid_ch->input_idx = 0;
+				break;
+			}
+		}
+		if (i == config->subdev_count) {
+			vpif_err("No sub device registered\n");
+			return -ENOENT;
+		}
+	}
+
+	/* Allocate memory for the file handle object */
+	fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
+	if (NULL == fh) {
+		vpif_err("unable to allocate memory for file handle object\n");
+		return -ENOMEM;
+	}
+
+	if (mutex_lock_interruptible(&common->lock)) {
+		kfree(fh);
+		return -ERESTARTSYS;
+	}
+	/* store pointer to fh in private_data member of filep */
+	filep->private_data = fh;
+	fh->channel = ch;
+	fh->initialized = 0;
+	/* If decoder is not initialized, initialize it */
+	if (!ch->initialized) {
+		fh->initialized = 1;
+		ch->initialized = 1;
+		memset(&(ch->vpifparams), 0, sizeof(struct vpif_params));
+	}
+	/* Increment channel usrs counter */
+	ch->usrs++;
+	/* Set io_allowed member to false */
+	fh->io_allowed[VPIF_VIDEO_INDEX] = 0;
+	/* Initialize priority of this instance to default priority */
+	fh->prio = V4L2_PRIORITY_UNSET;
+	v4l2_prio_open(&ch->prio, &fh->prio);
+	mutex_unlock(&common->lock);
+	return 0;
+}
+
+/**
+ * vpif_release : function to clean up file close
+ * @filep: file pointer
+ *
+ * This function releases the buffer queue, frees the buffers and the vpif
+ * file handle
+ */
+static int vpif_release(struct file *filep)
+{
+	struct vpif_fh *fh = filep->private_data;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	vpif_dbg(2, debug, "vpif_release\n");
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	mutex_lock(&common->lock);
+	/* if this instance is doing IO */
+	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		/* Reset io_usrs member of channel object */
+		common->io_usrs = 0;
+		/* Disable channel as per its device type and channel id */
+		if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+			enable_channel0(0);
+			channel0_intr_enable(0);
+		}
+		if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+		    (2 == common->started)) {
+			enable_channel1(0);
+			channel1_intr_enable(0);
+		}
+		common->started = 0;
+		/* Free buffers allocated */
+		vb2_queue_release(&common->buffer_queue);
+		vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+	}
+
+	/* Decrement channel usrs counter */
+	ch->usrs--;
+
+	/* Close the priority */
+	v4l2_prio_close(&ch->prio, fh->prio);
+
+	if (fh->initialized)
+		ch->initialized = 0;
+
+	mutex_unlock(&common->lock);
+	filep->private_data = NULL;
+	kfree(fh);
+	return 0;
+}
+
+/**
+ * vpif_reqbufs() - request buffer handler
+ * @file: file ptr
+ * @priv: file handle
+ * @reqbuf: request buffer structure ptr
+ */
+static int vpif_reqbufs(struct file *file, void *priv,
+			struct v4l2_requestbuffers *reqbuf)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+	u8 index = 0;
+	struct vb2_queue *q;
+
+	vpif_dbg(2, debug, "vpif_reqbufs\n");
+
+	/**
+	 * If this file handle has not initialized the channel,
+	 * it is not allowed to modify the settings
+	 */
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)
+	    || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != reqbuf->type || !vpif_dev)
+		return -EINVAL;
+
+	index = VPIF_VIDEO_INDEX;
+
+	common = &ch->common[index];
+
+	if (0 != common->io_usrs)
+		return -EBUSY;
+
+	/* Initialize videobuf2 queue as per the buffer type */
+	common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
+	if (!common->alloc_ctx) {
+		vpif_err("Failed to get the context\n");
+		return -EINVAL;
+	}
+	q = &common->buffer_queue;
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->drv_priv = fh;
+	q->ops = &video_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct vpif_cap_buffer);
+
+	vb2_queue_init(q);
+
+	/* Set io allowed member of file handle to TRUE */
+	fh->io_allowed[index] = 1;
+	/* Increment io usrs member of channel object to 1 */
+	common->io_usrs = 1;
+	/* Store type of memory requested in channel object */
+	common->memory = reqbuf->memory;
+	INIT_LIST_HEAD(&common->dma_queue);
+
+	/* Allocate buffers */
+	return vb2_reqbufs(&common->buffer_queue, reqbuf);
+}
+
+/**
+ * vpif_querybuf() - query buffer handler
+ * @file: file ptr
+ * @priv: file handle
+ * @buf: v4l2 buffer structure ptr
+ */
+static int vpif_querybuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	vpif_dbg(2, debug, "vpif_querybuf\n");
+
+	if (common->fmt.type != buf->type)
+		return -EINVAL;
+
+	if (common->memory != V4L2_MEMORY_MMAP) {
+		vpif_dbg(1, debug, "Invalid memory\n");
+		return -EINVAL;
+	}
+
+	return vb2_querybuf(&common->buffer_queue, buf);
+}
+
+/**
+ * vpif_qbuf() - queue buffer handler
+ * @file: file ptr
+ * @priv: file handle
+ * @buf: v4l2 buffer structure ptr
+ */
+static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct v4l2_buffer tbuf = *buf;
+
+	vpif_dbg(2, debug, "vpif_qbuf\n");
+
+	if (common->fmt.type != tbuf.type) {
+		vpif_err("invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		vpif_err("fh io not allowed\n");
+		return -EACCES;
+	}
+
+	return vb2_qbuf(&common->buffer_queue, buf);
+}
+
+/**
+ * vpif_dqbuf() - dequeue buffer handler
+ * @file: file ptr
+ * @priv: file handle
+ * @buf: v4l2 buffer structure ptr
+ */
+static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	vpif_dbg(2, debug, "vpif_dqbuf\n");
+
+	return vb2_dqbuf(&common->buffer_queue, buf,
+			 (file->f_flags & O_NONBLOCK));
+}
+
+/**
+ * vpif_streamon() - streamon handler
+ * @file: file ptr
+ * @priv: file handle
+ * @buftype: v4l2 buffer type
+ */
+static int vpif_streamon(struct file *file, void *priv,
+				enum v4l2_buf_type buftype)
+{
+
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
+	struct vpif_params *vpif;
+	int ret = 0;
+
+	vpif_dbg(2, debug, "vpif_streamon\n");
+
+	vpif = &ch->vpifparams;
+
+	if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		vpif_dbg(1, debug, "buffer type not supported\n");
+		return -EINVAL;
+	}
+
+	/* If file handle is not allowed IO, return error */
+	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		vpif_dbg(1, debug, "io not allowed\n");
+		return -EACCES;
+	}
+
+	/* If Streaming is already started, return error */
+	if (common->started) {
+		vpif_dbg(1, debug, "channel->started\n");
+		return -EBUSY;
+	}
+
+	if ((ch->channel_id == VPIF_CHANNEL0_VIDEO &&
+	    oth_ch->common[VPIF_VIDEO_INDEX].started &&
+	    vpif->std_info.ycmux_mode == 0) ||
+	   ((ch->channel_id == VPIF_CHANNEL1_VIDEO) &&
+	    (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) {
+		vpif_dbg(1, debug, "other channel is being used\n");
+		return -EBUSY;
+	}
+
+	ret = vpif_check_format(ch, &common->fmt.fmt.pix, 0);
+	if (ret)
+		return ret;
+
+	/* Enable streamon on the sub device */
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
+				s_stream, 1);
+
+	if (ret && (ret != -ENOIOCTLCMD)) {
+		vpif_dbg(1, debug, "stream on failed in subdev\n");
+		return ret;
+	}
+
+	/* Call vb2_streamon to start streaming in videobuf2 */
+	ret = vb2_streamon(&common->buffer_queue, buftype);
+	if (ret) {
+		vpif_dbg(1, debug, "vb2_streamon\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * vpif_streamoff() - streamoff handler
+ * @file: file ptr
+ * @priv: file handle
+ * @buftype: v4l2 buffer type
+ */
+static int vpif_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type buftype)
+{
+
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret;
+
+	vpif_dbg(2, debug, "vpif_streamoff\n");
+
+	if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		vpif_dbg(1, debug, "buffer type not supported\n");
+		return -EINVAL;
+	}
+
+	/* If io is not allowed for this file handle, return error */
+	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		vpif_dbg(1, debug, "io not allowed\n");
+		return -EACCES;
+	}
+
+	/* If streaming is not started, return error */
+	if (!common->started) {
+		vpif_dbg(1, debug, "channel->started\n");
+		return -EINVAL;
+	}
+
+	/* disable channel */
+	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+		enable_channel0(0);
+		channel0_intr_enable(0);
+	} else {
+		enable_channel1(0);
+		channel1_intr_enable(0);
+	}
+
+	common->started = 0;
+
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
+				s_stream, 0);
+
+	if (ret && (ret != -ENOIOCTLCMD))
+		vpif_dbg(1, debug, "stream off failed in subdev\n");
+
+	return vb2_streamoff(&common->buffer_queue, buftype);
+}
+
+/**
+ * vpif_map_sub_device_to_input() - Maps sub device to input
+ * @ch: ptr to channel
+ * @vpif_cfg: ptr to capture configuration
+ * @input_index: given input index from application
+ * @sub_device_index: index into sd table
+ *
+ * Look up the sub device information for a given input index.
+ * All inputs are reported to the application; the inputs table
+ * also has the sub device name for each input.
+ */
+static struct vpif_subdev_info *vpif_map_sub_device_to_input(
+				struct channel_obj *ch,
+				struct vpif_capture_config *vpif_cfg,
+				int input_index,
+				int *sub_device_index)
+{
+	struct vpif_capture_chan_config *chan_cfg;
+	struct vpif_subdev_info *subdev_info = NULL;
+	const char *subdev_name = NULL;
+	int i;
+
+	vpif_dbg(2, debug, "vpif_map_sub_device_to_input\n");
+
+	chan_cfg = &vpif_cfg->chan_config[ch->channel_id];
+
+	/**
+	 * search through the inputs to find the sub device supporting
+	 * the input
+	 */
+	for (i = 0; i < chan_cfg->input_count; i++) {
+		/* For each sub device, loop through input */
+		if (i == input_index) {
+			subdev_name = chan_cfg->inputs[i].subdev_name;
+			break;
+		}
+	}
+
+	/* if no matching input was found, return NULL */
+	if (i == chan_cfg->input_count || (NULL == subdev_name))
+		return subdev_info;
+
+	/* loop through the sub device list to get the sub device info */
+	for (i = 0; i < vpif_cfg->subdev_count; i++) {
+		subdev_info = &vpif_cfg->subdev_info[i];
+		if (!strcmp(subdev_info->name, subdev_name))
+			break;
+	}
+
+	if (i == vpif_cfg->subdev_count)
+		return subdev_info;
+
+	/* check if the sub device is registered */
+	if (NULL == vpif_obj.sd[i])
+		return NULL;
+
+	*sub_device_index = i;
+	return subdev_info;
+}
+
+/**
+ * vpif_querystd() - querystd handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std_id: ptr to std id
+ *
+ * This function is called to detect standard at the selected input
+ */
+static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	int ret = 0;
+
+	vpif_dbg(2, debug, "vpif_querystd\n");
+
+	/* Call querystd function of decoder device */
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
+				querystd, std_id);
+	if (ret < 0)
+		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
+
+	return ret;
+}
+
+/**
+ * vpif_g_std() - get STD handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std: ptr to std id
+ */
+static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	vpif_dbg(2, debug, "vpif_g_std\n");
+
+	*std = ch->video.stdid;
+	return 0;
+}
+
+/**
+ * vpif_s_std() - set STD handler
+ * @file: file ptr
+ * @priv: file handle
+ * @std_id: ptr to std id
+ */
+static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	vpif_dbg(2, debug, "vpif_s_std\n");
+
+	if (common->started) {
+		vpif_err("streaming in progress\n");
+		return -EBUSY;
+	}
+
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (0 != ret)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Store the standard and invalidate any DV preset/timings */
+	ch->video.stdid = *std_id;
+	ch->video.dv_preset = V4L2_DV_INVALID;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_std_info(ch)) {
+		vpif_err("Error getting the standard info\n");
+		return -EINVAL;
+	}
+
+	/* Configure the default format information */
+	vpif_config_format(ch);
+
+	/* set standard in the sub device */
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+				s_std, *std_id);
+	if (ret < 0)
+		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
+	return ret;
+}
+
+/**
+ * vpif_enum_input() - ENUMINPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @input: ptr to input structure
+ */
+static int vpif_enum_input(struct file *file, void *priv,
+				struct v4l2_input *input)
+{
+
+	struct vpif_capture_config *config = vpif_dev->platform_data;
+	struct vpif_capture_chan_config *chan_cfg;
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	chan_cfg = &config->chan_config[ch->channel_id];
+
+	if (input->index >= chan_cfg->input_count) {
+		vpif_dbg(1, debug, "Invalid input index\n");
+		return -EINVAL;
+	}
+
+	memcpy(input, &chan_cfg->inputs[input->index].input,
+		sizeof(*input));
+	return 0;
+}
+
+/**
+ * vpif_g_input() - Get INPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @index: ptr to input index
+ */
+static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	*index = vid_ch->input_idx;
+
+	return 0;
+}
+
+/**
+ * vpif_s_input() - Set INPUT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @index: input index
+ */
+static int vpif_s_input(struct file *file, void *priv, unsigned int index)
+{
+	struct vpif_capture_config *config = vpif_dev->platform_data;
+	struct vpif_capture_chan_config *chan_cfg;
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	struct vpif_subdev_info *subdev_info;
+	int ret = 0, sd_index = 0;
+	u32 input = 0, output = 0;
+
+	chan_cfg = &config->chan_config[ch->channel_id];
+
+	if (common->started) {
+		vpif_err("Streaming in progress\n");
+		return -EBUSY;
+	}
+
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (0 != ret)
+		return ret;
+
+	fh->initialized = 1;
+	subdev_info = vpif_map_sub_device_to_input(ch, config, index,
+						   &sd_index);
+	if (NULL == subdev_info) {
+		vpif_dbg(1, debug,
+			"couldn't lookup sub device for the input index\n");
+		return -EINVAL;
+	}
+
+	/* first setup input path from sub device to vpif */
+	if (config->setup_input_path) {
+		ret = config->setup_input_path(ch->channel_id,
+					       subdev_info->name);
+		if (ret < 0) {
+			vpif_dbg(1, debug, "couldn't setup input path for the"
+				" sub device %s, for input index %d\n",
+				subdev_info->name, index);
+			return ret;
+		}
+	}
+
+	if (subdev_info->can_route) {
+		input = subdev_info->input;
+		output = subdev_info->output;
+		ret = v4l2_subdev_call(vpif_obj.sd[sd_index], video, s_routing,
+					input, output, 0);
+		if (ret < 0) {
+			vpif_dbg(1, debug, "Failed to set input\n");
+			return ret;
+		}
+	}
+	vid_ch->input_idx = index;
+	ch->curr_subdev_info = subdev_info;
+	ch->curr_sd_index = sd_index;
+	/* copy interface parameters to vpif */
+	ch->vpifparams.iface = subdev_info->vpif_if;
+
+	/* update tvnorms from the sub device input info */
+	ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
+	return ret;
+}
+
+/**
+ * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format descriptor structure
+ */
+static int vpif_enum_fmt_vid_cap(struct file *file, void  *priv,
+					struct v4l2_fmtdesc *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	if (fmt->index != 0) {
+		vpif_dbg(1, debug, "Invalid format index\n");
+		return -EINVAL;
+	}
+
+	/* Fill in the information about format */
+	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) {
+		fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb");
+		fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
+	} else {
+		fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		strcpy(fmt->description, "YCbCr4:2:2 YC Planar");
+		fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+	}
+	return 0;
+}
+
+/**
+ * vpif_try_fmt_vid_cap() - TRY_FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+	return vpif_check_format(ch, pixfmt, 1);
+}
+
+
+/**
+ * vpif_g_fmt_vid_cap() - Get FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	/* Check the validity of the buffer type */
+	if (common->fmt.type != fmt->type)
+		return -EINVAL;
+
+	/* Fill in the information about format */
+	*fmt = common->fmt;
+	return 0;
+}
+
+/**
+ * vpif_s_fmt_vid_cap() - Set FMT handler
+ * @file: file ptr
+ * @priv: file handle
+ * @fmt: ptr to v4l2 format structure
+ */
+static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct v4l2_pix_format *pixfmt;
+	int ret = 0;
+
+	vpif_dbg(2, debug, "%s\n", __func__);
+
+	/* If streaming is started, return error */
+	if (common->started) {
+		vpif_dbg(1, debug, "Streaming is started\n");
+		return -EBUSY;
+	}
+
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (0 != ret)
+		return ret;
+
+	fh->initialized = 1;
+
+	pixfmt = &fmt->fmt.pix;
+	/* Check for valid field format */
+	ret = vpif_check_format(ch, pixfmt, 0);
+
+	if (ret)
+		return ret;
+	/* store the format in the channel object */
+	common->fmt = *fmt;
+	return 0;
+}
+
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
+static int vpif_querycap(struct file *file, void  *priv,
+				struct v4l2_capability *cap)
+{
+	struct vpif_capture_config *config = vpif_dev->platform_data;
+
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	strlcpy(cap->driver, "vpif capture", sizeof(cap->driver));
+	strlcpy(cap->bus_info, "VPIF Platform", sizeof(cap->bus_info));
+	strlcpy(cap->card, config->card_name, sizeof(cap->card));
+
+	return 0;
+}
+
+/**
+ * vpif_g_priority() - get priority handler
+ * @file: file ptr
+ * @priv: file handle
+ * @prio: ptr to v4l2_priority structure
+ */
+static int vpif_g_priority(struct file *file, void *priv,
+			   enum v4l2_priority *prio)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	*prio = v4l2_prio_max(&ch->prio);
+
+	return 0;
+}
+
+/**
+ * vpif_s_priority() - set priority handler
+ * @file: file ptr
+ * @priv: file handle
+ * @prio: ptr to v4l2_priority structure
+ */
+static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_prio_change(&ch->prio, &fh->prio, p);
+}
+
+/**
+ * vpif_cropcap() - cropcap handler
+ * @file: file ptr
+ * @priv: file handle
+ * @crop: ptr to v4l2_cropcap structure
+ */
+static int vpif_cropcap(struct file *file, void *priv,
+			struct v4l2_cropcap *crop)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != crop->type)
+		return -EINVAL;
+
+	crop->bounds.left = 0;
+	crop->bounds.top = 0;
+	crop->bounds.height = common->height;
+	crop->bounds.width = common->width;
+	crop->defrect = crop->bounds;
+	return 0;
+}
+
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+		struct v4l2_dv_enum_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+			video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_query_dv_preset() - QUERY_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_query_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+		       video, query_dv_preset, preset);
+}
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	if (common->started) {
+		vpif_dbg(1, debug, "streaming in progress\n");
+		return -EBUSY;
+	}
+
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (ret)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Store the DV preset and configure the sub device accordingly */
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+
+	ch->video.dv_preset = preset->preset;
+	ch->video.stdid = V4L2_STD_UNKNOWN;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_std_info(ch)) {
+		vpif_dbg(1, debug, "Error getting the standard info\n");
+		ret = -EINVAL;
+	} else {
+		/* Configure the default format information */
+		vpif_config_format(ch);
+
+		ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+				video, s_dv_preset, preset);
+	}
+
+	mutex_unlock(&common->lock);
+
+	return ret;
+}
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	preset->preset = ch->video.dv_preset;
+
+	return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+	int ret;
+
+	if (timings->type != V4L2_DV_BT_656_1120) {
+		vpif_dbg(2, debug, "Timing type not defined\n");
+		return -EINVAL;
+	}
+
+	/* Configure subdevice timings, if any */
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+			video, s_dv_timings, timings);
+	if (ret == -ENOIOCTLCMD) {
+		vpif_dbg(2, debug, "Custom DV timings not supported by "
+				"subdevice\n");
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		vpif_dbg(2, debug, "Error setting custom DV timings\n");
+		return ret;
+	}
+
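+	/*
+	 * The VPIF timing values below are derived from these fields, so the
+	 * active size and the horizontal and vertical blanking intervals
+	 * must be supplied.
+	 */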
+	if (!(timings->bt.width && timings->bt.height &&
+				(timings->bt.hbackporch ||
+				 timings->bt.hfrontporch ||
+				 timings->bt.hsync) &&
+				timings->bt.vfrontporch &&
+				(timings->bt.vbackporch ||
+				 timings->bt.vsync))) {
+		vpif_dbg(2, debug, "Timings for width, height, "
+				"horizontal back porch, horizontal sync, "
+				"horizontal front porch, vertical back porch, "
+				"vertical sync and vertical back porch "
+				"must be defined\n");
+		return -EINVAL;
+	}
+
+	*bt = timings->bt;
+
+	/* Configure video port timings */
+
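+	/*
+	 * eav2sav/sav2eav give the horizontal blanking and active widths,
+	 * while l1/l3/l5 (and l7/l9/l11 for interlaced timings) are the line
+	 * numbers that delimit the vertical blanking and active regions of
+	 * each field.
+	 */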
+	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+		bt->hsync - 8;
+	std_info->sav2eav = bt->width;
+
+	std_info->l1 = 1;
+	std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+	if (bt->interlaced) {
+		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+			std_info->vsize = bt->height * 2 +
+				bt->vfrontporch + bt->vsync + bt->vbackporch +
+				bt->il_vfrontporch + bt->il_vsync +
+				bt->il_vbackporch;
+			std_info->l5 = std_info->vsize/2 -
+				(bt->vfrontporch - 1);
+			std_info->l7 = std_info->vsize/2 + 1;
+			std_info->l9 = std_info->l7 + bt->il_vsync +
+				bt->il_vbackporch + 1;
+			std_info->l11 = std_info->vsize -
+				(bt->il_vfrontporch - 1);
+		} else {
+			vpif_dbg(2, debug, "Required timing values for "
+					"interlaced BT format missing\n");
+			return -EINVAL;
+		}
+	} else {
+		std_info->vsize = bt->height + bt->vfrontporch +
+			bt->vsync + bt->vbackporch;
+		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+	}
+	strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+	std_info->width = bt->width;
+	std_info->height = bt->height;
+	std_info->frm_fmt = bt->interlaced ? 0 : 1;
+	std_info->ycmux_mode = 0;
+	std_info->capture_format = 0;
+	std_info->vbi_supported = 0;
+	std_info->hd_sd = 1;
+	std_info->stdid = 0;
+	std_info->dv_preset = V4L2_DV_INVALID;
+
+	vid_ch->stdid = 0;
+	vid_ch->dv_preset = V4L2_DV_INVALID;
+	return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+	timings->bt = *bt;
+
+	return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+		vpif_dbg(2, debug, "match_type is invalid.\n");
+		return -EINVAL;
+	}
+
+	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+			g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+			s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+	/* status for sub devices */
+	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+	return 0;
+}
+
+/* vpif capture ioctl operations */
+static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
+	.vidioc_querycap        	= vpif_querycap,
+	.vidioc_g_priority		= vpif_g_priority,
+	.vidioc_s_priority		= vpif_s_priority,
+	.vidioc_enum_fmt_vid_cap	= vpif_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap  		= vpif_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap		= vpif_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap		= vpif_try_fmt_vid_cap,
+	.vidioc_enum_input		= vpif_enum_input,
+	.vidioc_s_input			= vpif_s_input,
+	.vidioc_g_input			= vpif_g_input,
+	.vidioc_reqbufs         	= vpif_reqbufs,
+	.vidioc_querybuf        	= vpif_querybuf,
+	.vidioc_querystd		= vpif_querystd,
+	.vidioc_s_std           	= vpif_s_std,
+	.vidioc_g_std			= vpif_g_std,
+	.vidioc_qbuf            	= vpif_qbuf,
+	.vidioc_dqbuf           	= vpif_dqbuf,
+	.vidioc_streamon        	= vpif_streamon,
+	.vidioc_streamoff       	= vpif_streamoff,
+	.vidioc_cropcap         	= vpif_cropcap,
+	.vidioc_enum_dv_presets         = vpif_enum_dv_presets,
+	.vidioc_s_dv_preset             = vpif_s_dv_preset,
+	.vidioc_g_dv_preset             = vpif_g_dv_preset,
+	.vidioc_query_dv_preset         = vpif_query_dv_preset,
+	.vidioc_s_dv_timings            = vpif_s_dv_timings,
+	.vidioc_g_dv_timings            = vpif_g_dv_timings,
+	.vidioc_g_chip_ident		= vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= vpif_dbg_g_register,
+	.vidioc_s_register		= vpif_dbg_s_register,
+#endif
+	.vidioc_log_status		= vpif_log_status,
+};
+
+/* vpif file operations */
+static struct v4l2_file_operations vpif_fops = {
+	.owner = THIS_MODULE,
+	.open = vpif_open,
+	.release = vpif_release,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap = vpif_mmap,
+	.poll = vpif_poll
+};
+
+/* vpif video template */
+static struct video_device vpif_video_template = {
+	.name		= "vpif",
+	.fops		= &vpif_fops,
+	.minor		= -1,
+	.ioctl_ops	= &vpif_ioctl_ops,
+};
+
+/**
+ * initialize_vpif() - Initialize vpif data structures
+ *
+ * Allocate memory for data structures and initialize them
+ */
+static int initialize_vpif(void)
+{
+	int err = 0, i, j;
+	int free_channel_objects_index;
+
+	/* Default number of buffers should be 3 */
+	if ((ch0_numbuffers > 0) &&
+	    (ch0_numbuffers < config_params.min_numbuffers))
+		ch0_numbuffers = config_params.min_numbuffers;
+	if ((ch1_numbuffers > 0) &&
+	    (ch1_numbuffers < config_params.min_numbuffers))
+		ch1_numbuffers = config_params.min_numbuffers;
+
+	/* Set buffer size to min buffers size if it is invalid */
+	if (ch0_bufsize < config_params.min_bufsize[VPIF_CHANNEL0_VIDEO])
+		ch0_bufsize =
+		    config_params.min_bufsize[VPIF_CHANNEL0_VIDEO];
+	if (ch1_bufsize < config_params.min_bufsize[VPIF_CHANNEL1_VIDEO])
+		ch1_bufsize =
+		    config_params.min_bufsize[VPIF_CHANNEL1_VIDEO];
+
+	config_params.numbuffers[VPIF_CHANNEL0_VIDEO] = ch0_numbuffers;
+	config_params.numbuffers[VPIF_CHANNEL1_VIDEO] = ch1_numbuffers;
+	if (ch0_numbuffers) {
+		config_params.channel_bufsize[VPIF_CHANNEL0_VIDEO]
+		    = ch0_bufsize;
+	}
+	if (ch1_numbuffers) {
+		config_params.channel_bufsize[VPIF_CHANNEL1_VIDEO]
+		    = ch1_bufsize;
+	}
+
+	/* Allocate memory for the channel objects */
+	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+		vpif_obj.dev[i] =
+		    kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL);
+		/* If memory allocation fails, return error */
+		if (!vpif_obj.dev[i]) {
+			free_channel_objects_index = i;
+			err = -ENOMEM;
+			goto vpif_init_free_channel_objects;
+		}
+	}
+	return 0;
+
+vpif_init_free_channel_objects:
+	for (j = 0; j < free_channel_objects_index; j++)
+		kfree(vpif_obj.dev[j]);
+	return err;
+}
+
+/**
+ * vpif_probe : This function probes the vpif capture driver
+ * @pdev: platform device pointer
+ *
+ * This creates device entries by registering itself to the V4L2 driver and
+ * initializes fields of each channel object
+ */
+static __init int vpif_probe(struct platform_device *pdev)
+{
+	struct vpif_subdev_info *subdevdata;
+	struct vpif_capture_config *config;
+	int i, j, k, m, q, err;
+	struct i2c_adapter *i2c_adap;
+	struct channel_obj *ch;
+	struct common_obj *common;
+	struct video_device *vfd;
+	struct resource *res;
+	int subdev_count;
+	size_t size;
+
+	vpif_dev = &pdev->dev;
+
+	err = initialize_vpif();
+	if (err) {
+		v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
+		return err;
+	}
+
+	err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+	if (err) {
+		v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+		return err;
+	}
+
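+	/*
+	 * Request a shared interrupt for every IRQ line of every channel;
+	 * the channel id is passed as the dev_id cookie so the handler can
+	 * tell the channels apart.
+	 */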
+	k = 0;
+	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
+		for (i = res->start; i <= res->end; i++) {
+			if (request_irq(i, vpif_channel_isr, IRQF_SHARED,
+					"VPIF_Capture",
+				(void *)(&vpif_obj.dev[k]->channel_id))) {
+				err = -EBUSY;
+				i--;
+				goto vpif_int_err;
+			}
+		}
+		k++;
+	}
+
+	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		/* Allocate memory for video device */
+		vfd = video_device_alloc();
+		if (NULL == vfd) {
+			for (j = 0; j < i; j++) {
+				ch = vpif_obj.dev[j];
+				video_device_release(ch->video_dev);
+			}
+			err = -ENOMEM;
+			goto vpif_dev_alloc_err;
+		}
+
+		/* Initialize field of video device */
+		*vfd = vpif_video_template;
+		vfd->v4l2_dev = &vpif_obj.v4l2_dev;
+		vfd->release = video_device_release;
+		snprintf(vfd->name, sizeof(vfd->name),
+			 "VPIF_Capture_DRIVER_V%s",
+			 VPIF_CAPTURE_VERSION);
+		/* Set video_dev to the video device */
+		ch->video_dev = vfd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res) {
+		size = resource_size(res);
+		/* The memory is divided into two equal halves, one per
+		 * channel; when we have HD the two halves can be added
+		 * together
+		 */
+		for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+			ch = vpif_obj.dev[j];
+			ch->channel_id = j;
+			/* only enabled if second resource exists */
+			config_params.video_limit[ch->channel_id] = 0;
+			if (size)
+				config_params.video_limit[ch->channel_id] =
+									size/2;
+		}
+	}
+
+	for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+		ch = vpif_obj.dev[j];
+		ch->channel_id = j;
+		common = &(ch->common[VPIF_VIDEO_INDEX]);
+		spin_lock_init(&common->irqlock);
+		mutex_init(&common->lock);
+		ch->video_dev->lock = &common->lock;
+		/* Initialize prio member of channel object */
+		v4l2_prio_init(&ch->prio);
+		err = video_register_device(ch->video_dev,
+					    VFL_TYPE_GRABBER, (j ? 1 : 0));
+		if (err)
+			goto probe_out;
+
+		video_set_drvdata(ch->video_dev, ch);
+
+	}
+
+	i2c_adap = i2c_get_adapter(1);
+	config = pdev->dev.platform_data;
+
+	subdev_count = config->subdev_count;
+	vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+				GFP_KERNEL);
+	if (vpif_obj.sd == NULL) {
+		vpif_err("unable to allocate memory for subdevice pointers\n");
+		err = -ENOMEM;
+		goto probe_out;
+	}
+
+	for (i = 0; i < subdev_count; i++) {
+		subdevdata = &config->subdev_info[i];
+		vpif_obj.sd[i] =
+			v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+						  i2c_adap,
+						  &subdevdata->board_info,
+						  NULL);
+
+		if (!vpif_obj.sd[i]) {
+			vpif_err("Error registering v4l2 subdevice\n");
+			goto probe_subdev_out;
+		}
+		v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n",
+			  subdevdata->name);
+
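+		/*
+		 * Give each sub device its own group id bit so that group
+		 * calls can later be directed at individual sub devices.
+		 */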
+		if (vpif_obj.sd[i])
+			vpif_obj.sd[i]->grp_id = 1 << i;
+	}
+
+	v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
+	return 0;
+
+probe_subdev_out:
+	/* free sub devices memory */
+	kfree(vpif_obj.sd);
+
+	j = VPIF_CAPTURE_MAX_DEVICES;
+probe_out:
+	for (k = 0; k < j; k++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[k];
+		/* Unregister video device */
+		video_unregister_device(ch->video_dev);
+	}
+
+vpif_dev_alloc_err:
+	k = VPIF_CAPTURE_MAX_DEVICES-1;
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, k);
+	i = res->end;
+
+vpif_int_err:
+	for (q = k; q >= 0; q--) {
+		for (m = i; m >= (int)res->start; m--)
+			free_irq(m, (void *)(&vpif_obj.dev[q]->channel_id));
+
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, q-1);
+		if (res)
+			i = res->end;
+	}
+	v4l2_device_unregister(&vpif_obj.v4l2_dev);
+	return err;
+}
+
+/**
+ * vpif_remove() - driver remove handler
+ * @device: ptr to platform device structure
+ *
+ * The video device is unregistered
+ */
+static int vpif_remove(struct platform_device *device)
+{
+	int i;
+	struct channel_obj *ch;
+
+	v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+	/* un-register device */
+	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		/* Unregister video device */
+		video_unregister_device(ch->video_dev);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * vpif_suspend: vpif device suspend
+ */
+static int vpif_suspend(struct device *dev)
+{
+
+	struct common_obj *common;
+	struct channel_obj *ch;
+	int i;
+
+	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		common = &ch->common[VPIF_VIDEO_INDEX];
+		mutex_lock(&common->lock);
+		if (ch->usrs && common->io_usrs) {
+			/* Disable channel */
+			if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+				enable_channel0(0);
+				channel0_intr_enable(0);
+			}
+			if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+			    common->started == 2) {
+				enable_channel1(0);
+				channel1_intr_enable(0);
+			}
+		}
+		mutex_unlock(&common->lock);
+	}
+
+	return 0;
+}
+
+/*
+ * vpif_resume: vpif device resume
+ */
+static int vpif_resume(struct device *dev)
+{
+	struct common_obj *common;
+	struct channel_obj *ch;
+	int i;
+
+	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		common = &ch->common[VPIF_VIDEO_INDEX];
+		mutex_lock(&common->lock);
+		if (ch->usrs && common->io_usrs) {
+			/* Enable channel */
+			if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+				enable_channel0(1);
+				channel0_intr_enable(1);
+			}
+			if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+			    common->started == 2) {
+				enable_channel1(1);
+				channel1_intr_enable(1);
+			}
+		}
+		mutex_unlock(&common->lock);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops vpif_dev_pm_ops = {
+	.suspend = vpif_suspend,
+	.resume = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_dev_pm_ops)
+#else
+#define vpif_pm_ops NULL
+#endif
+
+static __refdata struct platform_driver vpif_driver = {
+	.driver	= {
+		.name	= "vpif_capture",
+		.owner	= THIS_MODULE,
+		.pm	= vpif_pm_ops,
+	},
+	.probe = vpif_probe,
+	.remove = vpif_remove,
+};
+
+/**
+ * vpif_init: initialize the vpif driver
+ *
+ * This function registers the device and driver with the kernel, requests
+ * the irq handler and allocates memory for the channel objects
+ */
+static __init int vpif_init(void)
+{
+	return platform_driver_register(&vpif_driver);
+}
+
+/**
+ * vpif_cleanup : This function clean up the vpif capture resources
+ *
+ * This unregisters the device and driver from the kernel, frees the
+ * requested irq handler and de-allocates the memory allocated for the
+ * channel objects.
+ */
+static void vpif_cleanup(void)
+{
+	struct platform_device *pdev;
+	struct resource *res;
+	int irq_num;
+	int i = 0;
+
+	pdev = container_of(vpif_dev, struct platform_device, dev);
+	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, i))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++)
+			free_irq(irq_num,
+				 (void *)(&vpif_obj.dev[i]->channel_id));
+		i++;
+	}
+
+	platform_driver_unregister(&vpif_driver);
+
+	kfree(vpif_obj.sd);
+	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++)
+		kfree(vpif_obj.dev[i]);
+}
+
+/* Function for module initialization and cleanup */
+module_init(vpif_init);
+module_exit(vpif_cleanup);
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
new file mode 100644
index 0000000..3511510
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2009 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef VPIF_CAPTURE_H
+#define VPIF_CAPTURE_H
+
+#ifdef __KERNEL__
+
+/* Header files */
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/davinci/vpif_types.h>
+
+#include "vpif.h"
+
+/* Macros */
+#define VPIF_CAPTURE_VERSION		"0.0.2"
+
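+/* Field layouts accepted by the capture driver */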
+#define VPIF_VALID_FIELD(field)		(((V4L2_FIELD_ANY == field) || \
+	(V4L2_FIELD_NONE == field)) || \
+	(((V4L2_FIELD_INTERLACED == field) || \
+	(V4L2_FIELD_SEQ_TB == field)) || \
+	(V4L2_FIELD_SEQ_BT == field)))
+
+#define VPIF_CAPTURE_MAX_DEVICES	2
+#define VPIF_VIDEO_INDEX		0
+#define VPIF_NUMBER_OF_OBJECTS		1
+
+/* Enumerated data type to give id to each device per channel */
+enum vpif_channel_id {
+	VPIF_CHANNEL0_VIDEO = 0,
+	VPIF_CHANNEL1_VIDEO,
+};
+
+struct video_obj {
+	enum v4l2_field buf_field;
+	/* Currently selected or default standard */
+	v4l2_std_id stdid;
+	u32 dv_preset;
+	struct v4l2_bt_timings bt_timings;
+	/* This is to track the last input that is passed to application */
+	u32 input_idx;
+};
+
+struct vpif_cap_buffer {
+	struct vb2_buffer vb;
+	struct list_head list;
+};
+
+struct common_obj {
+	/* Pointer pointing to current v4l2_buffer */
+	struct vpif_cap_buffer *cur_frm;
+	/* Pointer pointing to next v4l2_buffer */
+	struct vpif_cap_buffer *next_frm;
+	/*
+	 * This field keeps track of type of buffer exchange mechanism
+	 * user has selected
+	 */
+	enum v4l2_memory memory;
+	/* Used to store pixel format */
+	struct v4l2_format fmt;
+	/* Buffer queue used in video-buf */
+	struct vb2_queue buffer_queue;
+	/* allocator-specific contexts for each plane */
+	struct vb2_alloc_ctx *alloc_ctx;
+	/* Queue of filled frames */
+	struct list_head dma_queue;
+	/* Used in video-buf */
+	spinlock_t irqlock;
+	/* lock used to access this structure */
+	struct mutex lock;
+	/* number of users performing IO */
+	u32 io_usrs;
+	/* Indicates whether streaming started */
+	u8 started;
+	/* Function pointer to set the addresses */
+	void (*set_addr) (unsigned long, unsigned long, unsigned long,
+			  unsigned long);
+	/* offset where Y top starts from the starting of the buffer */
+	u32 ytop_off;
+	/* offset where Y bottom starts from the starting of the buffer */
+	u32 ybtm_off;
+	/* offset where C top starts from the starting of the buffer */
+	u32 ctop_off;
+	/* offset where C bottom starts from the starting of the buffer */
+	u32 cbtm_off;
+	/* Indicates width of the image data */
+	u32 width;
+	/* Indicates height of the image data */
+	u32 height;
+};
+
+struct channel_obj {
+	/* Identifies video device for this channel */
+	struct video_device *video_dev;
+	/* Used to keep track of state of the priority */
+	struct v4l2_prio_state prio;
+	/* number of open instances of the channel */
+	int usrs;
+	/* Indicates id of the field which is being captured */
+	u32 field_id;
+	/* flag to indicate whether decoder is initialized */
+	u8 initialized;
+	/* Identifies channel */
+	enum vpif_channel_id channel_id;
+	/* index into sd table */
+	int curr_sd_index;
+	/* ptr to current sub device information */
+	struct vpif_subdev_info *curr_subdev_info;
+	/* vpif configuration params */
+	struct vpif_params vpifparams;
+	/* common object array */
+	struct common_obj common[VPIF_NUMBER_OF_OBJECTS];
+	/* video object */
+	struct video_obj video;
+};
+
+/* File handle structure */
+struct vpif_fh {
+	/* pointer to channel object for opened device */
+	struct channel_obj *channel;
+	/* Indicates whether this file handle is doing IO */
+	u8 io_allowed[VPIF_NUMBER_OF_OBJECTS];
+	/* Used to keep track of the priority of this instance */
+	enum v4l2_priority prio;
+	/* Used to indicate whether the channel is initialized or not */
+	u8 initialized;
+};
+
+struct vpif_device {
+	struct v4l2_device v4l2_dev;
+	struct channel_obj *dev[VPIF_CAPTURE_NUM_CHANNELS];
+	struct v4l2_subdev **sd;
+};
+
+struct vpif_config_params {
+	u8 min_numbuffers;
+	u8 numbuffers[VPIF_CAPTURE_NUM_CHANNELS];
+	s8 device_type;
+	u32 min_bufsize[VPIF_CAPTURE_NUM_CHANNELS];
+	u32 channel_bufsize[VPIF_CAPTURE_NUM_CHANNELS];
+	u8 default_device[VPIF_CAPTURE_NUM_CHANNELS];
+	u32 video_limit[VPIF_CAPTURE_NUM_CHANNELS];
+	u8 max_device_type;
+};
+/* Struct which keeps track of the line numbers for the sliced vbi service */
+struct vpif_service_line {
+	u16 service_id;
+	u16 service_line[2];
+};
+#endif				/* End of __KERNEL__ */
+#endif				/* VPIF_CAPTURE_H */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
new file mode 100644
index 0000000..4a24848
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -0,0 +1,2010 @@
+/*
+ * vpif-display - VPIF display driver
+ * Display driver for TI DaVinci VPIF
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+
+#include <media/adv7343.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+
+#include "vpif_display.h"
+#include "vpif.h"
+
+MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_DISPLAY_VERSION);
+
+#define VPIF_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
+
+#define vpif_err(fmt, arg...)	v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
+#define vpif_dbg(level, debug, fmt, arg...)	\
+		v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
+
+static int debug = 1;
+static u32 ch2_numbuffers = 3;
+static u32 ch3_numbuffers = 3;
+static u32 ch2_bufsize = 1920 * 1080 * 2;
+static u32 ch3_bufsize = 720 * 576 * 2;
+
+module_param(debug, int, 0644);
+module_param(ch2_numbuffers, uint, S_IRUGO);
+module_param(ch3_numbuffers, uint, S_IRUGO);
+module_param(ch2_bufsize, uint, S_IRUGO);
+module_param(ch3_bufsize, uint, S_IRUGO);
+
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+MODULE_PARM_DESC(ch2_numbuffers, "Channel2 buffer count (default:3)");
+MODULE_PARM_DESC(ch3_numbuffers, "Channel3 buffer count (default:3)");
+MODULE_PARM_DESC(ch2_bufsize, "Channel2 buffer size (default:1920 x 1080 x 2)");
+MODULE_PARM_DESC(ch3_bufsize, "Channel3 buffer size (default:720 x 576 x 2)");
+
+static struct vpif_config_params config_params = {
+	.min_numbuffers		= 3,
+	.numbuffers[0]		= 3,
+	.numbuffers[1]		= 3,
+	.min_bufsize[0]		= 720 * 480 * 2,
+	.min_bufsize[1]		= 720 * 480 * 2,
+	.channel_bufsize[0]	= 1920 * 1080 * 2,
+	.channel_bufsize[1]	= 720 * 576 * 2,
+};
+
+static struct vpif_device vpif_obj = { {NULL} };
+static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
+
+/*
+ * vpif_buffer_prepare: This is the callback function called from vb2_qbuf().
+ * The plane payload is set up and the buffer DMA addresses are checked for
+ * proper alignment.
+ */
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_queue *q = vb->vb2_queue;
+	struct common_obj *common;
+	unsigned long addr;
+
+	common = &fh->channel->common[VPIF_VIDEO_INDEX];
+	if (vb->state != VB2_BUF_STATE_ACTIVE &&
+		vb->state != VB2_BUF_STATE_PREPARED) {
+		vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+		if (vb2_plane_vaddr(vb, 0) &&
+		vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+			goto buf_align_exit;
+
+		addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+		if (q->streaming &&
+			(V4L2_BUF_TYPE_SLICED_VBI_OUTPUT != q->type)) {
+			if (!ISALIGNED(addr + common->ytop_off) ||
+			!ISALIGNED(addr + common->ybtm_off) ||
+			!ISALIGNED(addr + common->ctop_off) ||
+			!ISALIGNED(addr + common->cbtm_off))
+				goto buf_align_exit;
+		}
+	}
+	return 0;
+
+buf_align_exit:
+	vpif_err("buffer offset not aligned to 8 bytes\n");
+	return -EINVAL;
+}
+
+/*
+ * vpif_buffer_queue_setup: This function allocates memory for the buffers
+ */
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	unsigned long size;
+
+	if (V4L2_MEMORY_MMAP == common->memory) {
+		size = config_params.channel_bufsize[ch->channel_id];
+		/*
+		 * Check whether the buffer size exceeds the available memory.
+		 * ycmux_mode = 0 means 1 channel mode (HD) and
+		 * ycmux_mode = 1 means 2 channel mode (SD).
+		 */
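+		/*
+		 * Trim the requested buffer count until the total fits in
+		 * the memory reserved for video buffers.
+		 */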
+		if (ch->vpifparams.std_info.ycmux_mode == 0) {
+			if (config_params.video_limit[ch->channel_id])
+				while (size * *nbuffers >
+					(config_params.video_limit[0]
+						+ config_params.video_limit[1]))
+					(*nbuffers)--;
+		} else {
+			if (config_params.video_limit[ch->channel_id])
+				while (size * *nbuffers >
+				config_params.video_limit[ch->channel_id])
+					(*nbuffers)--;
+		}
+	} else {
+		size = common->fmt.fmt.pix.sizeimage;
+	}
+
+	if (*nbuffers < config_params.min_numbuffers)
+		*nbuffers = config_params.min_numbuffers;
+
+	*nplanes = 1;
+	sizes[0] = size;
+	alloc_ctxs[0] = common->alloc_ctx;
+	return 0;
+}
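+
+/*
+ * Worked example (illustrative numbers, not taken from any board): with the
+ * default ch2 buffer size of 1920 * 1080 * 2 = 4147200 bytes and a channel
+ * limit that works out to 16 MiB, the loop above would trim a request for 8
+ * buffers down to 4, since 4 * 4147200 bytes fit in 16 MiB while 5 * 4147200
+ * do not. The video_limit values themselves are filled in from the platform
+ * memory resource in vpif_probe().
+ */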
+
+/*
+ * vpif_buffer_queue: This function adds the buffer to DMA queue
+ */
+static void vpif_buffer_queue(struct vb2_buffer *vb)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+	struct vpif_disp_buffer *buf = container_of(vb,
+				struct vpif_disp_buffer, vb);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	/* add the buffer to the DMA queue */
+	list_add_tail(&buf->list, &common->dma_queue);
+}
+
+/*
+ * vpif_buf_cleanup: This function is called from the videobuf2 layer to
+ * free memory allocated to the buffers
+ */
+static void vpif_buf_cleanup(struct vb2_buffer *vb)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+	struct vpif_disp_buffer *buf = container_of(vb,
+					struct vpif_disp_buffer, vb);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+	unsigned long flags;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	spin_lock_irqsave(&common->irqlock, flags);
+	if (vb->state == VB2_BUF_STATE_ACTIVE)
+		list_del_init(&buf->list);
+	spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+static void vpif_wait_prepare(struct vb2_queue *vq)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+	mutex_unlock(&common->lock);
+}
+
+static void vpif_wait_finish(struct vb2_queue *vq)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+	mutex_lock(&common->lock);
+}
+
+static int vpif_buffer_init(struct vb2_buffer *vb)
+{
+	struct vpif_disp_buffer *buf = container_of(vb,
+					struct vpif_disp_buffer, vb);
+
+	INIT_LIST_HEAD(&buf->list);
+
+	return 0;
+}
+
+static u8 channel_first_int[VPIF_NUMOBJECTS][2] = { {1, 1} };
+
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct vpif_display_config *vpif_config_data =
+					vpif_dev->platform_data;
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct vpif_params *vpif = &ch->vpifparams;
+	unsigned long addr = 0;
+	int ret;
+
+	/* If buffer queue is empty, return error */
+	if (list_empty(&common->dma_queue)) {
+		vpif_err("buffer queue is empty\n");
+		return -EIO;
+	}
+
+	/* Get the next frame from the buffer queue */
+	common->next_frm = common->cur_frm =
+			    list_entry(common->dma_queue.next,
+				       struct vpif_disp_buffer, list);
+
+	list_del(&common->cur_frm->list);
+	/* Mark state of the current frame to active */
+	common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+
+	/* Initialize field_id and started member */
+	ch->field_id = 0;
+	common->started = 1;
+	addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+	/* Calculate the offset for Y and C data  in the buffer */
+	vpif_calculate_offsets(ch);
+
+	if ((ch->vpifparams.std_info.frm_fmt &&
+		((common->fmt.fmt.pix.field != V4L2_FIELD_NONE)
+		&& (common->fmt.fmt.pix.field != V4L2_FIELD_ANY)))
+		|| (!ch->vpifparams.std_info.frm_fmt
+		&& (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
+		vpif_err("conflict in field format and std format\n");
+		return -EINVAL;
+	}
+
+	/* clock settings */
+	ret = vpif_config_data->set_clock(ch->vpifparams.std_info.ycmux_mode,
+					  ch->vpifparams.std_info.hd_sd);
+	if (ret < 0) {
+		vpif_err("can't set clock\n");
+		return ret;
+	}
+
+	/* set the parameters and addresses */
+	ret = vpif_set_video_params(vpif, ch->channel_id + 2);
+	if (ret < 0)
+		return ret;
+
+	common->started = ret;
+	vpif_config_addr(ch, ret);
+	common->set_addr((addr + common->ytop_off),
+			    (addr + common->ybtm_off),
+			    (addr + common->ctop_off),
+			    (addr + common->cbtm_off));
+
+	/* Set the interrupts for both fields in the VPIF register and
+	 * enable the channel in the VPIF register */
+	if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+		channel2_intr_assert();
+		channel2_intr_enable(1);
+		enable_channel2(1);
+		if (vpif_config_data->ch2_clip_en)
+			channel2_clipping_enable(1);
+	}
+
+	if ((VPIF_CHANNEL3_VIDEO == ch->channel_id)
+		|| (common->started == 2)) {
+		channel3_intr_assert();
+		channel3_intr_enable(1);
+		enable_channel3(1);
+		if (vpif_config_data->ch3_clip_en)
+			channel3_clipping_enable(1);
+	}
+	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+
+	return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int vpif_stop_streaming(struct vb2_queue *vq)
+{
+	struct vpif_fh *fh = vb2_get_drv_priv(vq);
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+
+	if (!vb2_is_streaming(vq))
+		return 0;
+
+	common = &ch->common[VPIF_VIDEO_INDEX];
+
+	/* release all active buffers */
+	while (!list_empty(&common->dma_queue)) {
+		common->next_frm = list_entry(common->dma_queue.next,
+						struct vpif_disp_buffer, list);
+		list_del(&common->next_frm->list);
+		vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	return 0;
+}
+
+static struct vb2_ops video_qops = {
+	.queue_setup		= vpif_buffer_queue_setup,
+	.wait_prepare		= vpif_wait_prepare,
+	.wait_finish		= vpif_wait_finish,
+	.buf_init		= vpif_buffer_init,
+	.buf_prepare		= vpif_buffer_prepare,
+	.start_streaming	= vpif_start_streaming,
+	.stop_streaming		= vpif_stop_streaming,
+	.buf_cleanup		= vpif_buf_cleanup,
+	.buf_queue		= vpif_buffer_queue,
+};
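+
+/*
+ * These callbacks are wired into the vb2_queue in vpif_reqbufs() below
+ * (q->ops = &video_qops), together with the DMA-contig memops and the
+ * vpif_disp_buffer buffer size.
+ */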
+
+static void process_progressive_mode(struct common_obj *common)
+{
+	unsigned long addr = 0;
+
+	/* Get the next buffer from buffer queue */
+	common->next_frm = list_entry(common->dma_queue.next,
+				struct vpif_disp_buffer, list);
+	/* Remove that buffer from the buffer queue */
+	list_del(&common->next_frm->list);
+	/* Mark status of the buffer as active */
+	common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+
+	/* Set top and bottom field addrs in VPIF registers */
+	addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
+	common->set_addr(addr + common->ytop_off,
+				 addr + common->ybtm_off,
+				 addr + common->ctop_off,
+				 addr + common->cbtm_off);
+}
+
+static void process_interlaced_mode(int fid, struct common_obj *common)
+{
+	/* device field id and local field id are in sync */
+	/* If this is even field */
+	if (0 == fid) {
+		if (common->cur_frm == common->next_frm)
+			return;
+
+		/* One frame has been displayed. If the next frame is
+		 * available, release cur_frm and move on. */
+		/* Copy frame display time */
+		do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+		/* Change status of the cur_frm */
+		vb2_buffer_done(&common->cur_frm->vb,
+					    VB2_BUF_STATE_DONE);
+		/* Make cur_frm pointing to next_frm */
+		common->cur_frm = common->next_frm;
+
+	} else if (1 == fid) {	/* odd field */
+		if (list_empty(&common->dma_queue)
+		    || (common->cur_frm != common->next_frm)) {
+			return;
+		}
+		/* One field has been displayed. Configure the next
+		 * frame if it is available; otherwise hold on to the
+		 * current frame. */
+		/* Get next from the buffer queue */
+		process_progressive_mode(common);
+
+	}
+}
+
+/*
+ * vpif_channel_isr: It changes status of the displayed buffer, takes next
+ * buffer from the queue and sets its address in VPIF registers
+ */
+static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
+{
+	struct vpif_device *dev = &vpif_obj;
+	struct channel_obj *ch;
+	struct common_obj *common;
+	enum v4l2_field field;
+	int fid = -1, i;
+	int channel_id = 0;
+
+	channel_id = *(int *)(dev_id);
+	if (!vpif_intr_status(channel_id + 2))
+		return IRQ_NONE;
+
+	ch = dev->dev[channel_id];
+	field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
+	for (i = 0; i < VPIF_NUMOBJECTS; i++) {
+		common = &ch->common[i];
+		/* Skip the channel if streaming has not started on it */
+		if (0 == common->started)
+			continue;
+
+		if (1 == ch->vpifparams.std_info.frm_fmt) {
+			if (list_empty(&common->dma_queue))
+				continue;
+
+			/* Progressive mode */
+			if (!channel_first_int[i][channel_id]) {
+				/* Mark the status of cur_frm as done
+				 * and release it back to videobuf2 */
+				do_gettimeofday(&common->cur_frm->vb.
+						v4l2_buf.timestamp);
+				vb2_buffer_done(&common->cur_frm->vb,
+					    VB2_BUF_STATE_DONE);
+				/* Make cur_frm pointing to next_frm */
+				common->cur_frm = common->next_frm;
+			}
+
+			channel_first_int[i][channel_id] = 0;
+			process_progressive_mode(common);
+		} else {
+			/* Interlaced mode */
+			/* If it is first interrupt, ignore it */
+
+			if (channel_first_int[i][channel_id]) {
+				channel_first_int[i][channel_id] = 0;
+				continue;
+			}
+
+			if (0 == i) {
+				ch->field_id ^= 1;
+				/* Get field id from VPIF registers */
+				fid = vpif_channel_getfid(ch->channel_id + 2);
+				/* If fid does not match with stored field id */
+				if (fid != ch->field_id) {
+					/* Make them in sync */
+					if (0 == fid)
+						ch->field_id = fid;
+
+					return IRQ_HANDLED;
+				}
+			}
+			process_interlaced_mode(fid, common);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int vpif_update_std_info(struct channel_obj *ch)
+{
+	struct video_obj *vid_ch = &ch->video;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	const struct vpif_channel_config_params *config;
+
+	int i;
+
+	for (i = 0; i < vpif_ch_params_count; i++) {
+		config = &ch_params[i];
+		if (config->hd_sd == 0) {
+			vpif_dbg(2, debug, "SD format\n");
+			if (config->stdid & vid_ch->stdid) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		} else {
+			vpif_dbg(2, debug, "HD format\n");
+			if (config->dv_preset == vid_ch->dv_preset) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		}
+	}
+
+	if (i == vpif_ch_params_count) {
+		vpif_dbg(1, debug, "Format not found\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+	if (!vid_ch->stdid && !vid_ch->dv_preset && !vid_ch->bt_timings.height)
+		return -EINVAL;
+
+	if (vid_ch->stdid || vid_ch->dv_preset) {
+		if (vpif_update_std_info(ch))
+			return -EINVAL;
+	}
+
+	common->fmt.fmt.pix.width = std_info->width;
+	common->fmt.fmt.pix.height = std_info->height;
+	vpif_dbg(1, debug, "Pixel details: Width = %d,Height = %d\n",
+			common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
+
+	/* Set height and width parameters */
+	common->height = std_info->height;
+	common->width = std_info->width;
+
+	return 0;
+}
+
+/*
+ * vpif_calculate_offsets: This function calculates the buffer offsets for the
+ * Y and C data in the top and bottom fields
+ */
+static void vpif_calculate_offsets(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	enum v4l2_field field = common->fmt.fmt.pix.field;
+	struct video_obj *vid_ch = &ch->video;
+	unsigned int hpitch, vpitch, sizeimage;
+
+	if (V4L2_FIELD_ANY == common->fmt.fmt.pix.field) {
+		if (ch->vpifparams.std_info.frm_fmt)
+			vid_ch->buf_field = V4L2_FIELD_NONE;
+		else
+			vid_ch->buf_field = V4L2_FIELD_INTERLACED;
+	} else {
+		vid_ch->buf_field = common->fmt.fmt.pix.field;
+	}
+
+	sizeimage = common->fmt.fmt.pix.sizeimage;
+
+	hpitch = common->fmt.fmt.pix.bytesperline;
+	vpitch = sizeimage / (hpitch * 2);
+	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+		common->ytop_off = 0;
+		common->ybtm_off = hpitch;
+		common->ctop_off = sizeimage / 2;
+		common->cbtm_off = sizeimage / 2 + hpitch;
+	} else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
+		common->ytop_off = 0;
+		common->ybtm_off = sizeimage / 4;
+		common->ctop_off = sizeimage / 2;
+		common->cbtm_off = common->ctop_off + sizeimage / 4;
+	} else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
+		common->ybtm_off = 0;
+		common->ytop_off = sizeimage / 4;
+		common->cbtm_off = sizeimage / 2;
+		common->ctop_off = common->cbtm_off + sizeimage / 4;
+	}
+
+	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
+	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
+		vpifparams->video_params.storage_mode = 1;
+	} else {
+		vpifparams->video_params.storage_mode = 0;
+	}
+
+	if (ch->vpifparams.std_info.frm_fmt == 1) {
+		vpifparams->video_params.hpitch =
+		    common->fmt.fmt.pix.bytesperline;
+	} else {
+		if ((field == V4L2_FIELD_ANY) ||
+			(field == V4L2_FIELD_INTERLACED))
+			vpifparams->video_params.hpitch =
+			    common->fmt.fmt.pix.bytesperline * 2;
+		else
+			vpifparams->video_params.hpitch =
+			    common->fmt.fmt.pix.bytesperline;
+	}
+
+	ch->vpifparams.video_params.stdid = ch->vpifparams.std_info.stdid;
+}
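+
+/*
+ * Worked example (illustrative): for an interlaced 720x576 YUV422P frame with
+ * bytesperline = 720 and sizeimage = 720 * 576 * 2 = 829440, the interlaced
+ * branch above gives ytop_off = 0, ybtm_off = 720, ctop_off = 414720 and
+ * cbtm_off = 415440, i.e. the top and bottom field lines are interleaved
+ * within both the luma and the chroma halves of the buffer.
+ */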
+
+static void vpif_config_format(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	common->fmt.fmt.pix.field = V4L2_FIELD_ANY;
+	if (config_params.numbuffers[ch->channel_id] == 0)
+		common->memory = V4L2_MEMORY_USERPTR;
+	else
+		common->memory = V4L2_MEMORY_MMAP;
+
+	common->fmt.fmt.pix.sizeimage =
+			config_params.channel_bufsize[ch->channel_id];
+	common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+	common->fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+}
+
+static int vpif_check_format(struct channel_obj *ch,
+			     struct v4l2_pix_format *pixfmt)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	enum v4l2_field field = pixfmt->field;
+	u32 sizeimage, hpitch, vpitch;
+
+	if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
+		goto invalid_fmt_exit;
+
+	if (!(VPIF_VALID_FIELD(field)))
+		goto invalid_fmt_exit;
+
+	if (pixfmt->bytesperline <= 0)
+		goto invalid_pitch_exit;
+
+	sizeimage = pixfmt->sizeimage;
+
+	if (vpif_update_resolution(ch))
+		return -EINVAL;
+
+	hpitch = pixfmt->bytesperline;
+	vpitch = sizeimage / (hpitch * 2);
+
+	/* Check for valid value of pitch */
+	if ((hpitch < ch->vpifparams.std_info.width) ||
+	    (vpitch < ch->vpifparams.std_info.height))
+		goto invalid_pitch_exit;
+
+	/* Check for 8 byte alignment */
+	if (!ISALIGNED(hpitch)) {
+		vpif_err("invalid pitch alignment\n");
+		return -EINVAL;
+	}
+	pixfmt->width = common->fmt.fmt.pix.width;
+	pixfmt->height = common->fmt.fmt.pix.height;
+
+	return 0;
+
+invalid_fmt_exit:
+	vpif_err("invalid field format\n");
+	return -EINVAL;
+
+invalid_pitch_exit:
+	vpif_err("invalid pitch\n");
+	return -EINVAL;
+}
+
+static void vpif_config_addr(struct channel_obj *ch, int muxmode)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	if (VPIF_CHANNEL3_VIDEO == ch->channel_id) {
+		common->set_addr = ch3_set_videobuf_addr;
+	} else {
+		if (2 == muxmode)
+			common->set_addr = ch2_set_videobuf_addr_yc_nmux;
+		else
+			common->set_addr = ch2_set_videobuf_addr;
+	}
+}
+
+/*
+ * vpif_mmap: It is used to map kernel space buffers into user space
+ */
+static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+	struct vpif_fh *fh = filep->private_data;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+	int ret;
+
+	vpif_dbg(2, debug, "vpif_mmap\n");
+
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+	ret = vb2_mmap(&common->buffer_queue, vma);
+	mutex_unlock(&common->lock);
+	return ret;
+}
+
+/*
+ * vpif_poll: It is used for select/poll system call
+ */
+static unsigned int vpif_poll(struct file *filep, poll_table *wait)
+{
+	struct vpif_fh *fh = filep->private_data;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	unsigned int res = 0;
+
+	if (common->started) {
+		mutex_lock(&common->lock);
+		res = vb2_poll(&common->buffer_queue, filep, wait);
+		mutex_unlock(&common->lock);
+	}
+
+	return res;
+}
+
+/*
+ * vpif_open: It creates an object of the file handle structure and stores it
+ * in the private_data member of the file pointer
+ */
+static int vpif_open(struct file *filep)
+{
+	struct video_device *vdev = video_devdata(filep);
+	struct channel_obj *ch = video_get_drvdata(vdev);
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct vpif_fh *fh;
+
+	/* Allocate memory for the file handle object */
+	fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
+	if (fh == NULL) {
+		vpif_err("unable to allocate memory for file handle object\n");
+		return -ENOMEM;
+	}
+
+	if (mutex_lock_interruptible(&common->lock)) {
+		kfree(fh);
+		return -ERESTARTSYS;
+	}
+	/* store pointer to fh in private_data member of filep */
+	filep->private_data = fh;
+	fh->channel = ch;
+	fh->initialized = 0;
+	if (!ch->initialized) {
+		fh->initialized = 1;
+		ch->initialized = 1;
+		memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
+	}
+
+	/* Increment channel usrs counter */
+	atomic_inc(&ch->usrs);
+	/* Set io_allowed[VPIF_VIDEO_INDEX] member to false */
+	fh->io_allowed[VPIF_VIDEO_INDEX] = 0;
+	/* Initialize priority of this instance to default priority */
+	fh->prio = V4L2_PRIORITY_UNSET;
+	v4l2_prio_open(&ch->prio, &fh->prio);
+	mutex_unlock(&common->lock);
+
+	return 0;
+}
+
+/*
+ * vpif_release: This function deletes the buffer queue, frees the buffers and
+ * the vpif file handle
+ */
+static int vpif_release(struct file *filep)
+{
+	struct vpif_fh *fh = filep->private_data;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	mutex_lock(&common->lock);
+	/* if this instance is doing IO */
+	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		/* Reset io_usrs member of channel object */
+		common->io_usrs = 0;
+		/* Disable channel */
+		if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+			enable_channel2(0);
+			channel2_intr_enable(0);
+		}
+		if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+		    (2 == common->started)) {
+			enable_channel3(0);
+			channel3_intr_enable(0);
+		}
+		common->started = 0;
+
+		/* Free buffers allocated */
+		vb2_queue_release(&common->buffer_queue);
+		vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+
+		common->numbuffers =
+		    config_params.numbuffers[ch->channel_id];
+	}
+
+	/* Decrement channel usrs counter */
+	atomic_dec(&ch->usrs);
+	/* If this file handle has initialized the encoder device, reset it */
+	if (fh->initialized)
+		ch->initialized = 0;
+
+	/* Close the priority */
+	v4l2_prio_close(&ch->prio, fh->prio);
+	filep->private_data = NULL;
+	fh->initialized = 0;
+	mutex_unlock(&common->lock);
+	kfree(fh);
+
+	return 0;
+}
+
+/* functions implementing ioctls */
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
+static int vpif_querycap(struct file *file, void  *priv,
+				struct v4l2_capability *cap)
+{
+	struct vpif_display_config *config = vpif_dev->platform_data;
+
+	cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+	strlcpy(cap->driver, "vpif display", sizeof(cap->driver));
+	strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info));
+	strlcpy(cap->card, config->card_name, sizeof(cap->card));
+
+	return 0;
+}
+
+static int vpif_enum_fmt_vid_out(struct file *file, void  *priv,
+					struct v4l2_fmtdesc *fmt)
+{
+	if (fmt->index != 0) {
+		vpif_err("Invalid format index\n");
+		return -EINVAL;
+	}
+
+	/* Fill in the information about format */
+	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	strcpy(fmt->description, "YCbCr4:2:2 YC Planar");
+	fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+
+	return 0;
+}
+
+static int vpif_g_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	/* Check the validity of the buffer type */
+	if (common->fmt.type != fmt->type)
+		return -EINVAL;
+
+	if (vpif_update_resolution(ch))
+		return -EINVAL;
+	*fmt = common->fmt;
+	return 0;
+}
+
+static int vpif_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct v4l2_pix_format *pixfmt;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	if ((VPIF_CHANNEL2_VIDEO == ch->channel_id)
+	    || (VPIF_CHANNEL3_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+
+		/* Check for the priority */
+		ret = v4l2_prio_check(&ch->prio, fh->prio);
+		if (0 != ret)
+			return ret;
+		fh->initialized = 1;
+	}
+
+	if (common->started) {
+		vpif_dbg(1, debug, "Streaming in progress\n");
+		return -EBUSY;
+	}
+
+	pixfmt = &fmt->fmt.pix;
+	/* Check for valid field format */
+	ret = vpif_check_format(ch, pixfmt);
+	if (ret)
+		return ret;
+
+	/* store the pix format in the channel object */
+	common->fmt.fmt.pix = *pixfmt;
+	/* store the format in the channel object */
+	common->fmt = *fmt;
+	return 0;
+}
+
+static int vpif_try_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+	int ret = 0;
+
+	ret = vpif_check_format(ch, pixfmt);
+	if (ret) {
+		*pixfmt = common->fmt.fmt.pix;
+		pixfmt->sizeimage = pixfmt->width * pixfmt->height * 2;
+	}
+
+	return ret;
+}
+
+static int vpif_reqbufs(struct file *file, void *priv,
+			struct v4l2_requestbuffers *reqbuf)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common;
+	enum v4l2_field field;
+	struct vb2_queue *q;
+	u8 index = 0;
+
+	/* If this file handle has not initialized the channel,
+	 * it is not allowed to change settings */
+	if ((VPIF_CHANNEL2_VIDEO == ch->channel_id)
+	    || (VPIF_CHANNEL3_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_err("Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != reqbuf->type)
+		return -EINVAL;
+
+	index = VPIF_VIDEO_INDEX;
+
+	common = &ch->common[index];
+
+	if (common->fmt.type != reqbuf->type || !vpif_dev)
+		return -EINVAL;
+	if (0 != common->io_usrs)
+		return -EBUSY;
+
+	if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY)
+			field = V4L2_FIELD_INTERLACED;
+		else
+			field = common->fmt.fmt.pix.field;
+	} else {
+		field = V4L2_VBI_INTERLACED;
+	}
+	/* Initialize videobuf2 queue as per the buffer type */
+	common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
+	if (!common->alloc_ctx) {
+		vpif_err("Failed to get the context\n");
+		return -EINVAL;
+	}
+	q = &common->buffer_queue;
+	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->drv_priv = fh;
+	q->ops = &video_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct vpif_disp_buffer);
+
+	vb2_queue_init(q);
+
+	/* Set io allowed member of file handle to TRUE */
+	fh->io_allowed[index] = 1;
+	/* Increment io usrs member of channel object to 1 */
+	common->io_usrs = 1;
+	/* Store type of memory requested in channel object */
+	common->memory = reqbuf->memory;
+	INIT_LIST_HEAD(&common->dma_queue);
+	/* Allocate buffers */
+	return vb2_reqbufs(&common->buffer_queue, reqbuf);
+}
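+
+/*
+ * Illustrative user-space counterpart (standard V4L2 usage, not part of this
+ * driver): the queue set up above is reached through VIDIOC_REQBUFS, e.g.
+ *
+ *	struct v4l2_requestbuffers req = {
+ *		.count	= 3,
+ *		.type	= V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ *		.memory	= V4L2_MEMORY_MMAP,
+ *	};
+ *	ioctl(fd, VIDIOC_REQBUFS, &req);
+ */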
+
+static int vpif_querybuf(struct file *file, void *priv,
+				struct v4l2_buffer *tbuf)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	if (common->fmt.type != tbuf->type)
+		return -EINVAL;
+
+	return vb2_querybuf(&common->buffer_queue, tbuf);
+}
+
+static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct vpif_fh *fh = NULL;
+	struct channel_obj *ch = NULL;
+	struct common_obj *common = NULL;
+
+	if (!buf || !priv)
+		return -EINVAL;
+
+	fh = priv;
+	ch = fh->channel;
+	if (!ch)
+		return -EINVAL;
+
+	common = &(ch->common[VPIF_VIDEO_INDEX]);
+	if (common->fmt.type != buf->type)
+		return -EINVAL;
+
+	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		vpif_err("fh->io_allowed\n");
+		return -EACCES;
+	}
+
+	return vb2_qbuf(&common->buffer_queue, buf);
+}
+
+static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	if (!(*std_id & VPIF_V4L2_STD))
+		return -EINVAL;
+
+	if (common->started) {
+		vpif_err("streaming in progress\n");
+		return -EBUSY;
+	}
+
+	/* Call encoder subdevice function to set the standard */
+	ch->video.stdid = *std_id;
+	ch->video.dv_preset = V4L2_DV_INVALID;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_resolution(ch))
+		return -EINVAL;
+
+	if ((ch->vpifparams.std_info.width *
+		ch->vpifparams.std_info.height * 2) >
+		config_params.channel_bufsize[ch->channel_id]) {
+		vpif_err("invalid std for this size\n");
+		return -EINVAL;
+	}
+
+	common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
+	/* Configure the default format information */
+	vpif_config_format(ch);
+
+	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
+						s_std_output, *std_id);
+	if (ret < 0) {
+		vpif_err("Failed to set output standard\n");
+		return ret;
+	}
+
+	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core,
+							s_std, *std_id);
+	if (ret < 0)
+		vpif_err("Failed to set standard for sub devices\n");
+	return ret;
+}
+
+static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	*std = ch->video.stdid;
+	return 0;
+}
+
+static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+
+	return vb2_dqbuf(&common->buffer_queue, p,
+					(file->f_flags & O_NONBLOCK));
+}
+
+static int vpif_streamon(struct file *file, void *priv,
+				enum v4l2_buf_type buftype)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
+	int ret = 0;
+
+	if (buftype != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		vpif_err("buffer type not supported\n");
+		return -EINVAL;
+	}
+
+	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		vpif_err("fh->io_allowed\n");
+		return -EACCES;
+	}
+
+	/* If Streaming is already started, return error */
+	if (common->started) {
+		vpif_err("channel->started\n");
+		return -EBUSY;
+	}
+
+	if ((ch->channel_id == VPIF_CHANNEL2_VIDEO
+		&& oth_ch->common[VPIF_VIDEO_INDEX].started &&
+		ch->vpifparams.std_info.ycmux_mode == 0)
+		|| ((ch->channel_id == VPIF_CHANNEL3_VIDEO)
+		&& (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) {
+		vpif_err("other channel is using\n");
+		return -EBUSY;
+	}
+
+	ret = vpif_check_format(ch, &common->fmt.fmt.pix);
+	if (ret < 0)
+		return ret;
+
+	/* Call vb2_streamon to start streaming in videobuf2 */
+	ret = vb2_streamon(&common->buffer_queue, buftype);
+	if (ret < 0) {
+		vpif_err("vb2_streamon\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int vpif_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type buftype)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct vpif_display_config *vpif_config_data =
+					vpif_dev->platform_data;
+
+	if (buftype != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		vpif_err("buffer type not supported\n");
+		return -EINVAL;
+	}
+
+	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
+		vpif_err("fh->io_allowed\n");
+		return -EACCES;
+	}
+
+	if (!common->started) {
+		vpif_err("channel->started\n");
+		return -EINVAL;
+	}
+
+	if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		/* disable channel */
+		if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+			if (vpif_config_data->ch2_clip_en)
+				channel2_clipping_enable(0);
+			enable_channel2(0);
+			channel2_intr_enable(0);
+		}
+		if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+					(2 == common->started)) {
+			if (vpif_config_data->ch3_clip_en)
+				channel3_clipping_enable(0);
+			enable_channel3(0);
+			channel3_intr_enable(0);
+		}
+	}
+
+	common->started = 0;
+	return vb2_streamoff(&common->buffer_queue, buftype);
+}
+
+static int vpif_cropcap(struct file *file, void *priv,
+			struct v4l2_cropcap *crop)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != crop->type)
+		return -EINVAL;
+
+	crop->bounds.left = crop->bounds.top = 0;
+	crop->defrect.left = crop->defrect.top = 0;
+	crop->defrect.height = crop->bounds.height = common->height;
+	crop->defrect.width = crop->bounds.width = common->width;
+
+	return 0;
+}
+
+static int vpif_enum_output(struct file *file, void *fh,
+				struct v4l2_output *output)
+{
+	struct vpif_display_config *config = vpif_dev->platform_data;
+
+	if (output->index >= config->output_count) {
+		vpif_dbg(1, debug, "Invalid output index\n");
+		return -EINVAL;
+	}
+
+	strcpy(output->name, config->output[output->index]);
+	output->type = V4L2_OUTPUT_TYPE_ANALOG;
+	output->std = VPIF_V4L2_STD;
+
+	return 0;
+}
+
+static int vpif_s_output(struct file *file, void *priv, unsigned int i)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	if (common->started) {
+		vpif_err("Streaming in progress\n");
+		return -EBUSY;
+	}
+
+	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
+							s_routing, 0, i, 0);
+
+	if (ret < 0)
+		vpif_err("Failed to set output standard\n");
+
+	vid_ch->output_id = i;
+	return ret;
+}
+
+static int vpif_g_output(struct file *file, void *priv, unsigned int *i)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	*i = vid_ch->output_id;
+
+	return 0;
+}
+
+static int vpif_g_priority(struct file *file, void *priv, enum v4l2_priority *p)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	*p = v4l2_prio_max(&ch->prio);
+
+	return 0;
+}
+
+static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_prio_change(&ch->prio, &fh->prio, p);
+}
+
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+		struct v4l2_dv_enum_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+			video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	int ret = 0;
+
+	if (common->started) {
+		vpif_dbg(1, debug, "streaming in progress\n");
+		return -EBUSY;
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (ret != 0)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Call encoder subdevice function to set the standard */
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+
+	ch->video.dv_preset = preset->preset;
+	ch->video.stdid = V4L2_STD_UNKNOWN;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_resolution(ch)) {
+		ret = -EINVAL;
+	} else {
+		/* Configure the default format information */
+		vpif_config_format(ch);
+
+		ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+				video, s_dv_preset, preset);
+	}
+
+	mutex_unlock(&common->lock);
+
+	return ret;
+}
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	preset->preset = ch->video.dv_preset;
+
+	return 0;
+}
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+	int ret;
+
+	if (timings->type != V4L2_DV_BT_656_1120) {
+		vpif_dbg(2, debug, "Timing type not defined\n");
+		return -EINVAL;
+	}
+
+	/* Configure subdevice timings, if any */
+	ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+			video, s_dv_timings, timings);
+	if (ret == -ENOIOCTLCMD) {
+		vpif_dbg(2, debug, "Custom DV timings not supported by "
+				"subdevice\n");
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		vpif_dbg(2, debug, "Error setting custom DV timings\n");
+		return ret;
+	}
+
+	if (!(timings->bt.width && timings->bt.height &&
+				(timings->bt.hbackporch ||
+				 timings->bt.hfrontporch ||
+				 timings->bt.hsync) &&
+				timings->bt.vfrontporch &&
+				(timings->bt.vbackporch ||
+				 timings->bt.vsync))) {
+		vpif_dbg(2, debug, "Timings for width, height, "
+				"horizontal back porch, horizontal sync, "
+				"horizontal front porch, vertical back porch, "
+				"vertical sync and vertical front porch "
+				"must be defined\n");
+		return -EINVAL;
+	}
+
+	*bt = timings->bt;
+
+	/* Configure video port timings */
+
+	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+		bt->hsync - 8;
+	std_info->sav2eav = bt->width;
+
+	std_info->l1 = 1;
+	std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+	if (bt->interlaced) {
+		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+			std_info->vsize = bt->height * 2 +
+				bt->vfrontporch + bt->vsync + bt->vbackporch +
+				bt->il_vfrontporch + bt->il_vsync +
+				bt->il_vbackporch;
+			std_info->l5 = std_info->vsize/2 -
+				(bt->vfrontporch - 1);
+			std_info->l7 = std_info->vsize/2 + 1;
+			std_info->l9 = std_info->l7 + bt->il_vsync +
+				bt->il_vbackporch + 1;
+			std_info->l11 = std_info->vsize -
+				(bt->il_vfrontporch - 1);
+		} else {
+			vpif_dbg(2, debug, "Required timing values for "
+					"interlaced BT format missing\n");
+			return -EINVAL;
+		}
+	} else {
+		std_info->vsize = bt->height + bt->vfrontporch +
+			bt->vsync + bt->vbackporch;
+		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+	}
+	strncpy(std_info->name, "Custom timings BT656/1120",
+			VPIF_MAX_NAME);
+	std_info->width = bt->width;
+	std_info->height = bt->height;
+	std_info->frm_fmt = bt->interlaced ? 0 : 1;
+	std_info->ycmux_mode = 0;
+	std_info->capture_format = 0;
+	std_info->vbi_supported = 0;
+	std_info->hd_sd = 1;
+	std_info->stdid = 0;
+	std_info->dv_preset = V4L2_DV_INVALID;
+
+	vid_ch->stdid = 0;
+	vid_ch->dv_preset = V4L2_DV_INVALID;
+
+	return 0;
+}
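+
+/*
+ * Worked example (illustrative, using the common 1080p blanking values of
+ * hfrontporch = 88, hsync = 44, hbackporch = 148, vfrontporch = 4, vsync = 5,
+ * vbackporch = 36): the code above yields eav2sav = 148 + 88 + 44 - 8 = 272,
+ * sav2eav = 1920, l3 = 5 + 36 + 1 = 42, and for the progressive branch
+ * vsize = 1080 + 4 + 5 + 36 = 1125 and l5 = 1125 - (4 - 1) = 1122.
+ */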
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+	timings->bt = *bt;
+
+	return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+		vpif_dbg(2, debug, "match_type is invalid.\n");
+		return -EINVAL;
+	}
+
+	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+			g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg){
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+			s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+	/* status for sub devices */
+	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+	return 0;
+}
+
+/* vpif display ioctl operations */
+static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
+	.vidioc_querycap        	= vpif_querycap,
+	.vidioc_g_priority		= vpif_g_priority,
+	.vidioc_s_priority		= vpif_s_priority,
+	.vidioc_enum_fmt_vid_out	= vpif_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out  		= vpif_g_fmt_vid_out,
+	.vidioc_s_fmt_vid_out   	= vpif_s_fmt_vid_out,
+	.vidioc_try_fmt_vid_out 	= vpif_try_fmt_vid_out,
+	.vidioc_reqbufs         	= vpif_reqbufs,
+	.vidioc_querybuf        	= vpif_querybuf,
+	.vidioc_qbuf            	= vpif_qbuf,
+	.vidioc_dqbuf           	= vpif_dqbuf,
+	.vidioc_streamon        	= vpif_streamon,
+	.vidioc_streamoff       	= vpif_streamoff,
+	.vidioc_s_std           	= vpif_s_std,
+	.vidioc_g_std			= vpif_g_std,
+	.vidioc_enum_output		= vpif_enum_output,
+	.vidioc_s_output		= vpif_s_output,
+	.vidioc_g_output		= vpif_g_output,
+	.vidioc_cropcap         	= vpif_cropcap,
+	.vidioc_enum_dv_presets         = vpif_enum_dv_presets,
+	.vidioc_s_dv_preset             = vpif_s_dv_preset,
+	.vidioc_g_dv_preset             = vpif_g_dv_preset,
+	.vidioc_s_dv_timings            = vpif_s_dv_timings,
+	.vidioc_g_dv_timings            = vpif_g_dv_timings,
+	.vidioc_g_chip_ident		= vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= vpif_dbg_g_register,
+	.vidioc_s_register		= vpif_dbg_s_register,
+#endif
+	.vidioc_log_status		= vpif_log_status,
+};
+
+static const struct v4l2_file_operations vpif_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vpif_open,
+	.release	= vpif_release,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= vpif_mmap,
+	.poll		= vpif_poll
+};
+
+static struct video_device vpif_video_template = {
+	.name		= "vpif",
+	.fops		= &vpif_fops,
+	.ioctl_ops	= &vpif_ioctl_ops,
+	.tvnorms	= VPIF_V4L2_STD,
+	.current_norm	= V4L2_STD_625_50,
+};
+
+/* Configure the channels, buffer size, request irq */
+static int initialize_vpif(void)
+{
+	int free_channel_objects_index;
+	int free_buffer_channel_index;
+	int free_buffer_index;
+	int err = 0, i, j;
+
+	/* Default number of buffers should be 3 */
+	if ((ch2_numbuffers > 0) &&
+	    (ch2_numbuffers < config_params.min_numbuffers))
+		ch2_numbuffers = config_params.min_numbuffers;
+	if ((ch3_numbuffers > 0) &&
+	    (ch3_numbuffers < config_params.min_numbuffers))
+		ch3_numbuffers = config_params.min_numbuffers;
+
+	/* Set the buffer size to the minimum if an invalid buffer size is
+	 * given */
+	if (ch2_bufsize < config_params.min_bufsize[VPIF_CHANNEL2_VIDEO])
+		ch2_bufsize =
+		    config_params.min_bufsize[VPIF_CHANNEL2_VIDEO];
+	if (ch3_bufsize < config_params.min_bufsize[VPIF_CHANNEL3_VIDEO])
+		ch3_bufsize =
+		    config_params.min_bufsize[VPIF_CHANNEL3_VIDEO];
+
+	config_params.numbuffers[VPIF_CHANNEL2_VIDEO] = ch2_numbuffers;
+
+	if (ch2_numbuffers) {
+		config_params.channel_bufsize[VPIF_CHANNEL2_VIDEO] =
+							ch2_bufsize;
+	}
+	config_params.numbuffers[VPIF_CHANNEL3_VIDEO] = ch3_numbuffers;
+
+	if (ch3_numbuffers) {
+		config_params.channel_bufsize[VPIF_CHANNEL3_VIDEO] =
+							ch3_bufsize;
+	}
+
+	/* Allocate memory for the channel objects */
+	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+		vpif_obj.dev[i] =
+		    kzalloc(sizeof(struct channel_obj), GFP_KERNEL);
+		/* If memory allocation fails, return error */
+		if (!vpif_obj.dev[i]) {
+			free_channel_objects_index = i;
+			err = -ENOMEM;
+			goto vpif_init_free_channel_objects;
+		}
+	}
+
+	free_channel_objects_index = VPIF_DISPLAY_MAX_DEVICES;
+	free_buffer_channel_index = VPIF_DISPLAY_NUM_CHANNELS;
+	free_buffer_index = config_params.numbuffers[i - 1];
+
+	return 0;
+
+vpif_init_free_channel_objects:
+	for (j = 0; j < free_channel_objects_index; j++)
+		kfree(vpif_obj.dev[j]);
+	return err;
+}
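+
+/*
+ * For instance (illustrative): passing ch2_bufsize=4096 on the module command
+ * line is raised back to the 720 * 480 * 2 = 691200 byte minimum above, and a
+ * non-zero ch2_numbuffers smaller than min_numbuffers is raised back to 3.
+ */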
+
+/*
+ * vpif_probe: This function creates device entries by registering itself to
+ * the V4L2 driver and initializes fields of each channel object
+ */
+static __init int vpif_probe(struct platform_device *pdev)
+{
+	struct vpif_subdev_info *subdevdata;
+	struct vpif_display_config *config;
+	int i, j = 0, k, q, m, err = 0;
+	struct i2c_adapter *i2c_adap;
+	struct common_obj *common;
+	struct channel_obj *ch;
+	struct video_device *vfd;
+	struct resource *res;
+	int subdev_count;
+	size_t size;
+
+	vpif_dev = &pdev->dev;
+	err = initialize_vpif();
+
+	if (err) {
+		v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
+		return err;
+	}
+
+	err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+	if (err) {
+		v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+		return err;
+	}
+
+	k = 0;
+	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
+		for (i = res->start; i <= res->end; i++) {
+			if (request_irq(i, vpif_channel_isr, IRQF_SHARED,
+					"VPIF_Display",
+				(void *)(&vpif_obj.dev[k]->channel_id))) {
+				err = -EBUSY;
+				goto vpif_int_err;
+			}
+		}
+		k++;
+	}
+
+	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+
+		/* Allocate memory for video device */
+		vfd = video_device_alloc();
+		if (vfd == NULL) {
+			for (j = 0; j < i; j++) {
+				ch = vpif_obj.dev[j];
+				video_device_release(ch->video_dev);
+			}
+			err = -ENOMEM;
+			goto vpif_int_err;
+		}
+
+		/* Initialize field of video device */
+		*vfd = vpif_video_template;
+		vfd->v4l2_dev = &vpif_obj.v4l2_dev;
+		vfd->release = video_device_release;
+		snprintf(vfd->name, sizeof(vfd->name),
+			 "VPIF_Display_DRIVER_V%s",
+			 VPIF_DISPLAY_VERSION);
+
+		/* Set video_dev to the video device */
+		ch->video_dev = vfd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res) {
+		size = resource_size(res);
+		/* The resource is split into two equal memory areas; when
+		 * we have HD output we can add them together
+		 */
+		for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+			ch = vpif_obj.dev[j];
+			ch->channel_id = j;
+
+			/* only enabled if second resource exists */
+			config_params.video_limit[ch->channel_id] = 0;
+			if (size)
+				config_params.video_limit[ch->channel_id] =
+									size/2;
+		}
+	}
+
+	for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+		ch = vpif_obj.dev[j];
+		/* Initialize field of the channel objects */
+		atomic_set(&ch->usrs, 0);
+		for (k = 0; k < VPIF_NUMOBJECTS; k++) {
+			ch->common[k].numbuffers = 0;
+			common = &ch->common[k];
+			common->io_usrs = 0;
+			common->started = 0;
+			spin_lock_init(&common->irqlock);
+			mutex_init(&common->lock);
+			common->numbuffers = 0;
+			common->set_addr = NULL;
+			common->ytop_off = common->ybtm_off = 0;
+			common->ctop_off = common->cbtm_off = 0;
+			common->cur_frm = common->next_frm = NULL;
+			memset(&common->fmt, 0, sizeof(common->fmt));
+			common->numbuffers = config_params.numbuffers[k];
+
+		}
+		ch->initialized = 0;
+		ch->channel_id = j;
+		if (j < 2)
+			ch->common[VPIF_VIDEO_INDEX].numbuffers =
+			    config_params.numbuffers[ch->channel_id];
+		else
+			ch->common[VPIF_VIDEO_INDEX].numbuffers = 0;
+
+		memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
+
+		/* Initialize prio member of channel object */
+		v4l2_prio_init(&ch->prio);
+		ch->common[VPIF_VIDEO_INDEX].fmt.type =
+						V4L2_BUF_TYPE_VIDEO_OUTPUT;
+		ch->video_dev->lock = &common->lock;
+
+		/* register video device */
+		vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
+				(int)ch, (int)&ch->video_dev);
+
+		err = video_register_device(ch->video_dev,
+					  VFL_TYPE_GRABBER, (j ? 3 : 2));
+		if (err < 0)
+			goto probe_out;
+
+		video_set_drvdata(ch->video_dev, ch);
+	}
+
+	i2c_adap = i2c_get_adapter(1);
+	config = pdev->dev.platform_data;
+	subdev_count = config->subdev_count;
+	subdevdata = config->subdevinfo;
+	vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+								GFP_KERNEL);
+	if (vpif_obj.sd == NULL) {
+		vpif_err("unable to allocate memory for subdevice pointers\n");
+		err = -ENOMEM;
+		goto probe_out;
+	}
+
+	for (i = 0; i < subdev_count; i++) {
+		vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+						i2c_adap,
+						&subdevdata[i].board_info,
+						NULL);
+		if (!vpif_obj.sd[i]) {
+			vpif_err("Error registering v4l2 subdevice\n");
+			goto probe_subdev_out;
+		}
+
+		if (vpif_obj.sd[i])
+			vpif_obj.sd[i]->grp_id = 1 << i;
+	}
+
+	v4l2_info(&vpif_obj.v4l2_dev,
+			" VPIF display driver initialized\n");
+	return 0;
+
+probe_subdev_out:
+	kfree(vpif_obj.sd);
+probe_out:
+	for (k = 0; k < j; k++) {
+		ch = vpif_obj.dev[k];
+		video_unregister_device(ch->video_dev);
+		video_device_release(ch->video_dev);
+		ch->video_dev = NULL;
+	}
+vpif_int_err:
+	v4l2_device_unregister(&vpif_obj.v4l2_dev);
+	vpif_err("VPIF IRQ request failed\n");
+	for (q = k; k >= 0; k--) {
+		for (m = i; m >= res->start; m--)
+			free_irq(m, (void *)(&vpif_obj.dev[k]->channel_id));
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, k-1);
+		m = res->end;
+	}
+
+	return err;
+}
+
+/*
+ * vpif_remove: It un-registers the channels from the V4L2 driver
+ */
+static int vpif_remove(struct platform_device *device)
+{
+	struct channel_obj *ch;
+	int i;
+
+	v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+	/* un-register device */
+	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		/* Unregister video device */
+		video_unregister_device(ch->video_dev);
+
+		ch->video_dev = NULL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int vpif_suspend(struct device *dev)
+{
+	struct common_obj *common;
+	struct channel_obj *ch;
+	int i;
+
+	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		common = &ch->common[VPIF_VIDEO_INDEX];
+		mutex_lock(&common->lock);
+		if (atomic_read(&ch->usrs) && common->io_usrs) {
+			/* Disable channel */
+			if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+				enable_channel2(0);
+				channel2_intr_enable(0);
+			}
+			if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+					common->started == 2) {
+				enable_channel3(0);
+				channel3_intr_enable(0);
+			}
+		}
+		mutex_unlock(&common->lock);
+	}
+
+	return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+	struct common_obj *common;
+	struct channel_obj *ch;
+	int i;
+
+	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+		/* Get the pointer to the channel object */
+		ch = vpif_obj.dev[i];
+		common = &ch->common[VPIF_VIDEO_INDEX];
+		mutex_lock(&common->lock);
+		if (atomic_read(&ch->usrs) && common->io_usrs) {
+			/* Enable channel */
+			if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+				enable_channel2(1);
+				channel2_intr_enable(1);
+			}
+			if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+					common->started == 2) {
+				enable_channel3(1);
+				channel3_intr_enable(1);
+			}
+		}
+		mutex_unlock(&common->lock);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops vpif_pm = {
+	.suspend        = vpif_suspend,
+	.resume         = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_pm)
+#else
+#define vpif_pm_ops NULL
+#endif
+
+static __refdata struct platform_driver vpif_driver = {
+	.driver	= {
+			.name	= "vpif_display",
+			.owner	= THIS_MODULE,
+			.pm	= vpif_pm_ops,
+	},
+	.probe	= vpif_probe,
+	.remove	= vpif_remove,
+};
+
+static __init int vpif_init(void)
+{
+	return platform_driver_register(&vpif_driver);
+}
+
+/*
+ * vpif_cleanup: This function un-registers the device and driver from the
+ * kernel, frees the requested irq handlers and de-allocates the memory
+ * allocated for the channel objects.
+ */
+static void vpif_cleanup(void)
+{
+	struct platform_device *pdev;
+	struct resource *res;
+	int irq_num;
+	int i = 0;
+
+	pdev = container_of(vpif_dev, struct platform_device, dev);
+
+	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, i))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++)
+			free_irq(irq_num,
+				 (void *)(&vpif_obj.dev[i]->channel_id));
+		i++;
+	}
+
+	platform_driver_unregister(&vpif_driver);
+	kfree(vpif_obj.sd);
+	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+		kfree(vpif_obj.dev[i]);
+}
+
+module_init(vpif_init);
+module_exit(vpif_cleanup);
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
new file mode 100644
index 0000000..8967ffb
--- /dev/null
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -0,0 +1,181 @@
+/*
+ * VPIF display header file
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DAVINCIHD_DISPLAY_H
+#define DAVINCIHD_DISPLAY_H
+
+/* Header files */
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/davinci/vpif_types.h>
+
+#include "vpif.h"
+
+/* Macros */
+#define VPIF_DISPLAY_VERSION	"0.0.2"
+
+#define VPIF_VALID_FIELD(field) \
+	(((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \
+	(((V4L2_FIELD_INTERLACED == field) || (V4L2_FIELD_SEQ_TB == field)) || \
+	(V4L2_FIELD_SEQ_BT == field)))
+
+#define VPIF_DISPLAY_MAX_DEVICES	(2)
+#define VPIF_SLICED_BUF_SIZE		(256)
+#define VPIF_SLICED_MAX_SERVICES	(3)
+#define VPIF_VIDEO_INDEX		(0)
+#define VPIF_VBI_INDEX			(1)
+#define VPIF_HBI_INDEX			(2)
+
+/* Setting it to 1 as HBI/VBI support is yet to be added, else 3 */
+#define VPIF_NUMOBJECTS	(1)
+
+/* Macros */
+#define ISALIGNED(a)    (0 == ((a) & 7))
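+/*
+ * e.g. ISALIGNED(0x1000) is true while ISALIGNED(0x1004) is not, since only
+ * addresses/offsets that are multiples of 8 bytes pass the check.
+ */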
+
+/* enumerated data types */
+/* Enumerated data type to give id to each device per channel */
+enum vpif_channel_id {
+	VPIF_CHANNEL2_VIDEO = 0,	/* Channel2 Video */
+	VPIF_CHANNEL3_VIDEO,		/* Channel3 Video */
+};
+
+/* structures */
+
+struct video_obj {
+	enum v4l2_field buf_field;
+	u32 latest_only;		/* indicate whether to return
+					 * most recent displayed frame only */
+	v4l2_std_id stdid;		/* Currently selected or default
+					 * standard */
+	u32 dv_preset;
+	struct v4l2_bt_timings bt_timings;
+	u32 output_id;			/* Current output id */
+};
+
+struct vbi_obj {
+	int num_services;
+	struct vpif_vbi_params vbiparams;	/* vpif parameters for the raw
+						 * vbi data */
+};
+
+struct vpif_disp_buffer {
+	struct vb2_buffer vb;
+	struct list_head list;
+};
+
+struct common_obj {
+	/* Buffer specific parameters */
+	u8 *fbuffers[VIDEO_MAX_FRAME];		/* List of buffer pointers for
+						 * storing frames */
+	u32 numbuffers;				/* number of buffers */
+	struct vpif_disp_buffer *cur_frm;	/* Pointer pointing to current
+						 * vb2_buffer */
+	struct vpif_disp_buffer *next_frm;	/* Pointer pointing to next
+						 * vb2_buffer */
+	enum v4l2_memory memory;		/* This field keeps track of
+						 * type of buffer exchange
+						 * method user has selected */
+	struct v4l2_format fmt;			/* Used to store the format */
+	struct vb2_queue buffer_queue;		/* Buffer queue used in
+						 * video-buf */
+	/* allocator-specific contexts for each plane */
+	struct vb2_alloc_ctx *alloc_ctx;
+
+	struct list_head dma_queue;		/* Queue of filled frames */
+	spinlock_t irqlock;			/* Used in video-buf */
+
+	/* channel specific parameters */
+	struct mutex lock;			/* lock used to access this
+						 * structure */
+	u32 io_usrs;				/* number of users performing
+						 * IO */
+	u8 started;				/* Indicates whether streaming
+						 * started */
+	u32 ytop_off;				/* offset of Y top from the
+						 * starting of the buffer */
+	u32 ybtm_off;				/* offset of Y bottom from the
+						 * starting of the buffer */
+	u32 ctop_off;				/* offset of C top from the
+						 * starting of the buffer */
+	u32 cbtm_off;				/* offset of C bottom from the
+						 * starting of the buffer */
+	/* Function pointer to set the addresses */
+	void (*set_addr) (unsigned long, unsigned long,
+				unsigned long, unsigned long);
+	u32 height;
+	u32 width;
+};
+
+struct channel_obj {
+	/* V4l2 specific parameters */
+	struct video_device *video_dev;	/* Identifies video device for
+					 * this channel */
+	struct v4l2_prio_state prio;	/* Used to keep track of state of
+					 * the priority */
+	atomic_t usrs;			/* number of open instances of
+					 * the channel */
+	u32 field_id;			/* Indicates id of the field
+					 * which is being displayed */
+	u8 initialized;			/* flag to indicate whether
+					 * encoder is initialized */
+
+	enum vpif_channel_id channel_id;/* Identifies channel */
+	struct vpif_params vpifparams;
+	struct common_obj common[VPIF_NUMOBJECTS];
+	struct video_obj video;
+	struct vbi_obj vbi;
+};
+
+/* File handle structure */
+struct vpif_fh {
+	struct channel_obj *channel;	/* pointer to channel object for
+					 * opened device */
+	u8 io_allowed[VPIF_NUMOBJECTS];	/* Indicates whether this file handle
+					 * is doing IO */
+	enum v4l2_priority prio;	/* Used to keep track priority of
+					 * this instance */
+	u8 initialized;			/* Used to keep track of whether this
+					 * file handle has initialized
+					 * channel or not */
+};
+
+/* vpif device structure */
+struct vpif_device {
+	struct v4l2_device v4l2_dev;
+	struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS];
+	struct v4l2_subdev **sd;
+};
+
+struct vpif_config_params {
+	u32 min_bufsize[VPIF_DISPLAY_NUM_CHANNELS];
+	u32 channel_bufsize[VPIF_DISPLAY_NUM_CHANNELS];
+	u8 numbuffers[VPIF_DISPLAY_NUM_CHANNELS];
+	u32 video_limit[VPIF_DISPLAY_NUM_CHANNELS];
+	u8 min_numbuffers;
+};
+
+/* Struct which keeps track of the line numbers for the sliced vbi service */
+struct vpif_service_line {
+	u16 service_id;
+	u16 service_line[2];
+	u16 enc_service_id;
+	u8 bytestowrite;
+};
+
+#endif				/* DAVINCIHD_DISPLAY_H */
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
new file mode 100644
index 0000000..146e4b0
--- /dev/null
+++ b/drivers/media/platform/davinci/vpss.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * common vpss system module platform driver for all video drivers.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+#include <media/davinci/vpss.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPSS Driver");
+MODULE_AUTHOR("Texas Instruments");
+
+/* DM644x defines */
+#define DM644X_SBL_PCR_VPSS		(4)
+
+#define DM355_VPSSBL_INTSEL		0x10
+#define DM355_VPSSBL_EVTSEL		0x14
+/* vpss BL register offsets */
+#define DM355_VPSSBL_CCDCMUX		0x1c
+/* vpss CLK register offsets */
+#define DM355_VPSSCLK_CLKCTRL		0x04
+/* masks and shifts */
+#define VPSS_HSSISEL_SHIFT		4
+/*
+ * VDINT0 - vpss_int0, VDINT1 - vpss_int1, H3A - vpss_int4,
+ * IPIPE_INT1_SDR - vpss_int5
+ */
+#define DM355_VPSSBL_INTSEL_DEFAULT	0xff83ff10
+/* VENCINT - vpss_int8 */
+#define DM355_VPSSBL_EVTSEL_DEFAULT	0x4
+
+#define DM365_ISP5_PCCR 		0x04
+#define DM365_ISP5_INTSEL1		0x10
+#define DM365_ISP5_INTSEL2		0x14
+#define DM365_ISP5_INTSEL3		0x18
+#define DM365_ISP5_CCDCMUX 		0x20
+#define DM365_ISP5_PG_FRAME_SIZE 	0x28
+#define DM365_VPBE_CLK_CTRL 		0x00
+/*
+ * vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1,
+ * AF - vpss_int3
+ */
+#define DM365_ISP5_INTSEL1_DEFAULT	0x0b1f0100
+/* AEW - vpss_int6, RSZ_INT_DMA - vpss_int5 */
+#define DM365_ISP5_INTSEL2_DEFAULT	0x1f0a0f1f
+/* VENC - vpss_int8 */
+#define DM365_ISP5_INTSEL3_DEFAULT	0x00000015
+
+/* masks and shifts for DM365*/
+#define DM365_CCDC_PG_VD_POL_SHIFT 	0
+#define DM365_CCDC_PG_HD_POL_SHIFT 	1
+
+#define CCD_SRC_SEL_MASK		(BIT_MASK(5) | BIT_MASK(4))
+#define CCD_SRC_SEL_SHIFT		4
+
+/* Different SoC platforms supported by this driver */
+enum vpss_platform_type {
+	DM644X,
+	DM355,
+	DM365,
+};
+
+/*
+ * vpss operations. These depend on the platform; not all functions are
+ * available on all platforms, so API users must check that a function
+ * pointer is set before invoking it. The function pointers are initialized
+ * in probe() based on the vpss name, which can be "dm355_vpss",
+ * "dm644x_vpss", etc.
+ */
+struct vpss_hw_ops {
+	/* enable clock */
+	int (*enable_clock)(enum vpss_clock_sel clock_sel, int en);
+	/* select input to ccdc */
+	void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel);
+	/* clear wbl overflow bit */
+	int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel);
+};
+
+/* vpss configuration */
+struct vpss_oper_config {
+	__iomem void *vpss_regs_base0;
+	__iomem void *vpss_regs_base1;
+	enum vpss_platform_type platform;
+	spinlock_t vpss_lock;
+	struct vpss_hw_ops hw_ops;
+};
+
+static struct vpss_oper_config oper_cfg;
+
+/* register access routines */
+static inline u32 bl_regr(u32 offset)
+{
+	return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
+}
+
+static inline void bl_regw(u32 val, u32 offset)
+{
+	__raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
+}
+
+static inline u32 vpss_regr(u32 offset)
+{
+	return __raw_readl(oper_cfg.vpss_regs_base1 + offset);
+}
+
+static inline void vpss_regw(u32 val, u32 offset)
+{
+	__raw_writel(val, oper_cfg.vpss_regs_base1 + offset);
+}
+
+/* For DM365 only */
+static inline u32 isp5_read(u32 offset)
+{
+	return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
+}
+
+/* For DM365 only */
+static inline void isp5_write(u32 val, u32 offset)
+{
+	__raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
+}
+
+static void dm365_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+	u32 temp = isp5_read(DM365_ISP5_CCDCMUX) & ~CCD_SRC_SEL_MASK;
+
+	/* if we are using pattern generator, enable it */
+	if (src_sel == VPSS_PGLPBK || src_sel == VPSS_CCDCPG)
+		temp |= 0x08;
+
+	temp |= (src_sel << CCD_SRC_SEL_SHIFT);
+	isp5_write(temp, DM365_ISP5_CCDCMUX);
+}
+
+static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+	bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX);
+}
+
+int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
+{
+	if (!oper_cfg.hw_ops.select_ccdc_source)
+		return -EINVAL;
+
+	oper_cfg.hw_ops.select_ccdc_source(src_sel);
+	return 0;
+}
+EXPORT_SYMBOL(vpss_select_ccdc_source);
+
+static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
+{
+	u32 mask = 1, val;
+
+	if (wbl_sel < VPSS_PCR_AEW_WBL_0 ||
+	    wbl_sel > VPSS_PCR_CCDC_WBL_O)
+		return -EINVAL;
+
+	/* writing a 0 clears the overflow bit */
+	mask = ~(mask << wbl_sel);
+	val = bl_regr(DM644X_SBL_PCR_VPSS) & mask;
+	bl_regw(val, DM644X_SBL_PCR_VPSS);
+	return 0;
+}
+
+int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
+{
+	if (!oper_cfg.hw_ops.clear_wbl_overflow)
+		return -EINVAL;
+
+	return oper_cfg.hw_ops.clear_wbl_overflow(wbl_sel);
+}
+EXPORT_SYMBOL(vpss_clear_wbl_overflow);
+
+/*
+ *  dm355_enable_clock - Enable VPSS Clock
+ *  @clock_sel: Clock to be enabled/disabled
+ *  @en: enable/disable flag
+ *
+ *  This is called to enable or disable a vpss clock
+ */
+static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+	unsigned long flags;
+	u32 utemp, mask = 0x1, shift = 0;
+
+	switch (clock_sel) {
+	case VPSS_VPBE_CLOCK:
+		/* nothing to do, the enable bit is the LSB */
+		break;
+	case VPSS_VENC_CLOCK_SEL:
+		shift = 2;
+		break;
+	case VPSS_CFALD_CLOCK:
+		shift = 3;
+		break;
+	case VPSS_H3A_CLOCK:
+		shift = 4;
+		break;
+	case VPSS_IPIPE_CLOCK:
+		shift = 5;
+		break;
+	case VPSS_CCDC_CLOCK:
+		shift = 6;
+		break;
+	default:
+		printk(KERN_ERR "dm355_enable_clock: Invalid selector: %d\n",
+		       clock_sel);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
+	utemp = vpss_regr(DM355_VPSSCLK_CLKCTRL);
+	if (!en)
+		utemp &= ~(mask << shift);
+	else
+		utemp |= (mask << shift);
+
+	vpss_regw(utemp, DM355_VPSSCLK_CLKCTRL);
+	spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
+	return 0;
+}
+
+static int dm365_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+	unsigned long flags;
+	u32 utemp, mask = 0x1, shift = 0, offset = DM365_ISP5_PCCR;
+	u32 (*read)(u32 offset) = isp5_read;
+	void(*write)(u32 val, u32 offset) = isp5_write;
+
+	switch (clock_sel) {
+	case VPSS_BL_CLOCK:
+		break;
+	case VPSS_CCDC_CLOCK:
+		shift = 1;
+		break;
+	case VPSS_H3A_CLOCK:
+		shift = 2;
+		break;
+	case VPSS_RSZ_CLOCK:
+		shift = 3;
+		break;
+	case VPSS_IPIPE_CLOCK:
+		shift = 4;
+		break;
+	case VPSS_IPIPEIF_CLOCK:
+		shift = 5;
+		break;
+	case VPSS_PCLK_INTERNAL:
+		shift = 6;
+		break;
+	case VPSS_PSYNC_CLOCK_SEL:
+		shift = 7;
+		break;
+	case VPSS_VPBE_CLOCK:
+		read = vpss_regr;
+		write = vpss_regw;
+		offset = DM365_VPBE_CLK_CTRL;
+		break;
+	case VPSS_VENC_CLOCK_SEL:
+		shift = 2;
+		read = vpss_regr;
+		write = vpss_regw;
+		offset = DM365_VPBE_CLK_CTRL;
+		break;
+	case VPSS_LDC_CLOCK:
+		shift = 3;
+		read = vpss_regr;
+		write = vpss_regw;
+		offset = DM365_VPBE_CLK_CTRL;
+		break;
+	case VPSS_FDIF_CLOCK:
+		shift = 4;
+		read = vpss_regr;
+		write = vpss_regw;
+		offset = DM365_VPBE_CLK_CTRL;
+		break;
+	case VPSS_OSD_CLOCK_SEL:
+		shift = 6;
+		read = vpss_regr;
+		write = vpss_regw;
+		offset = DM365_VPBE_CLK_CTRL;
+		break;
+	case VPSS_LDC_CLOCK_SEL:
+		shift = 7;
+		read = vpss_regr;
+		write = vpss_regw;
+		offset = DM365_VPBE_CLK_CTRL;
+		break;
+	default:
+		printk(KERN_ERR "dm365_enable_clock: Invalid selector: %d\n",
+		       clock_sel);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
+	utemp = read(offset);
+	if (!en)
+		utemp &= ~(mask << shift);
+	else
+		utemp |= (mask << shift);
+
+	write(utemp, offset);
+	spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
+
+	return 0;
+}
+
+int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en)
+{
+	if (!oper_cfg.hw_ops.enable_clock)
+		return -EINVAL;
+
+	return oper_cfg.hw_ops.enable_clock(clock_sel, en);
+}
+EXPORT_SYMBOL(vpss_enable_clock);
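+
+/*
+ * Illustrative usage (not part of this driver): a DaVinci capture driver
+ * built on top of this module would typically gate its clock and route
+ * the CCDC input through the exported helpers, e.g.
+ *
+ *	if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0)
+ *		return -EINVAL;
+ *	vpss_select_ccdc_source(VPSS_CCDCPG);
+ *
+ * Both helpers return -EINVAL when the current platform does not provide
+ * the corresponding operation.
+ */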
+
+void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync)
+{
+	int val = 0;
+	val = isp5_read(DM365_ISP5_CCDCMUX);
+
+	val |= (sync.ccdpg_hdpol << DM365_CCDC_PG_HD_POL_SHIFT);
+	val |= (sync.ccdpg_vdpol << DM365_CCDC_PG_VD_POL_SHIFT);
+
+	isp5_write(val, DM365_ISP5_CCDCMUX);
+}
+EXPORT_SYMBOL(dm365_vpss_set_sync_pol);
+
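+/*
+ * Program the DM365 pattern generator frame size register: bits 31:16 hold
+ * (hlpfr / 2) - 1 and bits 15:0 hold pplen - 1, matching the computation
+ * below.
+ */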
+void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
+{
+	int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16;
+
+	current_reg |= (frame_size.pplen - 1);
+	isp5_write(current_reg, DM365_ISP5_PG_FRAME_SIZE);
+}
+EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
+
+static int __devinit vpss_probe(struct platform_device *pdev)
+{
+	struct resource		*r1, *r2;
+	char *platform_name;
+	int status;
+
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "no platform data\n");
+		return -ENOENT;
+	}
+
+	platform_name = pdev->dev.platform_data;
+	if (!strcmp(platform_name, "dm355_vpss"))
+		oper_cfg.platform = DM355;
+	else if (!strcmp(platform_name, "dm365_vpss"))
+		oper_cfg.platform = DM365;
+	else if (!strcmp(platform_name, "dm644x_vpss"))
+		oper_cfg.platform = DM644X;
+	else {
+		dev_err(&pdev->dev,
+			"vpss driver not supported on this platform\n");
+		return -ENODEV;
+	}
+
+	dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
+	r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r1)
+		return -ENOENT;
+
+	r1 = request_mem_region(r1->start, resource_size(r1), r1->name);
+	if (!r1)
+		return -EBUSY;
+
+	oper_cfg.vpss_regs_base0 = ioremap(r1->start, resource_size(r1));
+	if (!oper_cfg.vpss_regs_base0) {
+		status = -EBUSY;
+		goto fail1;
+	}
+
+	if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
+		r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!r2) {
+			status = -ENOENT;
+			goto fail2;
+		}
+		r2 = request_mem_region(r2->start, resource_size(r2), r2->name);
+		if (!r2) {
+			status = -EBUSY;
+			goto fail2;
+		}
+
+		oper_cfg.vpss_regs_base1 = ioremap(r2->start,
+						   resource_size(r2));
+		if (!oper_cfg.vpss_regs_base1) {
+			status = -EBUSY;
+			goto fail3;
+		}
+	}
+
+	if (oper_cfg.platform == DM355) {
+		oper_cfg.hw_ops.enable_clock = dm355_enable_clock;
+		oper_cfg.hw_ops.select_ccdc_source = dm355_select_ccdc_source;
+		/* Setup vpss interrupts */
+		bl_regw(DM355_VPSSBL_INTSEL_DEFAULT, DM355_VPSSBL_INTSEL);
+		bl_regw(DM355_VPSSBL_EVTSEL_DEFAULT, DM355_VPSSBL_EVTSEL);
+	} else if (oper_cfg.platform == DM365) {
+		oper_cfg.hw_ops.enable_clock = dm365_enable_clock;
+		oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source;
+		/* Setup vpss interrupts */
+		isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1);
+		isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2);
+		isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3);
+	} else
+		oper_cfg.hw_ops.clear_wbl_overflow = dm644x_clear_wbl_overflow;
+
+	spin_lock_init(&oper_cfg.vpss_lock);
+	dev_info(&pdev->dev, "%s vpss probe success\n", platform_name);
+	return 0;
+
+fail3:
+	release_mem_region(r2->start, resource_size(r2));
+fail2:
+	iounmap(oper_cfg.vpss_regs_base0);
+fail1:
+	release_mem_region(r1->start, resource_size(r1));
+	return status;
+}
+
+static int __devexit vpss_remove(struct platform_device *pdev)
+{
+	struct resource		*res;
+
+	iounmap(oper_cfg.vpss_regs_base0);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
+		iounmap(oper_cfg.vpss_regs_base1);
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		release_mem_region(res->start, resource_size(res));
+	}
+	return 0;
+}
+
+static struct platform_driver vpss_driver = {
+	.driver = {
+		.name	= "vpss",
+		.owner = THIS_MODULE,
+	},
+	.remove = __devexit_p(vpss_remove),
+	.probe = vpss_probe,
+};
+
+static void vpss_exit(void)
+{
+	platform_driver_unregister(&vpss_driver);
+}
+
+static int __init vpss_init(void)
+{
+	return platform_driver_register(&vpss_driver);
+}
+subsys_initcall(vpss_init);
+module_exit(vpss_exit);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
new file mode 100644
index 0000000..20f9810
--- /dev/null
+++ b/drivers/media/platform/fsl-viu.c
@@ -0,0 +1,1690 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ *  Freescale VIU video driver
+ *
+ *  Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ *	     Porting to 2.6.35 by DENX Software Engineering,
+ *	     Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-dma-contig.h>
+
+#define DRV_NAME		"fsl_viu"
+#define VIU_VERSION		"0.5.1"
+
+#define BUFFER_TIMEOUT		msecs_to_jiffies(500)  /* 0.5 seconds */
+
+#define	VIU_VID_MEM_LIMIT	4	/* Video memory limit, in MiB */
+
+/* 7-bit I2C address of the video decoder chip (8-bit address 0x4A) */
+#define VIU_VIDEO_DECODER_ADDR	0x25
+
+/* supported controls */
+static struct v4l2_queryctrl viu_qctrl[] = {
+	{
+		.id            = V4L2_CID_BRIGHTNESS,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Brightness",
+		.minimum       = 0,
+		.maximum       = 255,
+		.step          = 1,
+		.default_value = 127,
+		.flags         = 0,
+	}, {
+		.id            = V4L2_CID_CONTRAST,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Contrast",
+		.minimum       = 0,
+		.maximum       = 255,
+		.step          = 0x1,
+		.default_value = 0x10,
+		.flags         = 0,
+	}, {
+		.id            = V4L2_CID_SATURATION,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Saturation",
+		.minimum       = 0,
+		.maximum       = 255,
+		.step          = 0x1,
+		.default_value = 127,
+		.flags         = 0,
+	}, {
+		.id            = V4L2_CID_HUE,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Hue",
+		.minimum       = -128,
+		.maximum       = 127,
+		.step          = 0x1,
+		.default_value = 0,
+		.flags         = 0,
+	}
+};
+
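+/* current control values, one shadow entry per element of viu_qctrl[] */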
+static int qctl_regs[ARRAY_SIZE(viu_qctrl)];
+
+static int info_level;
+
+#define dprintk(level, fmt, arg...)					\
+	do {								\
+		if (level <= info_level)				\
+			printk(KERN_DEBUG "viu: " fmt , ## arg);	\
+	} while (0)
+
+/*
+ * Basic structures
+ */
+struct viu_fmt {
+	char  name[32];
+	u32   fourcc;		/* v4l2 format id */
+	u32   pixelformat;
+	int   depth;
+};
+
+static struct viu_fmt formats[] = {
+	{
+		.name		= "RGB-16 (5/B-6/G-5/R)",
+		.fourcc		= V4L2_PIX_FMT_RGB565,
+		.pixelformat	= V4L2_PIX_FMT_RGB565,
+		.depth		= 16,
+	}, {
+		.name		= "RGB-32 (A-R-G-B)",
+		.fourcc		= V4L2_PIX_FMT_RGB32,
+		.pixelformat	= V4L2_PIX_FMT_RGB32,
+		.depth		= 32,
+	}
+};
+
+struct viu_dev;
+struct viu_buf;
+
+/* buffer for one video frame */
+struct viu_buf {
+	/* common v4l buffer stuff -- must be first */
+	struct videobuf_buffer vb;
+	struct viu_fmt *fmt;
+};
+
+struct viu_dmaqueue {
+	struct viu_dev		*dev;
+	struct list_head	active;
+	struct list_head	queued;
+	struct timer_list	timeout;
+};
+
+struct viu_status {
+	u32 field_irq;
+	u32 vsync_irq;
+	u32 hsync_irq;
+	u32 vstart_irq;
+	u32 dma_end_irq;
+	u32 error_irq;
+};
+
+struct viu_reg {
+	u32 status_cfg;
+	u32 luminance;
+	u32 chroma_r;
+	u32 chroma_g;
+	u32 chroma_b;
+	u32 field_base_addr;
+	u32 dma_inc;
+	u32 picture_count;
+	u32 req_alarm;
+	u32 alpha;
+} __attribute__ ((packed));
+
+struct viu_dev {
+	struct v4l2_device	v4l2_dev;
+	struct mutex		lock;
+	spinlock_t		slock;
+	int			users;
+
+	struct device		*dev;
+	/* various device info */
+	struct video_device	*vdev;
+	struct viu_dmaqueue	vidq;
+	enum v4l2_field		capfield;
+	int			field;
+	int			first;
+	int			dma_done;
+
+	/* Hardware register area */
+	struct viu_reg		*vr;
+
+	/* Interrupt vector */
+	int			irq;
+	struct viu_status	irqs;
+
+	/* video overlay */
+	struct v4l2_framebuffer	ovbuf;
+	struct viu_fmt		*ovfmt;
+	unsigned int		ovenable;
+	enum v4l2_field		ovfield;
+
+	/* crop */
+	struct v4l2_rect	crop_current;
+
+	/* clock pointer */
+	struct clk		*clk;
+
+	/* decoder */
+	struct v4l2_subdev	*decoder;
+
+	v4l2_std_id		std;
+};
+
+struct viu_fh {
+	struct viu_dev		*dev;
+
+	/* video capture */
+	struct videobuf_queue	vb_vidq;
+	spinlock_t		vbq_lock; /* spinlock for the videobuf queue */
+
+	/* video overlay */
+	struct v4l2_window	win;
+	struct v4l2_clip	clips[1];
+
+	/* video capture */
+	struct viu_fmt		*fmt;
+	int			width, height, sizeimage;
+	enum v4l2_buf_type	type;
+};
+
+static struct viu_reg reg_val;
+
+/*
+ * Macro definitions of VIU registers
+ */
+
+/* STATUS_CONFIG register */
+enum status_config {
+	SOFT_RST		= 1 << 0,
+
+	ERR_MASK		= 0x0f << 4,	/* Error code mask */
+	ERR_NO			= 0x00,		/* No error */
+	ERR_DMA_V		= 0x01 << 4,	/* DMA in vertical active */
+	ERR_DMA_VB		= 0x02 << 4,	/* DMA in vertical blanking */
+	ERR_LINE_TOO_LONG	= 0x04 << 4,	/* Line too long */
+	ERR_TOO_MANY_LINES	= 0x05 << 4,	/* Too many lines in field */
+	ERR_LINE_TOO_SHORT	= 0x06 << 4,	/* Line too short */
+	ERR_NOT_ENOUGH_LINE	= 0x07 << 4,	/* Not enough lines in field */
+	ERR_FIFO_OVERFLOW	= 0x08 << 4,	/* FIFO overflow */
+	ERR_FIFO_UNDERFLOW	= 0x09 << 4,	/* FIFO underflow */
+	ERR_1bit_ECC		= 0x0a << 4,	/* One bit ECC error */
+	ERR_MORE_ECC		= 0x0b << 4,	/* Two/more bits ECC error */
+
+	INT_FIELD_EN		= 0x01 << 8,	/* Enable field interrupt */
+	INT_VSYNC_EN		= 0x01 << 9,	/* Enable vsync interrupt */
+	INT_HSYNC_EN		= 0x01 << 10,	/* Enable hsync interrupt */
+	INT_VSTART_EN		= 0x01 << 11,	/* Enable vstart interrupt */
+	INT_DMA_END_EN		= 0x01 << 12,	/* Enable DMA end interrupt */
+	INT_ERROR_EN		= 0x01 << 13,	/* Enable error interrupt */
+	INT_ECC_EN		= 0x01 << 14,	/* Enable ECC interrupt */
+
+	INT_FIELD_STATUS	= 0x01 << 16,	/* field interrupt status */
+	INT_VSYNC_STATUS	= 0x01 << 17,	/* vsync interrupt status */
+	INT_HSYNC_STATUS	= 0x01 << 18,	/* hsync interrupt status */
+	INT_VSTART_STATUS	= 0x01 << 19,	/* vstart interrupt status */
+	INT_DMA_END_STATUS	= 0x01 << 20,	/* DMA end interrupt status */
+	INT_ERROR_STATUS	= 0x01 << 21,	/* error interrupt status */
+
+	DMA_ACT			= 0x01 << 27,	/* Enable DMA transfer */
+	FIELD_NO		= 0x01 << 28,	/* Field number */
+	DITHER_ON		= 0x01 << 29,	/* Dithering is on */
+	ROUND_ON		= 0x01 << 30,	/* Round is on */
+	MODE_32BIT		= 0x01 << 31,	/* 1: data in RGBa888,
+						 * 0: data in RGB565
+						 */
+};
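+
+/*
+ * The *_STATUS bits in status_cfg are cleared by writing them back as ones;
+ * the interrupt paths below therefore OR the pending status bits into the
+ * value they write to the register (see viu_intr() and viu_stop_dma()).
+ */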
+
+#define norm_maxw()	720
+#define norm_maxh()	576
+
+#define INT_ALL_STATUS	(INT_FIELD_STATUS | INT_VSYNC_STATUS | \
+			 INT_HSYNC_STATUS | INT_VSTART_STATUS | \
+			 INT_DMA_END_STATUS | INT_ERROR_STATUS)
+
+#define NUM_FORMATS	ARRAY_SIZE(formats)
+
+static irqreturn_t viu_intr(int irq, void *dev_id);
+
+struct viu_fmt *format_by_fourcc(int fourcc)
+{
+	int i;
+
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (formats[i].pixelformat == fourcc)
+			return formats + i;
+	}
+
+	dprintk(0, "unknown pixelformat:'%4.4s'\n", (char *)&fourcc);
+	return NULL;
+}
+
+void viu_start_dma(struct viu_dev *dev)
+{
+	struct viu_reg *vr = dev->vr;
+
+	dev->field = 0;
+
+	/* Enable DMA operation */
+	out_be32(&vr->status_cfg, SOFT_RST);
+	out_be32(&vr->status_cfg, INT_FIELD_EN);
+}
+
+void viu_stop_dma(struct viu_dev *dev)
+{
+	struct viu_reg *vr = dev->vr;
+	int cnt = 100;
+	u32 status_cfg;
+
+	out_be32(&vr->status_cfg, 0);
+
+	/* Clear pending interrupts */
+	status_cfg = in_be32(&vr->status_cfg);
+	if (status_cfg & 0x3f0000)
+		out_be32(&vr->status_cfg, status_cfg & 0x3f0000);
+
+	if (status_cfg & DMA_ACT) {
+		do {
+			status_cfg = in_be32(&vr->status_cfg);
+			if (status_cfg & INT_DMA_END_STATUS)
+				break;
+		} while (cnt--);
+
+		if (cnt < 0) {
+			/* timed out, issue soft reset */
+			out_be32(&vr->status_cfg, SOFT_RST);
+			out_be32(&vr->status_cfg, 0);
+		} else {
+			/* clear DMA_END and other pending irqs */
+			out_be32(&vr->status_cfg, status_cfg & 0x3f0000);
+		}
+	}
+
+	dev->field = 0;
+}
+
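+/*
+ * Restart capture after an error or timeout: any buffers still on the
+ * active list are failed and returned to userspace; otherwise queued
+ * buffers with a matching format are promoted to the active list and DMA
+ * is restarted on the first of them.
+ */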
+static int restart_video_queue(struct viu_dmaqueue *vidq)
+{
+	struct viu_buf *buf, *prev;
+
+	dprintk(1, "%s vidq=0x%08lx\n", __func__, (unsigned long)vidq);
+	if (!list_empty(&vidq->active)) {
+		buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
+		dprintk(2, "restart_queue [%p/%d]: restart dma\n",
+			buf, buf->vb.i);
+
+		viu_stop_dma(vidq->dev);
+
+		/* cancel all outstanding capture requests */
+		list_for_each_entry_safe(buf, prev, &vidq->active, vb.queue) {
+			list_del(&buf->vb.queue);
+			buf->vb.state = VIDEOBUF_ERROR;
+			wake_up(&buf->vb.done);
+		}
+		mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+		return 0;
+	}
+
+	prev = NULL;
+	for (;;) {
+		if (list_empty(&vidq->queued))
+			return 0;
+		buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
+		if (prev == NULL) {
+			list_del(&buf->vb.queue);
+			list_add_tail(&buf->vb.queue, &vidq->active);
+
+			dprintk(1, "Restarting video dma\n");
+			viu_stop_dma(vidq->dev);
+			viu_start_dma(vidq->dev);
+
+			buf->vb.state = VIDEOBUF_ACTIVE;
+			mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+			dprintk(2, "[%p/%d] restart_queue - first active\n",
+				buf, buf->vb.i);
+
+		} else if (prev->vb.width  == buf->vb.width  &&
+			   prev->vb.height == buf->vb.height &&
+			   prev->fmt       == buf->fmt) {
+			list_del(&buf->vb.queue);
+			list_add_tail(&buf->vb.queue, &vidq->active);
+			buf->vb.state = VIDEOBUF_ACTIVE;
+			dprintk(2, "[%p/%d] restart_queue - move to active\n",
+				buf, buf->vb.i);
+		} else {
+			return 0;
+		}
+		prev = buf;
+	}
+}
+
+static void viu_vid_timeout(unsigned long data)
+{
+	struct viu_dev *dev = (struct viu_dev *)data;
+	struct viu_buf *buf;
+	struct viu_dmaqueue *vidq = &dev->vidq;
+
+	while (!list_empty(&vidq->active)) {
+		buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
+		list_del(&buf->vb.queue);
+		buf->vb.state = VIDEOBUF_ERROR;
+		wake_up(&buf->vb.done);
+		dprintk(1, "viu/0: [%p/%d] timeout\n", buf, buf->vb.i);
+	}
+
+	restart_video_queue(vidq);
+}
+
+/*
+ * Videobuf operations
+ */
+static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
+			unsigned int *size)
+{
+	struct viu_fh *fh = vq->priv_data;
+
+	*size = fh->width * fh->height * fh->fmt->depth >> 3;
+	if (*count == 0)
+		*count = 32;
+
+	while (*size * *count > VIU_VID_MEM_LIMIT * 1024 * 1024)
+		(*count)--;
+
+	dprintk(1, "%s, count=%d, size=%d\n", __func__, *count, *size);
+	return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf)
+{
+	struct videobuf_buffer *vb = &buf->vb;
+	void *vaddr = NULL;
+
+	BUG_ON(in_interrupt());
+
+	videobuf_waiton(vq, &buf->vb, 0, 0);
+
+	if (vq->int_ops && vq->int_ops->vaddr)
+		vaddr = vq->int_ops->vaddr(vb);
+
+	if (vaddr)
+		videobuf_dma_contig_free(vq, &buf->vb);
+
+	buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
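+/*
+ * Program the VIU DMA registers for one buffer: field_base_addr points at
+ * the buffer, dma_inc is the horizontal DMA increment (one line in bytes,
+ * or 0 when only a single field is captured) and picture_count packs
+ * height / 2 into bits 31:16 and the width into bits 15:0.
+ */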
+inline int buffer_activate(struct viu_dev *dev, struct viu_buf *buf)
+{
+	struct viu_reg *vr = dev->vr;
+	int bpp;
+
+	/* setup the DMA base address */
+	reg_val.field_base_addr = videobuf_to_dma_contig(&buf->vb);
+
+	dprintk(1, "buffer_activate [%p/%d]: dma addr 0x%lx\n",
+		buf, buf->vb.i, (unsigned long)reg_val.field_base_addr);
+
+	/* interlace is on by default, set horizontal DMA increment */
+	reg_val.status_cfg = 0;
+	bpp = buf->fmt->depth >> 3;
+	switch (bpp) {
+	case 2:
+		reg_val.status_cfg &= ~MODE_32BIT;
+		reg_val.dma_inc = buf->vb.width * 2;
+		break;
+	case 4:
+		reg_val.status_cfg |= MODE_32BIT;
+		reg_val.dma_inc = buf->vb.width * 4;
+		break;
+	default:
+		dprintk(0, "doesn't support color depth (%d)\n",
+			bpp * 8);
+		return -EINVAL;
+	}
+
+	/* setup picture_count register */
+	reg_val.picture_count = (buf->vb.height / 2) << 16 |
+				buf->vb.width;
+
+	reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
+
+	buf->vb.state = VIDEOBUF_ACTIVE;
+	dev->capfield = buf->vb.field;
+
+	/* reset dma increment if needed */
+	if (!V4L2_FIELD_HAS_BOTH(buf->vb.field))
+		reg_val.dma_inc = 0;
+
+	out_be32(&vr->dma_inc, reg_val.dma_inc);
+	out_be32(&vr->picture_count, reg_val.picture_count);
+	out_be32(&vr->field_base_addr, reg_val.field_base_addr);
+	mod_timer(&dev->vidq.timeout, jiffies + BUFFER_TIMEOUT);
+	return 0;
+}
+
+static int buffer_prepare(struct videobuf_queue *vq,
+			  struct videobuf_buffer *vb,
+			  enum v4l2_field field)
+{
+	struct viu_fh  *fh  = vq->priv_data;
+	struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
+	int rc;
+
+	BUG_ON(fh->fmt == NULL);
+
+	if (fh->width  < 48 || fh->width  > norm_maxw() ||
+	    fh->height < 32 || fh->height > norm_maxh())
+		return -EINVAL;
+	buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
+	if (buf->vb.baddr != 0 && buf->vb.bsize < buf->vb.size)
+		return -EINVAL;
+
+	if (buf->fmt       != fh->fmt	 ||
+	    buf->vb.width  != fh->width  ||
+	    buf->vb.height != fh->height ||
+	    buf->vb.field  != field) {
+		buf->fmt       = fh->fmt;
+		buf->vb.width  = fh->width;
+		buf->vb.height = fh->height;
+		buf->vb.field  = field;
+	}
+
+	if (buf->vb.state == VIDEOBUF_NEEDS_INIT) {
+		rc = videobuf_iolock(vq, &buf->vb, NULL);
+		if (rc != 0)
+			goto fail;
+
+		buf->vb.width  = fh->width;
+		buf->vb.height = fh->height;
+		buf->vb.field  = field;
+		buf->fmt       = fh->fmt;
+	}
+
+	buf->vb.state = VIDEOBUF_PREPARED;
+	return 0;
+
+fail:
+	free_buffer(vq, buf);
+	return rc;
+}
+
+static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+	struct viu_buf       *buf     = container_of(vb, struct viu_buf, vb);
+	struct viu_fh        *fh      = vq->priv_data;
+	struct viu_dev       *dev     = fh->dev;
+	struct viu_dmaqueue  *vidq    = &dev->vidq;
+	struct viu_buf       *prev;
+
+	if (!list_empty(&vidq->queued)) {
+		dprintk(1, "adding vb queue=0x%08lx\n",
+				(unsigned long)&buf->vb.queue);
+		dprintk(1, "vidq pointer 0x%p, queued 0x%p\n",
+				vidq, &vidq->queued);
+		dprintk(1, "dev %p, queued: self %p, next %p, head %p\n",
+			dev, &vidq->queued, vidq->queued.next,
+			vidq->queued.prev);
+		list_add_tail(&buf->vb.queue, &vidq->queued);
+		buf->vb.state = VIDEOBUF_QUEUED;
+		dprintk(2, "[%p/%d] buffer_queue - append to queued\n",
+			buf, buf->vb.i);
+	} else if (list_empty(&vidq->active)) {
+		dprintk(1, "adding vb active=0x%08lx\n",
+				(unsigned long)&buf->vb.queue);
+		list_add_tail(&buf->vb.queue, &vidq->active);
+		buf->vb.state = VIDEOBUF_ACTIVE;
+		mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
+		dprintk(2, "[%p/%d] buffer_queue - first active\n",
+			buf, buf->vb.i);
+
+		buffer_activate(dev, buf);
+	} else {
+		dprintk(1, "adding vb queue2=0x%08lx\n",
+				(unsigned long)&buf->vb.queue);
+		prev = list_entry(vidq->active.prev, struct viu_buf, vb.queue);
+		if (prev->vb.width  == buf->vb.width  &&
+		    prev->vb.height == buf->vb.height &&
+		    prev->fmt       == buf->fmt) {
+			list_add_tail(&buf->vb.queue, &vidq->active);
+			buf->vb.state = VIDEOBUF_ACTIVE;
+			dprintk(2, "[%p/%d] buffer_queue - append to active\n",
+				buf, buf->vb.i);
+		} else {
+			list_add_tail(&buf->vb.queue, &vidq->queued);
+			buf->vb.state = VIDEOBUF_QUEUED;
+			dprintk(2, "[%p/%d] buffer_queue - first queued\n",
+				buf, buf->vb.i);
+		}
+	}
+}
+
+static void buffer_release(struct videobuf_queue *vq,
+				struct videobuf_buffer *vb)
+{
+	struct viu_buf *buf  = container_of(vb, struct viu_buf, vb);
+	struct viu_fh  *fh   = vq->priv_data;
+	struct viu_dev *dev  = (struct viu_dev *)fh->dev;
+
+	viu_stop_dma(dev);
+	free_buffer(vq, buf);
+}
+
+static struct videobuf_queue_ops viu_video_qops = {
+	.buf_setup      = buffer_setup,
+	.buf_prepare    = buffer_prepare,
+	.buf_queue      = buffer_queue,
+	.buf_release    = buffer_release,
+};
+
+/*
+ * IOCTL vidioc handling
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strcpy(cap->driver, "viu");
+	strcpy(cap->card, "viu");
+	cap->capabilities =	V4L2_CAP_VIDEO_CAPTURE |
+				V4L2_CAP_STREAMING     |
+				V4L2_CAP_VIDEO_OVERLAY |
+				V4L2_CAP_READWRITE;
+	return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void  *priv,
+					struct v4l2_fmtdesc *f)
+{
+	int index = f->index;
+
+	if (f->index >= NUM_FORMATS)
+		return -EINVAL;
+
+	strlcpy(f->description, formats[index].name, sizeof(f->description));
+	f->pixelformat = formats[index].fourcc;
+	return 0;
+}
+
+static int vidioc_g_fmt_cap(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct viu_fh *fh = priv;
+
+	f->fmt.pix.width        = fh->width;
+	f->fmt.pix.height       = fh->height;
+	f->fmt.pix.field        = fh->vb_vidq.field;
+	f->fmt.pix.pixelformat  = fh->fmt->pixelformat;
+	f->fmt.pix.bytesperline =
+			(f->fmt.pix.width * fh->fmt->depth) >> 3;
+	f->fmt.pix.sizeimage	= fh->sizeimage;
+	return 0;
+}
+
+static int vidioc_try_fmt_cap(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct viu_fmt *fmt;
+	enum v4l2_field field;
+	unsigned int maxw, maxh;
+
+	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+	if (!fmt) {
+		dprintk(1, "Fourcc format (0x%08x) invalid.\n",
+			f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	field = f->fmt.pix.field;
+
+	if (field == V4L2_FIELD_ANY) {
+		field = V4L2_FIELD_INTERLACED;
+	} else if (field != V4L2_FIELD_INTERLACED) {
+		dprintk(1, "Field type invalid.\n");
+		return -EINVAL;
+	}
+
+	maxw  = norm_maxw();
+	maxh  = norm_maxh();
+
+	f->fmt.pix.field = field;
+	if (f->fmt.pix.height < 32)
+		f->fmt.pix.height = 32;
+	if (f->fmt.pix.height > maxh)
+		f->fmt.pix.height = maxh;
+	if (f->fmt.pix.width < 48)
+		f->fmt.pix.width = 48;
+	if (f->fmt.pix.width > maxw)
+		f->fmt.pix.width = maxw;
+	f->fmt.pix.width &= ~0x03;
+	f->fmt.pix.bytesperline =
+		(f->fmt.pix.width * fmt->depth) >> 3;
+
+	return 0;
+}
+
+static int vidioc_s_fmt_cap(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct viu_fh *fh = priv;
+	int ret;
+
+	ret = vidioc_try_fmt_cap(file, fh, f);
+	if (ret < 0)
+		return ret;
+
+	fh->fmt           = format_by_fourcc(f->fmt.pix.pixelformat);
+	fh->width         = f->fmt.pix.width;
+	fh->height        = f->fmt.pix.height;
+	fh->sizeimage     = f->fmt.pix.sizeimage;
+	fh->vb_vidq.field = f->fmt.pix.field;
+	fh->type          = f->type;
+	dprintk(1, "set to pixelformat '%s'\n", fh->fmt->name);
+	return 0;
+}
+
+static int vidioc_g_fmt_overlay(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct viu_fh *fh = priv;
+
+	f->fmt.win = fh->win;
+	return 0;
+}
+
+static int verify_preview(struct viu_dev *dev, struct v4l2_window *win)
+{
+	enum v4l2_field field;
+	int maxw, maxh;
+
+	if (dev->ovbuf.base == NULL)
+		return -EINVAL;
+	if (dev->ovfmt == NULL)
+		return -EINVAL;
+	if (win->w.width < 48 || win->w.height < 32)
+		return -EINVAL;
+
+	field = win->field;
+	maxw  = dev->crop_current.width;
+	maxh  = dev->crop_current.height;
+
+	if (field == V4L2_FIELD_ANY) {
+		field = (win->w.height > maxh/2)
+			? V4L2_FIELD_INTERLACED
+			: V4L2_FIELD_TOP;
+	}
+	switch (field) {
+	case V4L2_FIELD_TOP:
+	case V4L2_FIELD_BOTTOM:
+		maxh = maxh / 2;
+		break;
+	case V4L2_FIELD_INTERLACED:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	win->field = field;
+	if (win->w.width > maxw)
+		win->w.width = maxw;
+	if (win->w.height > maxh)
+		win->w.height = maxh;
+	return 0;
+}
+
+inline void viu_activate_overlay(struct viu_reg *viu_reg)
+{
+	struct viu_reg *vr = viu_reg;
+
+	out_be32(&vr->field_base_addr, reg_val.field_base_addr);
+	out_be32(&vr->dma_inc, reg_val.dma_inc);
+	out_be32(&vr->picture_count, reg_val.picture_count);
+}
+
+static int viu_setup_preview(struct viu_dev *dev, struct viu_fh *fh)
+{
+	int bpp;
+
+	dprintk(1, "%s %dx%d %s\n", __func__,
+		fh->win.w.width, fh->win.w.height, dev->ovfmt->name);
+
+	reg_val.status_cfg = 0;
+
+	/* setup window */
+	reg_val.picture_count = (fh->win.w.height / 2) << 16 |
+				fh->win.w.width;
+
+	/* setup color depth and dma increment */
+	bpp = dev->ovfmt->depth / 8;
+	switch (bpp) {
+	case 2:
+		reg_val.status_cfg &= ~MODE_32BIT;
+		reg_val.dma_inc = fh->win.w.width * 2;
+		break;
+	case 4:
+		reg_val.status_cfg |= MODE_32BIT;
+		reg_val.dma_inc = fh->win.w.width * 4;
+		break;
+	default:
+		dprintk(0, "device doesn't support color depth (%d)\n",
+			bpp * 8);
+		return -EINVAL;
+	}
+
+	dev->ovfield = fh->win.field;
+	if (!V4L2_FIELD_HAS_BOTH(dev->ovfield))
+		reg_val.dma_inc = 0;
+
+	reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
+
+	/* setup the base address of the overlay buffer */
+	reg_val.field_base_addr = (u32)dev->ovbuf.base;
+
+	return 0;
+}
+
+static int vidioc_s_fmt_overlay(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct viu_fh  *fh  = priv;
+	struct viu_dev *dev = (struct viu_dev *)fh->dev;
+	unsigned long  flags;
+	int err;
+
+	err = verify_preview(dev, &f->fmt.win);
+	if (err)
+		return err;
+
+	fh->win = f->fmt.win;
+
+	spin_lock_irqsave(&dev->slock, flags);
+	viu_setup_preview(dev, fh);
+	spin_unlock_irqrestore(&dev->slock, flags);
+	return 0;
+}
+
+static int vidioc_try_fmt_overlay(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	return 0;
+}
+
+static int vidioc_overlay(struct file *file, void *priv, unsigned int on)
+{
+	struct viu_fh  *fh  = priv;
+	struct viu_dev *dev = (struct viu_dev *)fh->dev;
+	unsigned long  flags;
+
+	if (on) {
+		spin_lock_irqsave(&dev->slock, flags);
+		viu_activate_overlay(dev->vr);
+		dev->ovenable = 1;
+
+		/* start dma */
+		viu_start_dma(dev);
+		spin_unlock_irqrestore(&dev->slock, flags);
+	} else {
+		viu_stop_dma(dev);
+		dev->ovenable = 0;
+	}
+
+	return 0;
+}
+
+int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *arg)
+{
+	struct viu_fh  *fh = priv;
+	struct viu_dev *dev = fh->dev;
+	struct v4l2_framebuffer *fb = arg;
+
+	*fb = dev->ovbuf;
+	fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+	return 0;
+}
+
+int vidioc_s_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *arg)
+{
+	struct viu_fh  *fh = priv;
+	struct viu_dev *dev = fh->dev;
+	struct v4l2_framebuffer *fb = arg;
+	struct viu_fmt *fmt;
+
+	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	/* check args */
+	fmt = format_by_fourcc(fb->fmt.pixelformat);
+	if (fmt == NULL)
+		return -EINVAL;
+
+	/* ok, accept it */
+	dev->ovbuf = *fb;
+	dev->ovfmt = fmt;
+	if (dev->ovbuf.fmt.bytesperline == 0) {
+		dev->ovbuf.fmt.bytesperline =
+			dev->ovbuf.fmt.width * fmt->depth / 8;
+	}
+	return 0;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+				struct v4l2_requestbuffers *p)
+{
+	struct viu_fh *fh = priv;
+
+	return videobuf_reqbufs(&fh->vb_vidq, p);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+					struct v4l2_buffer *p)
+{
+	struct viu_fh *fh = priv;
+
+	return videobuf_querybuf(&fh->vb_vidq, p);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct viu_fh *fh = priv;
+
+	return videobuf_qbuf(&fh->vb_vidq, p);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct viu_fh *fh = priv;
+
+	return videobuf_dqbuf(&fh->vb_vidq, p,
+				file->f_flags & O_NONBLOCK);
+}
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct viu_fh *fh = priv;
+	struct viu_dev *dev = fh->dev;
+
+	if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	if (fh->type != i)
+		return -EINVAL;
+
+	if (dev->ovenable)
+		dev->ovenable = 0;
+
+	viu_start_dma(fh->dev);
+
+	return videobuf_streamon(&fh->vb_vidq);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct viu_fh  *fh = priv;
+
+	if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	if (fh->type != i)
+		return -EINVAL;
+
+	viu_stop_dma(fh->dev);
+
+	return videobuf_streamoff(&fh->vb_vidq);
+}
+
+#define decoder_call(viu, o, f, args...) \
+	v4l2_subdev_call(viu->decoder, o, f, ##args)
+
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct viu_fh *fh = priv;
+
+	decoder_call(fh->dev, video, querystd, std_id);
+	return 0;
+}
+
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+	struct viu_fh *fh = priv;
+
+	fh->dev->std = *id;
+	decoder_call(fh->dev, core, s_std, *id);
+	return 0;
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct viu_fh *fh = priv;
+
+	*std_id = fh->dev->std;
+	return 0;
+}
+
+/* only one input in this driver */
+static int vidioc_enum_input(struct file *file, void *priv,
+					struct v4l2_input *inp)
+{
+	struct viu_fh *fh = priv;
+
+	if (inp->index != 0)
+		return -EINVAL;
+
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+	inp->std = fh->dev->vdev->tvnorms;
+	strcpy(inp->name, "Camera");
+	return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+	struct viu_fh *fh = priv;
+
+	if (i > 1)
+		return -EINVAL;
+
+	decoder_call(fh->dev, video, s_routing, i, 0, 0);
+	return 0;
+}
+
+/* Controls */
+static int vidioc_queryctrl(struct file *file, void *priv,
+				struct v4l2_queryctrl *qc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
+		if (qc->id && qc->id == viu_qctrl[i].id) {
+			memcpy(qc, &(viu_qctrl[i]), sizeof(*qc));
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+				struct v4l2_control *ctrl)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
+		if (ctrl->id == viu_qctrl[i].id) {
+			ctrl->value = qctl_regs[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+				struct v4l2_control *ctrl)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
+		if (ctrl->id == viu_qctrl[i].id) {
+			if (ctrl->value < viu_qctrl[i].minimum ||
+			    ctrl->value > viu_qctrl[i].maximum)
+				return -ERANGE;
+			qctl_regs[i] = ctrl->value;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+inline void viu_activate_next_buf(struct viu_dev *dev,
+				struct viu_dmaqueue *viuq)
+{
+	struct viu_dmaqueue *vidq = viuq;
+	struct viu_buf *buf;
+
+	/* launch another DMA operation for an active/queued buffer */
+	if (!list_empty(&vidq->active)) {
+		buf = list_entry(vidq->active.next, struct viu_buf,
+					vb.queue);
+		dprintk(1, "start another queued buffer: 0x%p\n", buf);
+		buffer_activate(dev, buf);
+	} else if (!list_empty(&vidq->queued)) {
+		buf = list_entry(vidq->queued.next, struct viu_buf,
+					vb.queue);
+		list_del(&buf->vb.queue);
+
+		dprintk(1, "start another queued buffer: 0x%p\n", buf);
+		list_add_tail(&buf->vb.queue, &vidq->active);
+		buf->vb.state = VIDEOBUF_ACTIVE;
+		buffer_activate(dev, buf);
+	}
+}
+
+inline void viu_default_settings(struct viu_reg *viu_reg)
+{
+	struct viu_reg *vr = viu_reg;
+
+	out_be32(&vr->luminance, 0x9512A254);
+	out_be32(&vr->chroma_r, 0x03310000);
+	out_be32(&vr->chroma_g, 0x06600F38);
+	out_be32(&vr->chroma_b, 0x00000409);
+	out_be32(&vr->alpha, 0x000000ff);
+	out_be32(&vr->req_alarm, 0x00000090);
+	dprintk(1, "status reg: 0x%08x, field base: 0x%08x\n",
+		in_be32(&vr->status_cfg), in_be32(&vr->field_base_addr));
+}
+
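+/*
+ * Overlay interrupt handling: once a DMA end has been seen, the next field
+ * interrupt re-arms the engine, adding dma_inc to the base address when
+ * FIELD_NO indicates the second field so that both fields land interleaved
+ * in the overlay buffer.
+ */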
+static void viu_overlay_intr(struct viu_dev *dev, u32 status)
+{
+	struct viu_reg *vr = dev->vr;
+
+	if (status & INT_DMA_END_STATUS)
+		dev->dma_done = 1;
+
+	if (status & INT_FIELD_STATUS) {
+		if (dev->dma_done) {
+			u32 addr = reg_val.field_base_addr;
+
+			dev->dma_done = 0;
+			if (status & FIELD_NO)
+				addr += reg_val.dma_inc;
+
+			out_be32(&vr->field_base_addr, addr);
+			out_be32(&vr->dma_inc, reg_val.dma_inc);
+			out_be32(&vr->status_cfg,
+				 (status & 0xffc0ffff) |
+				 (status & INT_ALL_STATUS) |
+				 reg_val.status_cfg);
+		} else if (status & INT_VSYNC_STATUS) {
+			out_be32(&vr->status_cfg,
+				 (status & 0xffc0ffff) |
+				 (status & INT_ALL_STATUS) |
+				 reg_val.status_cfg);
+		}
+	}
+}
+
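+/*
+ * Capture interrupt handling: dev->field counts the field DMAs of the
+ * current frame (0 = waiting for field 0, 1 = field 0 done, 2 = both
+ * fields done), so a capture buffer is only completed once both field
+ * DMAs of an interlaced frame have finished.
+ */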
+static void viu_capture_intr(struct viu_dev *dev, u32 status)
+{
+	struct viu_dmaqueue *vidq = &dev->vidq;
+	struct viu_reg *vr = dev->vr;
+	struct viu_buf *buf;
+	int field_num;
+	int need_two;
+	int dma_done = 0;
+
+	field_num = status & FIELD_NO;
+	need_two = V4L2_FIELD_HAS_BOTH(dev->capfield);
+
+	if (status & INT_DMA_END_STATUS) {
+		dma_done = 1;
+		if (((field_num == 0) && (dev->field == 0)) ||
+		    (field_num && (dev->field == 1)))
+			dev->field++;
+	}
+
+	if (status & INT_FIELD_STATUS) {
+		dprintk(1, "irq: field %d, done %d\n",
+			!!field_num, dma_done);
+		if (unlikely(dev->first)) {
+			if (field_num == 0) {
+				dev->first = 0;
+				dprintk(1, "activate first buf\n");
+				viu_activate_next_buf(dev, vidq);
+			} else
+				dprintk(1, "wait field 0\n");
+			return;
+		}
+
+		/* setup buffer address for next dma operation */
+		if (!list_empty(&vidq->active)) {
+			u32 addr = reg_val.field_base_addr;
+
+			if (field_num && need_two) {
+				addr += reg_val.dma_inc;
+				dprintk(1, "field 1, 0x%lx, dev field %d\n",
+					(unsigned long)addr, dev->field);
+			}
+			out_be32(&vr->field_base_addr, addr);
+			out_be32(&vr->dma_inc, reg_val.dma_inc);
+			out_be32(&vr->status_cfg,
+				 (status & 0xffc0ffff) |
+				 (status & INT_ALL_STATUS) |
+				 reg_val.status_cfg);
+			return;
+		}
+	}
+
+	if (dma_done && field_num && (dev->field == 2)) {
+		dev->field = 0;
+		buf = list_entry(vidq->active.next,
+				 struct viu_buf, vb.queue);
+		dprintk(1, "viu/0: [%p/%d] 0x%lx/0x%lx: dma complete\n",
+			buf, buf->vb.i,
+			(unsigned long)videobuf_to_dma_contig(&buf->vb),
+			(unsigned long)in_be32(&vr->field_base_addr));
+
+		if (waitqueue_active(&buf->vb.done)) {
+			list_del(&buf->vb.queue);
+			do_gettimeofday(&buf->vb.ts);
+			buf->vb.state = VIDEOBUF_DONE;
+			buf->vb.field_count++;
+			wake_up(&buf->vb.done);
+		}
+		/* activate next dma buffer */
+		viu_activate_next_buf(dev, vidq);
+	}
+}
+
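+/*
+ * Top-level interrupt handler: update the per-interrupt statistics, clear
+ * all pending status bits, then hand the status word to the overlay or
+ * capture specific handler depending on whether overlay is enabled.
+ */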
+static irqreturn_t viu_intr(int irq, void *dev_id)
+{
+	struct viu_dev *dev  = (struct viu_dev *)dev_id;
+	struct viu_reg *vr = dev->vr;
+	u32 status;
+	u32 error;
+
+	status = in_be32(&vr->status_cfg);
+
+	if (status & INT_ERROR_STATUS) {
+		dev->irqs.error_irq++;
+		error = status & ERR_MASK;
+		if (error)
+			dprintk(1, "Err: error(%d), times:%d!\n",
+				error >> 4, dev->irqs.error_irq);
+		/* Clear interrupt error bit and error flags */
+		out_be32(&vr->status_cfg,
+			 (status & 0xffc0ffff) | INT_ERROR_STATUS);
+	}
+
+	if (status & INT_DMA_END_STATUS) {
+		dev->irqs.dma_end_irq++;
+		dev->dma_done = 1;
+		dprintk(2, "VIU DMA end interrupt times: %d\n",
+					dev->irqs.dma_end_irq);
+	}
+
+	if (status & INT_HSYNC_STATUS)
+		dev->irqs.hsync_irq++;
+
+	if (status & INT_FIELD_STATUS) {
+		dev->irqs.field_irq++;
+		dprintk(2, "VIU field interrupt times: %d\n",
+					dev->irqs.field_irq);
+	}
+
+	if (status & INT_VSTART_STATUS)
+		dev->irqs.vstart_irq++;
+
+	if (status & INT_VSYNC_STATUS) {
+		dev->irqs.vsync_irq++;
+		dprintk(2, "VIU vsync interrupt times: %d\n",
+			dev->irqs.vsync_irq);
+	}
+
+	/* clear all pending irqs */
+	status = in_be32(&vr->status_cfg);
+	out_be32(&vr->status_cfg,
+		 (status & 0xffc0ffff) | (status & INT_ALL_STATUS));
+
+	if (dev->ovenable) {
+		viu_overlay_intr(dev, status);
+		return IRQ_HANDLED;
+	}
+
+	/* Capture mode */
+	viu_capture_intr(dev, status);
+	return IRQ_HANDLED;
+}
+
+/*
+ * File operations for the device
+ */
+static int viu_open(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct viu_dev *dev = video_get_drvdata(vdev);
+	struct viu_fh *fh;
+	struct viu_reg *vr;
+	int minor = vdev->minor;
+	u32 status_cfg;
+	int i;
+
+	dprintk(1, "viu: open (minor=%d)\n", minor);
+
+	dev->users++;
+	if (dev->users > 1) {
+		dev->users--;
+		return -EBUSY;
+	}
+
+	vr = dev->vr;
+
+	dprintk(1, "open minor=%d type=%s users=%d\n", minor,
+		v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
+
+	if (mutex_lock_interruptible(&dev->lock)) {
+		dev->users--;
+		return -ERESTARTSYS;
+	}
+
+	/* allocate and initialize per filehandle data */
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh) {
+		dev->users--;
+		mutex_unlock(&dev->lock);
+		return -ENOMEM;
+	}
+
+	file->private_data = fh;
+	fh->dev = dev;
+
+	fh->type     = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	fh->fmt      = format_by_fourcc(V4L2_PIX_FMT_RGB32);
+	fh->width    = norm_maxw();
+	fh->height   = norm_maxh();
+	dev->crop_current.width  = fh->width;
+	dev->crop_current.height = fh->height;
+
+	/* Put all controls at a sane state */
+	for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++)
+		qctl_regs[i] = viu_qctrl[i].default_value;
+
+	dprintk(1, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
+		(unsigned long)fh, (unsigned long)dev,
+		(unsigned long)&dev->vidq);
+	dprintk(1, "Open: list_empty queued=%d\n",
+		list_empty(&dev->vidq.queued));
+	dprintk(1, "Open: list_empty active=%d\n",
+		list_empty(&dev->vidq.active));
+
+	viu_default_settings(vr);
+
+	status_cfg = in_be32(&vr->status_cfg);
+	out_be32(&vr->status_cfg,
+		 status_cfg & ~(INT_VSYNC_EN | INT_HSYNC_EN |
+				INT_FIELD_EN | INT_VSTART_EN |
+				INT_DMA_END_EN | INT_ERROR_EN | INT_ECC_EN));
+
+	status_cfg = in_be32(&vr->status_cfg);
+	out_be32(&vr->status_cfg, status_cfg | INT_ALL_STATUS);
+
+	spin_lock_init(&fh->vbq_lock);
+	videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops,
+				       dev->dev, &fh->vbq_lock,
+				       fh->type, V4L2_FIELD_INTERLACED,
+				       sizeof(struct viu_buf), fh,
+				       &fh->dev->lock);
+	mutex_unlock(&dev->lock);
+	return 0;
+}
+
+static ssize_t viu_read(struct file *file, char __user *data, size_t count,
+			loff_t *ppos)
+{
+	struct viu_fh *fh = file->private_data;
+	struct viu_dev *dev = fh->dev;
+	int ret = 0;
+
+	dprintk(2, "%s\n", __func__);
+	if (dev->ovenable)
+		dev->ovenable = 0;
+
+	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		if (mutex_lock_interruptible(&dev->lock))
+			return -ERESTARTSYS;
+		viu_start_dma(dev);
+		ret = videobuf_read_stream(&fh->vb_vidq, data, count,
+				ppos, 0, file->f_flags & O_NONBLOCK);
+		mutex_unlock(&dev->lock);
+		return ret;
+	}
+	return 0;
+}
+
+static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct viu_fh *fh = file->private_data;
+	struct videobuf_queue *q = &fh->vb_vidq;
+	struct viu_dev *dev = fh->dev;
+	unsigned int res;
+
+	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
+		return POLLERR;
+
+	mutex_lock(&dev->lock);
+	res = videobuf_poll_stream(file, q, wait);
+	mutex_unlock(&dev->lock);
+	return res;
+}
+
+static int viu_release(struct file *file)
+{
+	struct viu_fh *fh = file->private_data;
+	struct viu_dev *dev = fh->dev;
+	int minor = video_devdata(file)->minor;
+
+	mutex_lock(&dev->lock);
+	viu_stop_dma(dev);
+	videobuf_stop(&fh->vb_vidq);
+	videobuf_mmap_free(&fh->vb_vidq);
+	mutex_unlock(&dev->lock);
+
+	kfree(fh);
+
+	dev->users--;
+	dprintk(1, "close (minor=%d, users=%d)\n",
+		minor, dev->users);
+	return 0;
+}
+
+void viu_reset(struct viu_reg *reg)
+{
+	out_be32(&reg->status_cfg, 0);
+	out_be32(&reg->luminance, 0x9512a254);
+	out_be32(&reg->chroma_r, 0x03310000);
+	out_be32(&reg->chroma_g, 0x06600f38);
+	out_be32(&reg->chroma_b, 0x00000409);
+	out_be32(&reg->field_base_addr, 0);
+	out_be32(&reg->dma_inc, 0);
+	out_be32(&reg->picture_count, 0x01e002d0);
+	out_be32(&reg->req_alarm, 0x00000090);
+	out_be32(&reg->alpha, 0x000000ff);
+}
+
+static int viu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct viu_fh *fh = file->private_data;
+	struct viu_dev *dev = fh->dev;
+	int ret;
+
+	dprintk(1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+	if (mutex_lock_interruptible(&dev->lock))
+		return -ERESTARTSYS;
+	ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+	mutex_unlock(&dev->lock);
+
+	dprintk(1, "vma start=0x%08lx, size=%ld, ret=%d\n",
+		(unsigned long)vma->vm_start,
+		(unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+		ret);
+
+	return ret;
+}
+
+static struct v4l2_file_operations viu_fops = {
+	.owner		= THIS_MODULE,
+	.open		= viu_open,
+	.release	= viu_release,
+	.read		= viu_read,
+	.poll		= viu_poll,
+	.unlocked_ioctl	= video_ioctl2, /* V4L2 ioctl handler */
+	.mmap		= viu_mmap,
+};
+
+static const struct v4l2_ioctl_ops viu_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap  = vidioc_enum_fmt,
+	.vidioc_g_fmt_vid_cap     = vidioc_g_fmt_cap,
+	.vidioc_try_fmt_vid_cap   = vidioc_try_fmt_cap,
+	.vidioc_s_fmt_vid_cap     = vidioc_s_fmt_cap,
+	.vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt,
+	.vidioc_g_fmt_vid_overlay = vidioc_g_fmt_overlay,
+	.vidioc_try_fmt_vid_overlay = vidioc_try_fmt_overlay,
+	.vidioc_s_fmt_vid_overlay = vidioc_s_fmt_overlay,
+	.vidioc_overlay	      = vidioc_overlay,
+	.vidioc_g_fbuf	      = vidioc_g_fbuf,
+	.vidioc_s_fbuf	      = vidioc_s_fbuf,
+	.vidioc_reqbufs       = vidioc_reqbufs,
+	.vidioc_querybuf      = vidioc_querybuf,
+	.vidioc_qbuf          = vidioc_qbuf,
+	.vidioc_dqbuf         = vidioc_dqbuf,
+	.vidioc_g_std         = vidioc_g_std,
+	.vidioc_s_std         = vidioc_s_std,
+	.vidioc_querystd      = vidioc_querystd,
+	.vidioc_enum_input    = vidioc_enum_input,
+	.vidioc_g_input       = vidioc_g_input,
+	.vidioc_s_input       = vidioc_s_input,
+	.vidioc_queryctrl     = vidioc_queryctrl,
+	.vidioc_g_ctrl        = vidioc_g_ctrl,
+	.vidioc_s_ctrl        = vidioc_s_ctrl,
+	.vidioc_streamon      = vidioc_streamon,
+	.vidioc_streamoff     = vidioc_streamoff,
+};
+
+static struct video_device viu_template = {
+	.name		= "FSL viu",
+	.fops		= &viu_fops,
+	.minor		= -1,
+	.ioctl_ops	= &viu_ioctl_ops,
+	.release	= video_device_release,
+
+	.tvnorms        = V4L2_STD_NTSC_M | V4L2_STD_PAL,
+	.current_norm   = V4L2_STD_NTSC_M,
+};
+
+static int __devinit viu_of_probe(struct platform_device *op)
+{
+	struct viu_dev *viu_dev;
+	struct video_device *vdev;
+	struct resource r;
+	struct viu_reg __iomem *viu_regs;
+	struct i2c_adapter *ad;
+	int ret, viu_irq;
+
+	ret = of_address_to_resource(op->dev.of_node, 0, &r);
+	if (ret) {
+		dev_err(&op->dev, "Can't parse device node resource\n");
+		return -ENODEV;
+	}
+
+	viu_irq = irq_of_parse_and_map(op->dev.of_node, 0);
+	if (viu_irq == NO_IRQ) {
+		dev_err(&op->dev, "Error while mapping the irq\n");
+		return -EINVAL;
+	}
+
+	/* request mem region */
+	if (!devm_request_mem_region(&op->dev, r.start,
+				     sizeof(struct viu_reg), DRV_NAME)) {
+		dev_err(&op->dev, "Error while requesting mem region\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	/* remap registers */
+	viu_regs = devm_ioremap(&op->dev, r.start, sizeof(struct viu_reg));
+	if (!viu_regs) {
+		dev_err(&op->dev, "Can't map register set\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Prepare our private structure */
+	viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_ATOMIC);
+	if (!viu_dev) {
+		dev_err(&op->dev, "Can't allocate private structure\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	viu_dev->vr = viu_regs;
+	viu_dev->irq = viu_irq;
+	viu_dev->dev = &op->dev;
+
+	/* init video dma queues */
+	INIT_LIST_HEAD(&viu_dev->vidq.active);
+	INIT_LIST_HEAD(&viu_dev->vidq.queued);
+
+	snprintf(viu_dev->v4l2_dev.name,
+		 sizeof(viu_dev->v4l2_dev.name), "%s", "VIU");
+	ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
+	if (ret < 0) {
+		dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
+		goto err;
+	}
+
+	ad = i2c_get_adapter(0);
+	viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
+			"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
+
+	viu_dev->vidq.timeout.function = viu_vid_timeout;
+	viu_dev->vidq.timeout.data     = (unsigned long)viu_dev;
+	init_timer(&viu_dev->vidq.timeout);
+	viu_dev->first = 1;
+
+	/* Allocate memory for video device */
+	vdev = video_device_alloc();
+	if (vdev == NULL) {
+		ret = -ENOMEM;
+		goto err_vdev;
+	}
+
+	memcpy(vdev, &viu_template, sizeof(viu_template));
+
+	vdev->v4l2_dev = &viu_dev->v4l2_dev;
+
+	viu_dev->vdev = vdev;
+
+	/* initialize locks */
+	mutex_init(&viu_dev->lock);
+	viu_dev->vdev->lock = &viu_dev->lock;
+	spin_lock_init(&viu_dev->slock);
+
+	video_set_drvdata(viu_dev->vdev, viu_dev);
+
+	mutex_lock(&viu_dev->lock);
+
+	ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0) {
+		video_device_release(viu_dev->vdev);
+		goto err_vdev;
+	}
+
+	/* enable VIU clock */
+	viu_dev->clk = clk_get(&op->dev, "viu_clk");
+	if (IS_ERR(viu_dev->clk)) {
+		dev_err(&op->dev, "failed to find the clock module!\n");
+		ret = -ENODEV;
+		goto err_clk;
+	} else {
+		clk_enable(viu_dev->clk);
+	}
+
+	/* reset VIU module */
+	viu_reset(viu_dev->vr);
+
+	/* install interrupt handler */
+	if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
+		dev_err(&op->dev, "Request VIU IRQ failed.\n");
+		ret = -ENODEV;
+		goto err_irq;
+	}
+
+	mutex_unlock(&viu_dev->lock);
+
+	dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
+	return ret;
+
+err_irq:
+	clk_disable(viu_dev->clk);
+	clk_put(viu_dev->clk);
+err_clk:
+	video_unregister_device(viu_dev->vdev);
+err_vdev:
+	mutex_unlock(&viu_dev->lock);
+	i2c_put_adapter(ad);
+	v4l2_device_unregister(&viu_dev->v4l2_dev);
+err:
+	irq_dispose_mapping(viu_irq);
+	return ret;
+}
+
+static int __devexit viu_of_remove(struct platform_device *op)
+{
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+	struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+	struct v4l2_subdev *sdev = list_entry(v4l2_dev->subdevs.next,
+					      struct v4l2_subdev, list);
+	struct i2c_client *client = v4l2_get_subdevdata(sdev);
+
+	free_irq(dev->irq, (void *)dev);
+	irq_dispose_mapping(dev->irq);
+
+	clk_disable(dev->clk);
+	clk_put(dev->clk);
+
+	video_unregister_device(dev->vdev);
+	i2c_put_adapter(client->adapter);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int viu_suspend(struct platform_device *op, pm_message_t state)
+{
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+	struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+
+	clk_disable(dev->clk);
+	return 0;
+}
+
+static int viu_resume(struct platform_device *op)
+{
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
+	struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
+
+	clk_enable(dev->clk);
+	return 0;
+}
+#endif
+
+/*
+ * Initialization and module stuff
+ */
+static struct of_device_id mpc512x_viu_of_match[] = {
+	{
+		.compatible = "fsl,mpc5121-viu",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
+
+static struct platform_driver viu_of_platform_driver = {
+	.probe = viu_of_probe,
+	.remove = __devexit_p(viu_of_remove),
+#ifdef CONFIG_PM
+	.suspend = viu_suspend,
+	.resume = viu_resume,
+#endif
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mpc512x_viu_of_match,
+	},
+};
+
+module_platform_driver(viu_of_platform_driver);
+
+MODULE_DESCRIPTION("Freescale Video-In(VIU)");
+MODULE_AUTHOR("Hongjun Chen");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/platform/indycam.c b/drivers/media/platform/indycam.c
new file mode 100644
index 0000000..5482363
--- /dev/null
+++ b/drivers/media/platform/indycam.c
@@ -0,0 +1,390 @@
+/*
+ *  indycam.c - Silicon Graphics IndyCam digital camera driver
+ *
+ *  Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
+ *  Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/* IndyCam decodes a stream of photons into a digital image representation ;-) */
+#include <linux/videodev2.h>
+#include <linux/i2c.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+
+#include "indycam.h"
+
+#define INDYCAM_MODULE_VERSION "0.0.5"
+
+MODULE_DESCRIPTION("SGI IndyCam driver");
+MODULE_VERSION(INDYCAM_MODULE_VERSION);
+MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
+MODULE_LICENSE("GPL");
+
+
+// #define INDYCAM_DEBUG
+
+#ifdef INDYCAM_DEBUG
+#define dprintk(x...) printk("IndyCam: " x)
+#define indycam_regdump(client) indycam_regdump_debug(client)
+#else
+#define dprintk(x...)
+#define indycam_regdump(client)
+#endif
+
+struct indycam {
+	struct v4l2_subdev sd;
+	u8 version;
+};
+
+static inline struct indycam *to_indycam(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct indycam, sd);
+}
+
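+/*
+ * Power-on defaults for registers 0x00-0x07 (INDYCAM_REG_CONTROL through
+ * INDYCAM_REG_BLUE_SATURATION), written back-to-back by
+ * indycam_write_block() at probe time.  The brightness entry is only a
+ * placeholder: that register is read-only and indycam_write_reg() skips it.
+ */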
+static const u8 initseq[] = {
+	INDYCAM_CONTROL_AGCENA,		/* INDYCAM_CONTROL */
+	INDYCAM_SHUTTER_60,		/* INDYCAM_SHUTTER */
+	INDYCAM_GAIN_DEFAULT,		/* INDYCAM_GAIN */
+	0x00,				/* INDYCAM_BRIGHTNESS (read-only) */
+	INDYCAM_RED_BALANCE_DEFAULT,	/* INDYCAM_RED_BALANCE */
+	INDYCAM_BLUE_BALANCE_DEFAULT,	/* INDYCAM_BLUE_BALANCE */
+	INDYCAM_RED_SATURATION_DEFAULT,	/* INDYCAM_RED_SATURATION */
+	INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
+};
+
+/* IndyCam register handling */
+
+static int indycam_read_reg(struct v4l2_subdev *sd, u8 reg, u8 *value)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	int ret;
+
+	if (reg == INDYCAM_REG_RESET) {
+		dprintk("indycam_read_reg(): "
+			"skipping write-only register %d\n", reg);
+		*value = 0;
+		return 0;
+	}
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+
+	if (ret < 0) {
+		printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
+		       "register = 0x%02x\n", reg);
+		return ret;
+	}
+
+	*value = (u8)ret;
+
+	return 0;
+}
+
+static int indycam_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	int err;
+
+	if (reg == INDYCAM_REG_BRIGHTNESS || reg == INDYCAM_REG_VERSION) {
+		dprintk("indycam_write_reg(): "
+			"skipping read-only register %d\n", reg);
+		return 0;
+	}
+
+	dprintk("Writing Reg %d = 0x%02x\n", reg, value);
+	err = i2c_smbus_write_byte_data(client, reg, value);
+
+	if (err) {
+		printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
+		       "register = 0x%02x, value = 0x%02x\n", reg, value);
+	}
+	return err;
+}
+
+static int indycam_write_block(struct v4l2_subdev *sd, u8 reg,
+			       u8 length, u8 *data)
+{
+	int i, err;
+
+	for (i = 0; i < length; i++) {
+		err = indycam_write_reg(sd, reg + i, data[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Helper functions */
+
+#ifdef INDYCAM_DEBUG
+static void indycam_regdump_debug(struct v4l2_subdev *sd)
+{
+	int i;
+	u8 val;
+
+	for (i = 0; i < 9; i++) {
+		indycam_read_reg(sd, i, &val);
+		dprintk("Reg %d = 0x%02x\n", i, val);
+	}
+}
+#endif
+
+static int indycam_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+	struct indycam *camera = to_indycam(sd);
+	u8 reg;
+	int ret = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUTOGAIN:
+	case V4L2_CID_AUTO_WHITE_BALANCE:
+		ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
+		if (ret)
+			return -EIO;
+		if (ctrl->id == V4L2_CID_AUTOGAIN)
+			ctrl->value = (reg & INDYCAM_CONTROL_AGCENA)
+				? 1 : 0;
+		else
+			ctrl->value = (reg & INDYCAM_CONTROL_AWBCTL)
+				? 1 : 0;
+		break;
+	case V4L2_CID_EXPOSURE:
+		ret = indycam_read_reg(sd, INDYCAM_REG_SHUTTER, &reg);
+		if (ret)
+			return -EIO;
+		ctrl->value = ((s32)reg == 0x00) ? 0xff : ((s32)reg - 1);
+		break;
+	case V4L2_CID_GAIN:
+		ret = indycam_read_reg(sd, INDYCAM_REG_GAIN, &reg);
+		if (ret)
+			return -EIO;
+		ctrl->value = (s32)reg;
+		break;
+	case V4L2_CID_RED_BALANCE:
+		ret = indycam_read_reg(sd, INDYCAM_REG_RED_BALANCE, &reg);
+		if (ret)
+			return -EIO;
+		ctrl->value = (s32)reg;
+		break;
+	case V4L2_CID_BLUE_BALANCE:
+		ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_BALANCE, &reg);
+		if (ret)
+			return -EIO;
+		ctrl->value = (s32)reg;
+		break;
+	case INDYCAM_CONTROL_RED_SATURATION:
+		ret = indycam_read_reg(sd,
+				       INDYCAM_REG_RED_SATURATION, &reg);
+		if (ret)
+			return -EIO;
+		ctrl->value = (s32)reg;
+		break;
+	case INDYCAM_CONTROL_BLUE_SATURATION:
+		ret = indycam_read_reg(sd,
+				       INDYCAM_REG_BLUE_SATURATION, &reg);
+		if (ret)
+			return -EIO;
+		ctrl->value = (s32)reg;
+		break;
+	case V4L2_CID_GAMMA:
+		if (camera->version == CAMERA_VERSION_MOOSE) {
+			ret = indycam_read_reg(sd,
+					       INDYCAM_REG_GAMMA, &reg);
+			if (ret)
+				return -EIO;
+			ctrl->value = (s32)reg;
+		} else {
+			ctrl->value = INDYCAM_GAMMA_DEFAULT;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int indycam_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+	struct indycam *camera = to_indycam(sd);
+	u8 reg;
+	int ret = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUTOGAIN:
+	case V4L2_CID_AUTO_WHITE_BALANCE:
+		ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
+		if (ret)
+			break;
+
+		if (ctrl->id == V4L2_CID_AUTOGAIN) {
+			if (ctrl->value)
+				reg |= INDYCAM_CONTROL_AGCENA;
+			else
+				reg &= ~INDYCAM_CONTROL_AGCENA;
+		} else {
+			if (ctrl->value)
+				reg |= INDYCAM_CONTROL_AWBCTL;
+			else
+				reg &= ~INDYCAM_CONTROL_AWBCTL;
+		}
+
+		ret = indycam_write_reg(sd, INDYCAM_REG_CONTROL, reg);
+		break;
+	case V4L2_CID_EXPOSURE:
+		reg = (ctrl->value == 0xff) ? 0x00 : (ctrl->value + 1);
+		ret = indycam_write_reg(sd, INDYCAM_REG_SHUTTER, reg);
+		break;
+	case V4L2_CID_GAIN:
+		ret = indycam_write_reg(sd, INDYCAM_REG_GAIN, ctrl->value);
+		break;
+	case V4L2_CID_RED_BALANCE:
+		ret = indycam_write_reg(sd, INDYCAM_REG_RED_BALANCE,
+					ctrl->value);
+		break;
+	case V4L2_CID_BLUE_BALANCE:
+		ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_BALANCE,
+					ctrl->value);
+		break;
+	case INDYCAM_CONTROL_RED_SATURATION:
+		ret = indycam_write_reg(sd, INDYCAM_REG_RED_SATURATION,
+					ctrl->value);
+		break;
+	case INDYCAM_CONTROL_BLUE_SATURATION:
+		ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_SATURATION,
+					ctrl->value);
+		break;
+	case V4L2_CID_GAMMA:
+		if (camera->version == CAMERA_VERSION_MOOSE) {
+			ret = indycam_write_reg(sd, INDYCAM_REG_GAMMA,
+						ctrl->value);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* I2C-interface */
+
+static int indycam_g_chip_ident(struct v4l2_subdev *sd,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct indycam *camera = to_indycam(sd);
+
+	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_INDYCAM,
+		       camera->version);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static const struct v4l2_subdev_core_ops indycam_core_ops = {
+	.g_chip_ident = indycam_g_chip_ident,
+	.g_ctrl = indycam_g_ctrl,
+	.s_ctrl = indycam_s_ctrl,
+};
+
+static const struct v4l2_subdev_ops indycam_ops = {
+	.core = &indycam_core_ops,
+};
+
+static int indycam_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct indycam *camera;
+	struct v4l2_subdev *sd;
+
+	v4l_info(client, "chip found @ 0x%x (%s)\n",
+			client->addr << 1, client->adapter->name);
+
+	camera = kzalloc(sizeof(struct indycam), GFP_KERNEL);
+	if (!camera)
+		return -ENOMEM;
+
+	sd = &camera->sd;
+	v4l2_i2c_subdev_init(sd, client, &indycam_ops);
+
+	camera->version = i2c_smbus_read_byte_data(client,
+						   INDYCAM_REG_VERSION);
+	if (camera->version != CAMERA_VERSION_INDY &&
+	    camera->version != CAMERA_VERSION_MOOSE) {
+		kfree(camera);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "IndyCam v%d.%d detected\n",
+	       INDYCAM_VERSION_MAJOR(camera->version),
+	       INDYCAM_VERSION_MINOR(camera->version));
+
+	indycam_regdump(sd);
+
+	// initialize
+	err = indycam_write_block(sd, 0, sizeof(initseq), (u8 *)&initseq);
+	if (err) {
+		printk(KERN_ERR "IndyCam initialization failed\n");
+		kfree(camera);
+		return -EIO;
+	}
+
+	indycam_regdump(sd);
+
+	// white balance
+	err = indycam_write_reg(sd, INDYCAM_REG_CONTROL,
+			  INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
+	if (err) {
+		printk(KERN_ERR "IndyCam: White balancing camera failed\n");
+		kfree(camera);
+		return -EIO;
+	}
+
+	indycam_regdump(sd);
+
+	printk(KERN_INFO "IndyCam initialized\n");
+
+	return 0;
+}
+
+static int indycam_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+	v4l2_device_unregister_subdev(sd);
+	kfree(to_indycam(sd));
+	return 0;
+}
+
+static const struct i2c_device_id indycam_id[] = {
+	{ "indycam", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, indycam_id);
+
+static struct i2c_driver indycam_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "indycam",
+	},
+	.probe		= indycam_probe,
+	.remove		= indycam_remove,
+	.id_table	= indycam_id,
+};
+
+module_i2c_driver(indycam_driver);
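This file only implements a v4l2_subdev; it registers no video node of its own. As a rough sketch (illustrative, not part of the patch), a bridge driver such as vino would bind the sensor once its own v4l2_device and I2C adapter are registered. The function and variable names below are placeholders, and the v4l2_i2c_new_subdev() call assumes the helper signature used by this kernel series.

#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>

#include "indycam.h"

/* Illustrative: v4l2_dev and adapter belong to the bridge driver. */
static struct v4l2_subdev *attach_indycam(struct v4l2_device *v4l2_dev,
					  struct i2c_adapter *adapter)
{
	static const unsigned short addrs[] = { INDYCAM_ADDR, I2C_CLIENT_END };
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_AUTOGAIN,
		.value	= 1,
	};
	struct v4l2_subdev *sd;

	/* Loads the "indycam" module if needed and runs indycam_probe(). */
	sd = v4l2_i2c_new_subdev(v4l2_dev, adapter, "indycam", 0, addrs);
	if (!sd)
		return NULL;

	/* Controls are then reached through the subdev core ops. */
	v4l2_subdev_call(sd, core, s_ctrl, &ctrl);
	return sd;
}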
diff --git a/drivers/media/platform/indycam.h b/drivers/media/platform/indycam.h
new file mode 100644
index 0000000..881f21c
--- /dev/null
+++ b/drivers/media/platform/indycam.h
@@ -0,0 +1,93 @@
+/*
+ *  indycam.h - Silicon Graphics IndyCam digital camera driver
+ *
+ *  Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
+ *  Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#ifndef _INDYCAM_H_
+#define _INDYCAM_H_
+
+/* I2C address for the Guinness Camera */
+#define INDYCAM_ADDR			0x56
+
+/* Camera version */
+#define CAMERA_VERSION_INDY		0x10	/* v1.0 */
+#define CAMERA_VERSION_MOOSE		0x12	/* v1.2 */
+#define INDYCAM_VERSION_MAJOR(x)	(((x) & 0xf0) >> 4)
+#define INDYCAM_VERSION_MINOR(x)	((x) & 0x0f)
+
+/* Register bus addresses */
+#define INDYCAM_REG_CONTROL		0x00
+#define INDYCAM_REG_SHUTTER		0x01
+#define INDYCAM_REG_GAIN		0x02
+#define INDYCAM_REG_BRIGHTNESS		0x03 /* read-only */
+#define INDYCAM_REG_RED_BALANCE		0x04
+#define INDYCAM_REG_BLUE_BALANCE	0x05
+#define INDYCAM_REG_RED_SATURATION	0x06
+#define INDYCAM_REG_BLUE_SATURATION	0x07
+#define INDYCAM_REG_GAMMA		0x08
+#define INDYCAM_REG_VERSION		0x0e /* read-only */
+#define INDYCAM_REG_RESET		0x0f /* write-only */
+
+#define INDYCAM_REG_LED			0x46
+#define INDYCAM_REG_ORIENTATION		0x47
+#define INDYCAM_REG_BUTTON		0x48
+
+/* Field definitions of registers */
+#define INDYCAM_CONTROL_AGCENA		(1<<0) /* automatic gain control */
+#define INDYCAM_CONTROL_AWBCTL		(1<<1) /* automatic white balance */
+						/* 2-3 are reserved */
+#define INDYCAM_CONTROL_EVNFLD		(1<<4)	/* read-only */
+
+#define INDYCAM_SHUTTER_10000		0x02	/* 1/10000 second */
+#define INDYCAM_SHUTTER_4000		0x04	/* 1/4000 second */
+#define INDYCAM_SHUTTER_2000		0x08	/* 1/2000 second */
+#define INDYCAM_SHUTTER_1000		0x10	/* 1/1000 second */
+#define INDYCAM_SHUTTER_500		0x20	/* 1/500 second */
+#define INDYCAM_SHUTTER_250		0x3f	/* 1/250 second */
+#define INDYCAM_SHUTTER_125		0x7e	/* 1/125 second */
+#define INDYCAM_SHUTTER_100		0x9e	/* 1/100 second */
+#define INDYCAM_SHUTTER_60		0x00	/* 1/60 second */
+
+#define INDYCAM_LED_ACTIVE			0x10
+#define INDYCAM_LED_INACTIVE			0x30
+#define INDYCAM_ORIENTATION_BOTTOM_TO_TOP	0x40
+#define INDYCAM_BUTTON_RELEASED			0x10
+
+/* Values for controls */
+#define INDYCAM_SHUTTER_MIN		0x00
+#define INDYCAM_SHUTTER_MAX		0xff
+#define INDYCAM_GAIN_MIN                0x00
+#define INDYCAM_GAIN_MAX                0xff
+#define INDYCAM_RED_BALANCE_MIN		0x00
+#define INDYCAM_RED_BALANCE_MAX		0xff
+#define INDYCAM_BLUE_BALANCE_MIN        0x00
+#define INDYCAM_BLUE_BALANCE_MAX        0xff
+#define INDYCAM_RED_SATURATION_MIN      0x00
+#define INDYCAM_RED_SATURATION_MAX      0xff
+#define INDYCAM_BLUE_SATURATION_MIN	0x00
+#define INDYCAM_BLUE_SATURATION_MAX	0xff
+#define INDYCAM_GAMMA_MIN		0x00
+#define INDYCAM_GAMMA_MAX		0xff
+
+#define INDYCAM_AGC_DEFAULT		1
+#define INDYCAM_AWB_DEFAULT		0
+#define INDYCAM_SHUTTER_DEFAULT		0xff
+#define INDYCAM_GAIN_DEFAULT		0x80
+#define INDYCAM_RED_BALANCE_DEFAULT	0x18
+#define INDYCAM_BLUE_BALANCE_DEFAULT	0xa4
+#define INDYCAM_RED_SATURATION_DEFAULT	0x80
+#define INDYCAM_BLUE_SATURATION_DEFAULT	0xc0
+#define INDYCAM_GAMMA_DEFAULT		0x80
+
+/* Driver interface definitions */
+
+#define INDYCAM_CONTROL_RED_SATURATION		(V4L2_CID_PRIVATE_BASE + 0)
+#define INDYCAM_CONTROL_BLUE_SATURATION		(V4L2_CID_PRIVATE_BASE + 1)
+
+#endif
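The version register packs the major and minor revision into a single byte; a quick check of the two macros above (illustrative only, values taken from the defines in this header):

#include <stdio.h>

int main(void)
{
	unsigned char version = 0x12;	/* CAMERA_VERSION_MOOSE */

	/* Prints "IndyCam v1.2", matching the "IndyCam v%d.%d detected"
	 * message in indycam_probe(). */
	printf("IndyCam v%d.%d\n",
	       (version & 0xf0) >> 4,	/* INDYCAM_VERSION_MAJOR() */
	       version & 0x0f);		/* INDYCAM_VERSION_MINOR() */
	return 0;
}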
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
new file mode 100644
index 0000000..a38c152
--- /dev/null
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -0,0 +1,1120 @@
+/*
+ * V4L2 deinterlacing support.
+ *
+ * Copyright (c) 2012 Vista Silicon S.L.
+ * Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-deinterlace"
+
+MODULE_DESCRIPTION("mem2mem device which supports deinterlacing using dmaengine");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+
+static bool debug = true;
+module_param(debug, bool, 0644);
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE	(1 << 0)
+#define MEM2MEM_OUTPUT	(1 << 1)
+
+#define MEM2MEM_NAME		"m2m-deinterlace"
+
+#define dprintk(dev, fmt, arg...) \
+	v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+struct deinterlace_fmt {
+	char	*name;
+	u32	fourcc;
+	/* Types the format can be used for */
+	u32	types;
+};
+
+static struct deinterlace_fmt formats[] = {
+	{
+		.name	= "YUV 4:2:0 Planar",
+		.fourcc	= V4L2_PIX_FMT_YUV420,
+		.types	= MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+	},
+	{
+		.name	= "YUYV 4:2:2",
+		.fourcc	= V4L2_PIX_FMT_YUYV,
+		.types	= MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+	},
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Per-queue, driver-specific private data */
+struct deinterlace_q_data {
+	unsigned int		width;
+	unsigned int		height;
+	unsigned int		sizeimage;
+	struct deinterlace_fmt	*fmt;
+	enum v4l2_field		field;
+};
+
+enum {
+	V4L2_M2M_SRC = 0,
+	V4L2_M2M_DST = 1,
+};
+
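+/*
+ * Each value below names one interleaved-DMA pass: it copies a single field
+ * of one plane (or of the packed YUYV image) from the sequential-field
+ * source buffer into the destination.  The *_DOUBLING passes copy the
+ * already transferred field a second time, shifted down by one line, to
+ * implement line doubling for V4L2_FIELD_NONE output.
+ */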
+enum {
+	YUV420_DMA_Y_ODD,
+	YUV420_DMA_Y_EVEN,
+	YUV420_DMA_U_ODD,
+	YUV420_DMA_U_EVEN,
+	YUV420_DMA_V_ODD,
+	YUV420_DMA_V_EVEN,
+	YUV420_DMA_Y_ODD_DOUBLING,
+	YUV420_DMA_U_ODD_DOUBLING,
+	YUV420_DMA_V_ODD_DOUBLING,
+	YUYV_DMA_ODD,
+	YUYV_DMA_EVEN,
+	YUYV_DMA_EVEN_DOUBLING,
+};
+
+/* Source and destination queue data */
+static struct deinterlace_q_data q_data[2];
+
+static struct deinterlace_q_data *get_q_data(enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &q_data[V4L2_M2M_SRC];
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &q_data[V4L2_M2M_DST];
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+static struct deinterlace_fmt *find_format(struct v4l2_format *f)
+{
+	struct deinterlace_fmt *fmt;
+	unsigned int k;
+
+	for (k = 0; k < NUM_FORMATS; k++) {
+		fmt = &formats[k];
+		if ((fmt->types & f->type) &&
+			(fmt->fourcc == f->fmt.pix.pixelformat))
+			break;
+	}
+
+	if (k == NUM_FORMATS)
+		return NULL;
+
+	return &formats[k];
+}
+
+struct deinterlace_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vfd;
+
+	atomic_t		busy;
+	struct mutex		dev_mutex;
+	spinlock_t		irqlock;
+
+	struct dma_chan		*dma_chan;
+
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct vb2_alloc_ctx	*alloc_ctx;
+};
+
+struct deinterlace_ctx {
+	struct deinterlace_dev	*dev;
+
+	/* Abort requested by m2m */
+	int			aborting;
+	enum v4l2_colorspace	colorspace;
+	dma_cookie_t		cookie;
+	struct v4l2_m2m_ctx	*m2m_ctx;
+	struct dma_interleaved_template *xt;
+};
+
+/*
+ * mem2mem callbacks
+ */
+static int deinterlace_job_ready(void *priv)
+{
+	struct deinterlace_ctx *ctx = priv;
+	struct deinterlace_dev *pcdev = ctx->dev;
+
+	if ((v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0)
+	    && (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0)
+	    && (atomic_read(&ctx->dev->busy) == 0)) {
+		dprintk(pcdev, "Task ready\n");
+		return 1;
+	}
+
+	dprintk(pcdev, "Task not ready to run\n");
+
+	return 0;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+	struct deinterlace_ctx *ctx = priv;
+	struct deinterlace_dev *pcdev = ctx->dev;
+
+	ctx->aborting = 1;
+
+	dprintk(pcdev, "Aborting task\n");
+
+	v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void deinterlace_lock(void *priv)
+{
+	struct deinterlace_ctx *ctx = priv;
+	struct deinterlace_dev *pcdev = ctx->dev;
+	mutex_lock(&pcdev->dev_mutex);
+}
+
+static void deinterlace_unlock(void *priv)
+{
+	struct deinterlace_ctx *ctx = priv;
+	struct deinterlace_dev *pcdev = ctx->dev;
+	mutex_unlock(&pcdev->dev_mutex);
+}
+
+static void dma_callback(void *data)
+{
+	struct deinterlace_ctx *curr_ctx = data;
+	struct deinterlace_dev *pcdev = curr_ctx->dev;
+	struct vb2_buffer *src_vb, *dst_vb;
+
+	atomic_set(&pcdev->busy, 0);
+
+	src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+	v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+	v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+
+	v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+
+	dprintk(pcdev, "dma transfers completed.\n");
+}
+
+static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
+				  int do_callback)
+{
+	struct deinterlace_q_data *s_q_data, *d_q_data;
+	struct vb2_buffer *src_buf, *dst_buf;
+	struct deinterlace_dev *pcdev = ctx->dev;
+	struct dma_chan *chan = pcdev->dma_chan;
+	struct dma_device *dmadev = chan->device;
+	struct dma_async_tx_descriptor *tx;
+	unsigned int s_width, s_height;
+	unsigned int d_width, d_height;
+	unsigned int d_size, s_size;
+	dma_addr_t p_in, p_out;
+	enum dma_ctrl_flags flags;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+	s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	s_width	= s_q_data->width;
+	s_height = s_q_data->height;
+	s_size = s_width * s_height;
+
+	d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	d_width = d_q_data->width;
+	d_height = d_q_data->height;
+	d_size = d_width * d_height;
+
+	p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+	if (!p_in || !p_out) {
+		v4l2_err(&pcdev->v4l2_dev,
+			 "Acquiring kernel pointers to buffers failed\n");
+		return;
+	}
+
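+	/*
+	 * The offsets below assume the usual three-plane YUV420 layout:
+	 * the Y plane (s_size bytes) at offset 0, the U plane (s_size / 4
+	 * bytes) at offset s_size and the V plane at 5 * s_size / 4.  In
+	 * the sequential-field source every plane stores all of its
+	 * top-field lines first, then all of its bottom-field lines.
+	 * Because dst_sgl is set and icg equals one line of the plane,
+	 * each copied line lands on every other line of the destination.
+	 */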
+	switch (op) {
+	case YUV420_DMA_Y_ODD:
+		ctx->xt->numf = s_height / 2;
+		ctx->xt->sgl[0].size = s_width;
+		ctx->xt->sgl[0].icg = s_width;
+		ctx->xt->src_start = p_in;
+		ctx->xt->dst_start = p_out;
+		break;
+	case YUV420_DMA_Y_EVEN:
+		ctx->xt->numf = s_height / 2;
+		ctx->xt->sgl[0].size = s_width;
+		ctx->xt->sgl[0].icg = s_width;
+		ctx->xt->src_start = p_in + s_size / 2;
+		ctx->xt->dst_start = p_out + s_width;
+		break;
+	case YUV420_DMA_U_ODD:
+		ctx->xt->numf = s_height / 4;
+		ctx->xt->sgl[0].size = s_width / 2;
+		ctx->xt->sgl[0].icg = s_width / 2;
+		ctx->xt->src_start = p_in + s_size;
+		ctx->xt->dst_start = p_out + s_size;
+		break;
+	case YUV420_DMA_U_EVEN:
+		ctx->xt->numf = s_height / 4;
+		ctx->xt->sgl[0].size = s_width / 2;
+		ctx->xt->sgl[0].icg = s_width / 2;
+		ctx->xt->src_start = p_in + (9 * s_size) / 8;
+		ctx->xt->dst_start = p_out + s_size + s_width / 2;
+		break;
+	case YUV420_DMA_V_ODD:
+		ctx->xt->numf = s_height / 4;
+		ctx->xt->sgl[0].size = s_width / 2;
+		ctx->xt->sgl[0].icg = s_width / 2;
+		ctx->xt->src_start = p_in + (5 * s_size) / 4;
+		ctx->xt->dst_start = p_out + (5 * s_size) / 4;
+		break;
+	case YUV420_DMA_V_EVEN:
+		ctx->xt->numf = s_height / 4;
+		ctx->xt->sgl[0].size = s_width / 2;
+		ctx->xt->sgl[0].icg = s_width / 2;
+		ctx->xt->src_start = p_in + (11 * s_size) / 8;
+		ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2;
+		break;
+	case YUV420_DMA_Y_ODD_DOUBLING:
+		ctx->xt->numf = s_height / 2;
+		ctx->xt->sgl[0].size = s_width;
+		ctx->xt->sgl[0].icg = s_width;
+		ctx->xt->src_start = p_in;
+		ctx->xt->dst_start = p_out + s_width;
+		break;
+	case YUV420_DMA_U_ODD_DOUBLING:
+		ctx->xt->numf = s_height / 4;
+		ctx->xt->sgl[0].size = s_width / 2;
+		ctx->xt->sgl[0].icg = s_width / 2;
+		ctx->xt->src_start = p_in + s_size;
+		ctx->xt->dst_start = p_out + s_size + s_width / 2;
+		break;
+	case YUV420_DMA_V_ODD_DOUBLING:
+		ctx->xt->numf = s_height / 4;
+		ctx->xt->sgl[0].size = s_width / 2;
+		ctx->xt->sgl[0].icg = s_width / 2;
+		ctx->xt->src_start = p_in + (5 * s_size) / 4;
+		ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2;
+		break;
+	case YUYV_DMA_ODD:
+		ctx->xt->numf = s_height / 2;
+		ctx->xt->sgl[0].size = s_width * 2;
+		ctx->xt->sgl[0].icg = s_width * 2;
+		ctx->xt->src_start = p_in;
+		ctx->xt->dst_start = p_out;
+		break;
+	case YUYV_DMA_EVEN:
+		ctx->xt->numf = s_height / 2;
+		ctx->xt->sgl[0].size = s_width * 2;
+		ctx->xt->sgl[0].icg = s_width * 2;
+		ctx->xt->src_start = p_in + s_size;
+		ctx->xt->dst_start = p_out + s_width * 2;
+		break;
+	case YUYV_DMA_EVEN_DOUBLING:
+	default:
+		ctx->xt->numf = s_height / 2;
+		ctx->xt->sgl[0].size = s_width * 2;
+		ctx->xt->sgl[0].icg = s_width * 2;
+		ctx->xt->src_start = p_in;
+		ctx->xt->dst_start = p_out + s_width * 2;
+		break;
+	}
+
+	/* Common parameters for all transfers */
+	ctx->xt->frame_size = 1;
+	ctx->xt->dir = DMA_MEM_TO_MEM;
+	ctx->xt->src_sgl = false;
+	ctx->xt->dst_sgl = true;
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
+		DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+
+	tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
+	if (tx == NULL) {
+		v4l2_warn(&pcdev->v4l2_dev, "DMA interleaved prep error\n");
+		return;
+	}
+
+	if (do_callback) {
+		tx->callback = dma_callback;
+		tx->callback_param = ctx;
+	}
+
+	ctx->cookie = dmaengine_submit(tx);
+	if (dma_submit_error(ctx->cookie)) {
+		v4l2_warn(&pcdev->v4l2_dev,
+			  "DMA submit error %d with src=0x%x dst=0x%x len=0x%x\n",
+			  ctx->cookie, (unsigned)p_in, (unsigned)p_out,
+			  s_size * 3/2);
+		return;
+	}
+
+	dma_async_issue_pending(chan);
+}
+
+static void deinterlace_device_run(void *priv)
+{
+	struct deinterlace_ctx *ctx = priv;
+	struct deinterlace_q_data *dst_q_data;
+
+	atomic_set(&ctx->dev->busy, 1);
+
+	dprintk(ctx->dev, "%s: DMA try issue.\n", __func__);
+
+	dst_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+	/*
+	 * Four field conversions are supported at the moment:
+	 *  V4L2_FIELD_SEQ_TB --> V4L2_FIELD_INTERLACED_TB:
+	 *	the two fields stored sequentially in the input buffer are
+	 *	woven into the output buffer. Top field comes first.
+	 *  V4L2_FIELD_SEQ_TB --> V4L2_FIELD_NONE:
+	 *	the top field of the input buffer is copied to the output
+	 *	buffer using line doubling; the bottom field is discarded.
+	 *  V4L2_FIELD_SEQ_BT --> V4L2_FIELD_INTERLACED_BT:
+	 *	the two fields stored sequentially in the input buffer are
+	 *	woven into the output buffer. Bottom field comes first.
+	 *  V4L2_FIELD_SEQ_BT --> V4L2_FIELD_NONE:
+	 *	the bottom field of the input buffer is copied to the output
+	 *	buffer using line doubling; the top field is discarded.
+	 */
+	switch (dst_q_data->fmt->fourcc) {
+	case V4L2_PIX_FMT_YUV420:
+		switch (dst_q_data->field) {
+		case V4L2_FIELD_INTERLACED_TB:
+		case V4L2_FIELD_INTERLACED_BT:
+			dprintk(ctx->dev, "%s: yuv420 interlaced tb.\n",
+				__func__);
+			deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_Y_EVEN, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_U_EVEN, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_V_EVEN, 1);
+			break;
+		case V4L2_FIELD_NONE:
+		default:
+			dprintk(ctx->dev, "%s: yuv420 interlaced line doubling.\n",
+				__func__);
+			deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD_DOUBLING, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD_DOUBLING, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0);
+			deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD_DOUBLING, 1);
+			break;
+		}
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	default:
+		switch (dst_q_data->field) {
+		case V4L2_FIELD_INTERLACED_TB:
+		case V4L2_FIELD_INTERLACED_BT:
+			dprintk(ctx->dev, "%s: yuyv interlaced_tb.\n",
+				__func__);
+			deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0);
+			deinterlace_issue_dma(ctx, YUYV_DMA_EVEN, 1);
+			break;
+		case V4L2_FIELD_NONE:
+		default:
+			dprintk(ctx->dev, "%s: yuyv interlaced line doubling.\n",
+				__func__);
+			deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0);
+			deinterlace_issue_dma(ctx, YUYV_DMA_EVEN_DOUBLING, 1);
+			break;
+		}
+		break;
+	}
+
+	dprintk(ctx->dev, "%s: DMA issue done.\n", __func__);
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strlcpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
+	strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+			  | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+	int i, num;
+	struct deinterlace_fmt *fmt;
+
+	num = 0;
+
+	for (i = 0; i < NUM_FORMATS; ++i) {
+		if (formats[i].types & type) {
+			/* index-th format of the requested type found? */
+			if (num == f->index)
+				break;
+			/* Correct type but haven't reached our index yet,
+			 * just increment per-type index */
+			++num;
+		}
+	}
+
+	if (i < NUM_FORMATS) {
+		/* Format found */
+		fmt = &formats[i];
+		strlcpy(f->description, fmt->name, sizeof(f->description));
+		f->pixelformat = fmt->fourcc;
+		return 0;
+	}
+
+	/* Format not found */
+	return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct deinterlace_q_data *q_data;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(f->type);
+
+	f->fmt.pix.width	= q_data->width;
+	f->fmt.pix.height	= q_data->height;
+	f->fmt.pix.field	= q_data->field;
+	f->fmt.pix.pixelformat	= q_data->fmt->fourcc;
+
+	switch (q_data->fmt->fourcc) {
+	case V4L2_PIX_FMT_YUV420:
+		f->fmt.pix.bytesperline = q_data->width * 3 / 2;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	default:
+		f->fmt.pix.bytesperline = q_data->width * 2;
+	}
+
+	f->fmt.pix.sizeimage	= q_data->sizeimage;
+	f->fmt.pix.colorspace	= ctx->colorspace;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
+{
+	switch (f->fmt.pix.pixelformat) {
+	case V4L2_PIX_FMT_YUV420:
+		f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	default:
+		f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+	}
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct deinterlace_fmt *fmt;
+	struct deinterlace_ctx *ctx = priv;
+
+	fmt = find_format(f);
+	if (!fmt || !(fmt->types & MEM2MEM_CAPTURE))
+		f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+	f->fmt.pix.colorspace = ctx->colorspace;
+
+	if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+	    f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+	    f->fmt.pix.field != V4L2_FIELD_NONE)
+		f->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct deinterlace_fmt *fmt;
+
+	fmt = find_format(f);
+	if (!fmt || !(fmt->types & MEM2MEM_OUTPUT))
+		f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+	if (!f->fmt.pix.colorspace)
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+	if (f->fmt.pix.field != V4L2_FIELD_SEQ_TB &&
+	    f->fmt.pix.field != V4L2_FIELD_SEQ_BT)
+		f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
+{
+	struct deinterlace_q_data *q_data;
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	q_data->fmt = find_format(f);
+	if (!q_data->fmt) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "Couldn't set format type %d, wxh: %dx%d. fmt: %d, field: %d\n",
+			f->type, f->fmt.pix.width, f->fmt.pix.height,
+			f->fmt.pix.pixelformat, f->fmt.pix.field);
+		return -EINVAL;
+	}
+
+	q_data->width		= f->fmt.pix.width;
+	q_data->height		= f->fmt.pix.height;
+	q_data->field		= f->fmt.pix.field;
+
+	switch (f->fmt.pix.pixelformat) {
+	case V4L2_PIX_FMT_YUV420:
+		f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+		q_data->sizeimage = (q_data->width * q_data->height * 3) / 2;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	default:
+		f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+		q_data->sizeimage = q_data->width * q_data->height * 2;
+	}
+
+	dprintk(ctx->dev,
+		"Setting format for type %d, wxh: %dx%d, fmt: %d, field: %d\n",
+		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
+		q_data->field);
+
+	return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = vidioc_try_fmt_vid_cap(file, priv, f);
+	if (ret)
+		return ret;
+	return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct deinterlace_ctx *ctx = priv;
+	int ret;
+
+	ret = vidioc_try_fmt_vid_out(file, priv, f);
+	if (ret)
+		return ret;
+
+	ret = vidioc_s_fmt(priv, f);
+	if (!ret)
+		ctx->colorspace = f->fmt.pix.colorspace;
+
+	return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	struct deinterlace_ctx *ctx = priv;
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct deinterlace_ctx *ctx = priv;
+
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct deinterlace_ctx *ctx = priv;
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct deinterlace_ctx *ctx = priv;
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct deinterlace_q_data *s_q_data, *d_q_data;
+	struct deinterlace_ctx *ctx = priv;
+
+	s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+	/* Check that src and dst queues have the same pix format */
+	if (s_q_data->fmt->fourcc != d_q_data->fmt->fourcc) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "src and dst formats don't match.\n");
+		return -EINVAL;
+	}
+
+	/* Check that input and output deinterlacing types are compatible */
+	switch (s_q_data->field) {
+	case V4L2_FIELD_SEQ_BT:
+		if (d_q_data->field != V4L2_FIELD_NONE &&
+			d_q_data->field != V4L2_FIELD_INTERLACED_BT) {
+			v4l2_err(&ctx->dev->v4l2_dev,
+			 "src and dst field conversion [(%d)->(%d)] not supported.\n",
+				s_q_data->field, d_q_data->field);
+			return -EINVAL;
+		}
+		break;
+	case V4L2_FIELD_SEQ_TB:
+		if (d_q_data->field != V4L2_FIELD_NONE &&
+			d_q_data->field != V4L2_FIELD_INTERLACED_TB) {
+			v4l2_err(&ctx->dev->v4l2_dev,
+			 "src and dst field conversion [(%d)->(%d)] not supported.\n",
+				s_q_data->field, d_q_data->field);
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct deinterlace_ctx *ctx = priv;
+
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	= vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= vidioc_s_fmt_vid_cap,
+
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out	= vidioc_g_fmt_vid_out,
+	.vidioc_try_fmt_vid_out	= vidioc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out	= vidioc_s_fmt_vid_out,
+
+	.vidioc_reqbufs		= vidioc_reqbufs,
+	.vidioc_querybuf	= vidioc_querybuf,
+
+	.vidioc_qbuf		= vidioc_qbuf,
+	.vidioc_dqbuf		= vidioc_dqbuf,
+
+	.vidioc_streamon	= vidioc_streamon,
+	.vidioc_streamoff	= vidioc_streamoff,
+};
+
+
+/*
+ * Queue operations
+ */
+struct vb2_dc_conf {
+	struct device           *dev;
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+	struct deinterlace_q_data *q_data;
+	unsigned int size, count = *nbuffers;
+
+	q_data = get_q_data(vq->type);
+
+	switch (q_data->fmt->fourcc) {
+	case V4L2_PIX_FMT_YUV420:
+		size = q_data->width * q_data->height * 3 / 2;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	default:
+		size = q_data->width * q_data->height * 2;
+	}
+
+	*nplanes = 1;
+	*nbuffers = count;
+	sizes[0] = size;
+
+	alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+	dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+	return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+	struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct deinterlace_q_data *q_data;
+
+	dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+	q_data = get_q_data(vb->vb2_queue->type);
+
+	if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+		dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+			__func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+	return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+	struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static struct vb2_ops deinterlace_qops = {
+	.queue_setup	 = deinterlace_queue_setup,
+	.buf_prepare	 = deinterlace_buf_prepare,
+	.buf_queue	 = deinterlace_buf_queue,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct deinterlace_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &deinterlace_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	q_data[V4L2_M2M_SRC].fmt = &formats[0];
+	q_data[V4L2_M2M_SRC].width = 640;
+	q_data[V4L2_M2M_SRC].height = 480;
+	q_data[V4L2_M2M_SRC].sizeimage = (640 * 480 * 3) / 2;
+	q_data[V4L2_M2M_SRC].field = V4L2_FIELD_SEQ_TB;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &deinterlace_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	q_data[V4L2_M2M_DST].fmt = &formats[0];
+	q_data[V4L2_M2M_DST].width = 640;
+	q_data[V4L2_M2M_DST].height = 480;
+	q_data[V4L2_M2M_DST].sizeimage = (640 * 480 * 3) / 2;
+	q_data[V4L2_M2M_DST].field = V4L2_FIELD_INTERLACED_TB;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int deinterlace_open(struct file *file)
+{
+	struct deinterlace_dev *pcdev = video_drvdata(file);
+	struct deinterlace_ctx *ctx = NULL;
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	file->private_data = ctx;
+	ctx->dev = pcdev;
+
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+	if (IS_ERR(ctx->m2m_ctx)) {
+		int ret = PTR_ERR(ctx->m2m_ctx);
+
+		kfree(ctx);
+		return ret;
+	}
+
+	ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) +
+				sizeof(struct data_chunk), GFP_KERNEL);
+	if (!ctx->xt) {
+		v4l2_m2m_ctx_release(ctx->m2m_ctx);
+		kfree(ctx);
+		return -ENOMEM;
+	}
+
+	ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+	dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+	return 0;
+}
+
+static int deinterlace_release(struct file *file)
+{
+	struct deinterlace_dev *pcdev = video_drvdata(file);
+	struct deinterlace_ctx *ctx = file->private_data;
+
+	dprintk(pcdev, "Releasing instance %p\n", ctx);
+
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	kfree(ctx->xt);
+	kfree(ctx);
+
+	return 0;
+}
+
+static unsigned int deinterlace_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct deinterlace_ctx *ctx = file->private_data;
+	int ret;
+
+	deinterlace_lock(ctx);
+	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	deinterlace_unlock(ctx);
+
+	return ret;
+}
+
+static int deinterlace_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct deinterlace_ctx *ctx = file->private_data;
+
+	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+	.owner		= THIS_MODULE,
+	.open		= deinterlace_open,
+	.release	= deinterlace_release,
+	.poll		= deinterlace_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= deinterlace_mmap,
+};
+
+static struct video_device deinterlace_videodev = {
+	.name		= MEM2MEM_NAME,
+	.fops		= &deinterlace_fops,
+	.ioctl_ops	= &deinterlace_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= deinterlace_device_run,
+	.job_ready	= deinterlace_job_ready,
+	.job_abort	= deinterlace_job_abort,
+	.lock		= deinterlace_lock,
+	.unlock		= deinterlace_unlock,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+	struct deinterlace_dev *pcdev;
+	struct video_device *vfd;
+	dma_cap_mask_t mask;
+	int ret = 0;
+
+	pcdev = kzalloc(sizeof *pcdev, GFP_KERNEL);
+	if (!pcdev)
+		return -ENOMEM;
+
+	spin_lock_init(&pcdev->irqlock);
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_INTERLEAVE, mask);
+	pcdev->dma_chan = dma_request_channel(mask, NULL, pcdev);
+	if (!pcdev->dma_chan)
+		goto free_dev;
+
+	if (!dma_has_cap(DMA_INTERLEAVE, pcdev->dma_chan->device->cap_mask)) {
+		v4l2_err(&pcdev->v4l2_dev, "DMA does not support INTERLEAVE\n");
+		goto rel_dma;
+	}
+
+	ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+	if (ret)
+		goto rel_dma;
+
+	atomic_set(&pcdev->busy, 0);
+	mutex_init(&pcdev->dev_mutex);
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto unreg_dev;
+	}
+
+	*vfd = deinterlace_videodev;
+	vfd->lock = &pcdev->dev_mutex;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
+		goto rel_vdev;
+	}
+
+	video_set_drvdata(vfd, pcdev);
+	snprintf(vfd->name, sizeof(vfd->name), "%s", deinterlace_videodev.name);
+	pcdev->vfd = vfd;
+	v4l2_info(&pcdev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+			" Device registered as /dev/video%d\n", vfd->num);
+
+	platform_set_drvdata(pdev, pcdev);
+
+	pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(pcdev->alloc_ctx)) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to alloc vb2 context\n");
+		ret = PTR_ERR(pcdev->alloc_ctx);
+		goto err_ctx;
+	}
+
+	pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(pcdev->m2m_dev)) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(pcdev->m2m_dev);
+		goto err_m2m;
+	}
+
+	return 0;
+
+	v4l2_m2m_release(pcdev->m2m_dev);
+err_m2m:
+	video_unregister_device(pcdev->vfd);
+err_ctx:
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+rel_vdev:
+	video_device_release(vfd);
+unreg_dev:
+	v4l2_device_unregister(&pcdev->v4l2_dev);
+rel_dma:
+	dma_release_channel(pcdev->dma_chan);
+free_dev:
+	kfree(pcdev);
+
+	return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+	struct deinterlace_dev *pcdev =
+		(struct deinterlace_dev *)platform_get_drvdata(pdev);
+
+	v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME "\n");
+	v4l2_m2m_release(pcdev->m2m_dev);
+	video_unregister_device(pcdev->vfd);
+	v4l2_device_unregister(&pcdev->v4l2_dev);
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+	dma_release_channel(pcdev->dma_chan);
+	kfree(pcdev);
+
+	return 0;
+}
+
+static struct platform_driver deinterlace_pdrv = {
+	.probe		= deinterlace_probe,
+	.remove		= deinterlace_remove,
+	.driver		= {
+		.name	= MEM2MEM_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static void __exit deinterlace_exit(void)
+{
+	platform_driver_unregister(&deinterlace_pdrv);
+}
+
+static int __init deinterlace_init(void)
+{
+	return platform_driver_register(&deinterlace_pdrv);
+}
+
+module_init(deinterlace_init);
+module_exit(deinterlace_exit);
+
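For context, a user of this mem2mem device negotiates the conversion by setting the OUTPUT (source) queue to a sequential field order and the CAPTURE (destination) queue to an interlaced or progressive one, which is exactly what vidioc_streamon() above validates. A minimal userspace sketch, assuming the driver registered as /dev/video0; the usual VIDIOC_REQBUFS/QBUF/STREAMON buffer handling would follow.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Source queue: both fields stored sequentially, top field first. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.width = 640;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
	fmt.fmt.pix.field = V4L2_FIELD_SEQ_TB;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		perror("VIDIOC_S_FMT (output)");

	/* Destination queue: same pixel format, fields woven together. */
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		perror("VIDIOC_S_FMT (capture)");

	/* VIDIOC_REQBUFS, QBUF and STREAMON on both queues would follow. */
	close(fd);
	return 0;
}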
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
new file mode 100644
index 0000000..bf739e3
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -0,0 +1,23 @@
+config VIDEO_CAFE_CCIC
+	tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
+	depends on PCI && I2C && VIDEO_V4L2
+	select VIDEO_OV7670
+	select VIDEOBUF2_VMALLOC
+	select VIDEOBUF2_DMA_CONTIG
+	---help---
+	  This is a video4linux2 driver for the Marvell 88ALP01 integrated
+	  CMOS camera controller.  This is the controller found on first-
+	  generation OLPC systems.
+
+config VIDEO_MMP_CAMERA
+	tristate "Marvell Armada 610 integrated camera controller support"
+	depends on ARCH_MMP && I2C && VIDEO_V4L2
+	select VIDEO_OV7670
+	select I2C_GPIO
+	select VIDEOBUF2_DMA_SG
+	---help---
+	  This is a Video4Linux2 driver for the integrated camera
+	  controller found on Marvell Armada 610 application
+	  processors (and likely beyond).  This is the controller found
+	  in OLPC XO 1.75 systems.
+
diff --git a/drivers/media/platform/marvell-ccic/Makefile b/drivers/media/platform/marvell-ccic/Makefile
new file mode 100644
index 0000000..05a792c
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+cafe_ccic-y := cafe-driver.o mcam-core.o
+
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += mmp_camera.o
+mmp_camera-y := mmp-driver.o mcam-core.o
+
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
new file mode 100644
index 0000000..d030f9b
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -0,0 +1,654 @@
+/*
+ * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
+ * multifunction chip.  Currently works with the Omnivision OV7670
+ * sensor.
+ *
+ * The data sheet for this device can be found at:
+ *    http://www.marvell.com/products/pc_connectivity/88alp01/
+ *
+ * Copyright 2006-11 One Laptop Per Child Association, Inc.
+ * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
+ *
+ * Written by Jonathan Corbet, corbet@lwn.net.
+ *
+ * v4l2_device/v4l2_subdev conversion by:
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "mcam-core.h"
+
+#define CAFE_VERSION 0x000002
+
+
+/*
+ * Parameters.
+ */
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Video");
+
+
+
+
+struct cafe_camera {
+	int registered;			/* Fully initialized? */
+	struct mcam_camera mcam;
+	struct pci_dev *pdev;
+	wait_queue_head_t smbus_wait;	/* Waiting on i2c events */
+};
+
+/*
+ * Most of the camera controller registers are defined in mcam-core.h,
+ * but the Cafe platform has some additional registers of its own;
+ * they are described here.
+ */
+
+/*
+ * "General purpose register" has a couple of GPIOs used for sensor
+ * power and reset on OLPC XO 1.0 systems.
+ */
+#define REG_GPR		0xb4
+#define	  GPR_C1EN	  0x00000020	/* Pad 1 (power down) enable */
+#define	  GPR_C0EN	  0x00000010	/* Pad 0 (reset) enable */
+#define	  GPR_C1	  0x00000002	/* Control 1 value */
+/*
+ * Control 0 is wired to reset on OLPC machines.  For ov7x sensors,
+ * it is active low.
+ */
+#define	  GPR_C0	  0x00000001	/* Control 0 value */
+
+/*
+ * These registers control the SMBUS module for communicating
+ * with the sensor.
+ */
+#define REG_TWSIC0	0xb8	/* TWSI (smbus) control 0 */
+#define	  TWSIC0_EN	  0x00000001	/* TWSI enable */
+#define	  TWSIC0_MODE	  0x00000002	/* 1 = 16-bit, 0 = 8-bit */
+#define	  TWSIC0_SID	  0x000003fc	/* Slave ID */
+/*
+ * Subtle trickery: the slave ID field starts with bit 2.  But the
+ * Linux i2c stack wants to treat the bottommost bit as a separate
+ * read/write bit, which is why slave ID's are usually presented
+ * >>1.  For consistency with that behavior, we shift over three
+ * bits instead of two.
+ */
+#define	  TWSIC0_SID_SHIFT 3
+#define	  TWSIC0_CLKDIV	  0x0007fc00	/* Clock divider */
+#define	  TWSIC0_MASKACK  0x00400000	/* Mask ack from sensor */
+#define	  TWSIC0_OVMAGIC  0x00800000	/* Make it work on OV sensors */
+
+#define REG_TWSIC1	0xbc	/* TWSI control 1 */
+#define	  TWSIC1_DATA	  0x0000ffff	/* Data to/from camchip */
+#define	  TWSIC1_ADDR	  0x00ff0000	/* Address (register) */
+#define	  TWSIC1_ADDR_SHIFT 16
+#define	  TWSIC1_READ	  0x01000000	/* Set for read op */
+#define	  TWSIC1_WSTAT	  0x02000000	/* Write status */
+#define	  TWSIC1_RVALID	  0x04000000	/* Read data valid */
+#define	  TWSIC1_ERROR	  0x08000000	/* Something screwed up */
+
+/*
+ * Here are the weird global control registers
+ */
+#define REG_GL_CSR     0x3004  /* Control/status register */
+#define	  GCSR_SRS	 0x00000001	/* SW Reset set */
+#define	  GCSR_SRC	 0x00000002	/* SW Reset clear */
+#define	  GCSR_MRS	 0x00000004	/* Master reset set */
+#define	  GCSR_MRC	 0x00000008	/* Master reset clear */
+#define	  GCSR_CCIC_EN	 0x00004000    /* CCIC Clock enable */
+#define REG_GL_IMASK   0x300c  /* Interrupt mask register */
+#define	  GIMSK_CCIC_EN		 0x00000004    /* CCIC Interrupt enable */
+
+#define REG_GL_FCR	0x3038	/* GPIO functional control register */
+#define	  GFCR_GPIO_ON	  0x08		/* Camera GPIO enabled */
+#define REG_GL_GPIOR	0x315c	/* GPIO register */
+#define	  GGPIO_OUT		0x80000	/* GPIO output */
+#define	  GGPIO_VAL		0x00008	/* Output pin value */
+
+#define REG_LEN		       (REG_GL_IMASK + 4)
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+	dev_err(&(cam)->pdev->dev, fmt, ##arg)
+#define cam_warn(cam, fmt, arg...) \
+	dev_warn(&(cam)->pdev->dev, fmt, ##arg)
+
+/* -------------------------------------------------------------------- */
+/*
+ * The I2C/SMBUS interface to the camera itself starts here.  The
+ * controller handles SMBUS itself, presenting a relatively simple register
+ * interface; all we have to do is to tell it where to route the data.
+ */
+#define CAFE_SMBUS_TIMEOUT (HZ)  /* generous */
+
+static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
+{
+	struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
+	return container_of(m, struct cafe_camera, mcam);
+}
+
+
+static int cafe_smbus_write_done(struct mcam_camera *mcam)
+{
+	unsigned long flags;
+	int c1;
+
+	/*
+	 * We must delay after the interrupt, or the controller gets confused
+	 * and never does give us good status.  Fortunately, we don't do this
+	 * often.
+	 */
+	udelay(20);
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	c1 = mcam_reg_read(mcam, REG_TWSIC1);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+	return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
+}
+
+static int cafe_smbus_write_data(struct cafe_camera *cam,
+		u16 addr, u8 command, u8 value)
+{
+	unsigned int rval;
+	unsigned long flags;
+	struct mcam_camera *mcam = &cam->mcam;
+
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+	rval |= TWSIC0_OVMAGIC;  /* Make OV sensors work */
+	/*
+	 * Marvell sez set clkdiv to all 1's for now.
+	 */
+	rval |= TWSIC0_CLKDIV;
+	mcam_reg_write(mcam, REG_TWSIC0, rval);
+	(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+	rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+	mcam_reg_write(mcam, REG_TWSIC1, rval);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+	/* Unfortunately, reading TWSIC1 too soon after sending a command
+	 * causes the device to die.
+	 * Use a busy-wait because we often send a large quantity of small
+	 * commands at-once; using msleep() would cause a lot of context
+	 * switches which take longer than 2ms, resulting in a noticeable
+	 * boot-time and capture-start delays.
+	 */
+	mdelay(2);
+
+	/*
+	 * Another sad fact is that sometimes, commands silently complete but
+	 * cafe_smbus_write_done() never becomes aware of this.
+	 * This happens at random and can apparently occur with any
+	 * command.
+	 * We don't understand why this is. We work around this issue
+	 * with the timeout in the wait below, assuming that all commands
+	 * complete within the timeout.
+	 */
+	wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
+			CAFE_SMBUS_TIMEOUT);
+
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	rval = mcam_reg_read(mcam, REG_TWSIC1);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+	if (rval & TWSIC1_WSTAT) {
+		cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
+				command, value);
+		return -EIO;
+	}
+	if (rval & TWSIC1_ERROR) {
+		cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
+				command, value);
+		return -EIO;
+	}
+	return 0;
+}
+
+
+
+static int cafe_smbus_read_done(struct mcam_camera *mcam)
+{
+	unsigned long flags;
+	int c1;
+
+	/*
+	 * We must delay after the interrupt, or the controller gets confused
+	 * and never does give us good status.  Fortunately, we don't do this
+	 * often.
+	 */
+	udelay(20);
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	c1 = mcam_reg_read(mcam, REG_TWSIC1);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+	return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
+}
+
+
+
+static int cafe_smbus_read_data(struct cafe_camera *cam,
+		u16 addr, u8 command, u8 *value)
+{
+	unsigned int rval;
+	unsigned long flags;
+	struct mcam_camera *mcam = &cam->mcam;
+
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+	rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+	/*
+	 * Marvell sez set clkdiv to all 1's for now.
+	 */
+	rval |= TWSIC0_CLKDIV;
+	mcam_reg_write(mcam, REG_TWSIC0, rval);
+	(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+	rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+	mcam_reg_write(mcam, REG_TWSIC1, rval);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+	wait_event_timeout(cam->smbus_wait,
+			cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	rval = mcam_reg_read(mcam, REG_TWSIC1);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+	if (rval & TWSIC1_ERROR) {
+		cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
+		return -EIO;
+	}
+	if (!(rval & TWSIC1_RVALID)) {
+		cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
+				command);
+		return -EIO;
+	}
+	*value = rval & 0xff;
+	return 0;
+}
+
+/*
+ * Perform a transfer over SMBUS.  This thing is called under
+ * the i2c bus lock, so we shouldn't race with ourselves...
+ */
+static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+		unsigned short flags, char rw, u8 command,
+		int size, union i2c_smbus_data *data)
+{
+	struct cafe_camera *cam = i2c_get_adapdata(adapter);
+	int ret = -EINVAL;
+
+	/*
+	 * This interface would appear to only do byte data ops.  OK
+	 * it can do word too, but the cam chip has no use for that.
+	 */
+	if (size != I2C_SMBUS_BYTE_DATA) {
+		cam_err(cam, "funky xfer size %d\n", size);
+		return -EINVAL;
+	}
+
+	if (rw == I2C_SMBUS_WRITE)
+		ret = cafe_smbus_write_data(cam, addr, command, data->byte);
+	else if (rw == I2C_SMBUS_READ)
+		ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
+	return ret;
+}
+
+
+static void cafe_smbus_enable_irq(struct cafe_camera *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->mcam.dev_lock, flags);
+	mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
+	spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
+}
+
+static u32 cafe_smbus_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_SMBUS_READ_BYTE_DATA  |
+	       I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+}
+
+static struct i2c_algorithm cafe_smbus_algo = {
+	.smbus_xfer = cafe_smbus_xfer,
+	.functionality = cafe_smbus_func
+};
+
+static int cafe_smbus_setup(struct cafe_camera *cam)
+{
+	struct i2c_adapter *adap;
+	int ret;
+
+	adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+	if (adap == NULL)
+		return -ENOMEM;
+	cam->mcam.i2c_adapter = adap;
+	cafe_smbus_enable_irq(cam);
+	adap->owner = THIS_MODULE;
+	adap->algo = &cafe_smbus_algo;
+	strcpy(adap->name, "cafe_ccic");
+	adap->dev.parent = &cam->pdev->dev;
+	i2c_set_adapdata(adap, cam);
+	ret = i2c_add_adapter(adap);
+	if (ret)
+		printk(KERN_ERR "Unable to register cafe i2c adapter\n");
+	return ret;
+}
+
+static void cafe_smbus_shutdown(struct cafe_camera *cam)
+{
+	i2c_del_adapter(cam->mcam.i2c_adapter);
+	kfree(cam->mcam.i2c_adapter);
+}
+
+
+/*
+ * Controller-level stuff
+ */
+
+static void cafe_ctlr_init(struct mcam_camera *mcam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+	/*
+	 * Added magic to bring up the hardware on the B-Test board
+	 */
+	mcam_reg_write(mcam, 0x3038, 0x8);
+	mcam_reg_write(mcam, 0x315c, 0x80008);
+	/*
+	 * Go through the dance needed to wake the device up.
+	 * Note that these registers are global and shared
+	 * with the NAND and SD devices.  Interaction between the
+	 * three still needs to be examined.
+	 */
+	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
+	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
+	mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
+	/*
+	 * Here we must wait a bit for the controller to come around.
+	 */
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+	msleep(5);
+	spin_lock_irqsave(&mcam->dev_lock, flags);
+
+	mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
+	mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
+	/*
+	 * Mask all interrupts.
+	 */
+	mcam_reg_write(mcam, REG_IRQMASK, 0);
+	spin_unlock_irqrestore(&mcam->dev_lock, flags);
+}
+
+
+static void cafe_ctlr_power_up(struct mcam_camera *mcam)
+{
+	/*
+	 * Part one of the sensor dance: turn the global
+	 * GPIO signal on.
+	 */
+	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
+	/*
+	 * Put the sensor into operational mode (assumes OLPC-style
+	 * wiring).  Control 0 is reset - set to 1 to operate.
+	 * Control 1 is power down, set to 0 to operate.
+	 */
+	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
+	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+}
+
+static void cafe_ctlr_power_down(struct mcam_camera *mcam)
+{
+	mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
+	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
+}
+
+
+
+/*
+ * The platform interrupt handler.
+ */
+static irqreturn_t cafe_irq(int irq, void *data)
+{
+	struct cafe_camera *cam = data;
+	struct mcam_camera *mcam = &cam->mcam;
+	unsigned int irqs, handled;
+
+	spin_lock(&mcam->dev_lock);
+	irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+	handled = cam->registered && mccic_irq(mcam, irqs);
+	if (irqs & TWSIIRQS) {
+		mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
+		wake_up(&cam->smbus_wait);
+		handled = 1;
+	}
+	spin_unlock(&mcam->dev_lock);
+	return IRQ_RETVAL(handled);
+}
+
+
+/* -------------------------------------------------------------------------- */
+/*
+ * PCI interface stuff.
+ */
+
+static int cafe_pci_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	int ret;
+	struct cafe_camera *cam;
+	struct mcam_camera *mcam;
+
+	/*
+	 * Start putting together one of our big camera structures.
+	 */
+	ret = -ENOMEM;
+	cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
+	if (cam == NULL)
+		goto out;
+	cam->pdev = pdev;
+	mcam = &cam->mcam;
+	mcam->chip_id = V4L2_IDENT_CAFE;
+	spin_lock_init(&mcam->dev_lock);
+	init_waitqueue_head(&cam->smbus_wait);
+	mcam->plat_power_up = cafe_ctlr_power_up;
+	mcam->plat_power_down = cafe_ctlr_power_down;
+	mcam->dev = &pdev->dev;
+	/*
+	 * Set the clock speed for the XO 1; I don't believe this
+	 * driver has ever run anywhere else.
+	 */
+	mcam->clock_speed = 45;
+	mcam->use_smbus = 1;
+	/*
+	 * Vmalloc mode for buffers is traditional with this driver.
+	 * We *might* be able to run DMA_contig, especially on a system
+	 * with CMA in it.
+	 */
+	mcam->buffer_mode = B_vmalloc;
+	/*
+	 * Get set up on the PCI bus.
+	 */
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto out_free;
+	pci_set_master(pdev);
+
+	ret = -EIO;
+	mcam->regs = pci_iomap(pdev, 0, 0);
+	if (!mcam->regs) {
+		printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
+		goto out_disable;
+	}
+	ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
+	if (ret)
+		goto out_iounmap;
+
+	/*
+	 * Initialize the controller and leave it powered up.  It will
+	 * stay that way until the sensor driver shows up.
+	 */
+	cafe_ctlr_init(mcam);
+	cafe_ctlr_power_up(mcam);
+	/*
+	 * Set up I2C/SMBUS communications.  The sensor can attach in this
+	 * call chain, so no locks that the sensor code might need may be
+	 * held here.
+	 */
+	ret = cafe_smbus_setup(cam);
+	if (ret)
+		goto out_pdown;
+
+	ret = mccic_register(mcam);
+	if (ret == 0) {
+		cam->registered = 1;
+		return 0;
+	}
+
+	cafe_smbus_shutdown(cam);
+out_pdown:
+	cafe_ctlr_power_down(mcam);
+	free_irq(pdev->irq, cam);
+out_iounmap:
+	pci_iounmap(pdev, mcam->regs);
+out_disable:
+	pci_disable_device(pdev);
+out_free:
+	kfree(cam);
+out:
+	return ret;
+}
+
+
+/*
+ * Shut down an initialized device
+ */
+static void cafe_shutdown(struct cafe_camera *cam)
+{
+	mccic_shutdown(&cam->mcam);
+	cafe_smbus_shutdown(cam);
+	free_irq(cam->pdev->irq, cam);
+	pci_iounmap(cam->pdev, cam->mcam.regs);
+}
+
+
+static void cafe_pci_remove(struct pci_dev *pdev)
+{
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+	struct cafe_camera *cam = to_cam(v4l2_dev);
+
+	if (cam == NULL) {
+		printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
+		return;
+	}
+	cafe_shutdown(cam);
+	kfree(cam);
+}
+
+
+#ifdef CONFIG_PM
+/*
+ * Basic power management.
+ */
+static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+	struct cafe_camera *cam = to_cam(v4l2_dev);
+	int ret;
+
+	ret = pci_save_state(pdev);
+	if (ret)
+		return ret;
+	mccic_suspend(&cam->mcam);
+	pci_disable_device(pdev);
+	return 0;
+}
+
+
+static int cafe_pci_resume(struct pci_dev *pdev)
+{
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+	struct cafe_camera *cam = to_cam(v4l2_dev);
+	int ret = 0;
+
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+
+	if (ret) {
+		cam_warn(cam, "Unable to re-enable device on resume!\n");
+		return ret;
+	}
+	cafe_ctlr_init(&cam->mcam);
+	return mccic_resume(&cam->mcam);
+}
+
+#endif  /* CONFIG_PM */
+
+static struct pci_device_id cafe_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
+		     PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_ids);
+
+static struct pci_driver cafe_pci_driver = {
+	.name = "cafe1000-ccic",
+	.id_table = cafe_ids,
+	.probe = cafe_pci_probe,
+	.remove = cafe_pci_remove,
+#ifdef CONFIG_PM
+	.suspend = cafe_pci_suspend,
+	.resume = cafe_pci_resume,
+#endif
+};
+
+
+
+
+static int __init cafe_init(void)
+{
+	int ret;
+
+	printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
+			CAFE_VERSION);
+	ret = pci_register_driver(&cafe_pci_driver);
+	if (ret) {
+		printk(KERN_ERR "Unable to register cafe_ccic driver\n");
+		goto out;
+	}
+	ret = 0;
+
+out:
+	return ret;
+}
+
+
+static void __exit cafe_exit(void)
+{
+	pci_unregister_driver(&cafe_pci_driver);
+}
+
+module_init(cafe_init);
+module_exit(cafe_exit);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
new file mode 100644
index 0000000..ce2b7b4
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -0,0 +1,1878 @@
+/*
+ * The Marvell camera core.  This device appears in a number of settings,
+ * so it needs platform-specific support outside of the core.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/ov7670.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "mcam-core.h"
+
+/*
+ * Basic frame stats - to be deleted shortly
+ */
+static int frames;
+static int singles;
+static int delivered;
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Internal DMA buffer management.  Since the controller cannot do S/G I/O,
+ * we must have physically contiguous buffers to bring frames into.
+ * These parameters control how many buffers we use, whether we
+ * allocate them at load time (better chance of success, but nails down
+ * memory) or when somebody tries to use the camera (riskier), and,
+ * for load-time allocation, how big they should be.
+ *
+ * The controller can cycle through three buffers.  We could use
+ * more by flipping pointers around, but it probably makes little
+ * sense.
+ */
+
+static bool alloc_bufs_at_read;
+module_param(alloc_bufs_at_read, bool, 0444);
+MODULE_PARM_DESC(alloc_bufs_at_read,
+		"Non-zero value causes DMA buffers to be allocated when the "
+		"video capture device is read, rather than at module load "
+		"time.  This saves memory, but decreases the chances of "
+		"successfully getting those buffers.  This parameter is "
+		"only used in the vmalloc buffer mode");
+
+static int n_dma_bufs = 3;
+module_param(n_dma_bufs, uint, 0644);
+MODULE_PARM_DESC(n_dma_bufs,
+		"The number of DMA buffers to allocate.  Can be either two "
+		"(saves memory, makes timing tighter) or three.");
+
+static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2;  /* Worst case */
+module_param(dma_buf_size, uint, 0444);
+MODULE_PARM_DESC(dma_buf_size,
+		"The size of the allocated DMA buffers.  If actual operating "
+		"parameters require larger buffers, an attempt to reallocate "
+		"will be made.");
+#else /* MCAM_MODE_VMALLOC */
+static const bool alloc_bufs_at_read = 0;
+static const int n_dma_bufs = 3;  /* Used by S/G_PARM */
+#endif /* MCAM_MODE_VMALLOC */
+
+static bool flip;
+module_param(flip, bool, 0444);
+MODULE_PARM_DESC(flip,
+		"If set, the sensor will be instructed to flip the image "
+		"vertically.");
+
+static int buffer_mode = -1;
+module_param(buffer_mode, int, 0444);
+MODULE_PARM_DESC(buffer_mode,
+		"Set the buffer mode to be used; default is to go with what "
+		"the platform driver asks for.  Set to 0 for vmalloc, 1 for "
+		"DMA contiguous.");
+
+/*
+ * Status flags.  Always manipulated with bit operations.
+ */
+#define CF_BUF0_VALID	 0	/* Buffers valid - first three */
+#define CF_BUF1_VALID	 1
+#define CF_BUF2_VALID	 2
+#define CF_DMA_ACTIVE	 3	/* A frame is incoming */
+#define CF_CONFIG_NEEDED 4	/* Must configure hardware */
+#define CF_SINGLE_BUFFER 5	/* Running with a single buffer */
+#define CF_SG_RESTART	 6	/* SG restart needed */
+
+#define sensor_call(cam, o, f, args...) \
+	v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+static struct mcam_format_struct {
+	__u8 *desc;
+	__u32 pixelformat;
+	int bpp;   /* Bytes per pixel */
+	enum v4l2_mbus_pixelcode mbus_code;
+} mcam_formats[] = {
+	{
+		.desc		= "YUYV 4:2:2",
+		.pixelformat	= V4L2_PIX_FMT_YUYV,
+		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
+		.bpp		= 2,
+	},
+	{
+		.desc		= "RGB 444",
+		.pixelformat	= V4L2_PIX_FMT_RGB444,
+		.mbus_code	= V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+		.bpp		= 2,
+	},
+	{
+		.desc		= "RGB 565",
+		.pixelformat	= V4L2_PIX_FMT_RGB565,
+		.mbus_code	= V4L2_MBUS_FMT_RGB565_2X8_LE,
+		.bpp		= 2,
+	},
+	{
+		.desc		= "Raw RGB Bayer",
+		.pixelformat	= V4L2_PIX_FMT_SBGGR8,
+		.mbus_code	= V4L2_MBUS_FMT_SBGGR8_1X8,
+		.bpp		= 1
+	},
+};
+#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
+
+static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
+{
+	unsigned i;
+
+	for (i = 0; i < N_MCAM_FMTS; i++)
+		if (mcam_formats[i].pixelformat == pixelformat)
+			return mcam_formats + i;
+	/* Not found? Then return the first format. */
+	return mcam_formats;
+}
+
+/*
+ * The default format we use until somebody says otherwise.
+ */
+static const struct v4l2_pix_format mcam_def_pix_format = {
+	.width		= VGA_WIDTH,
+	.height		= VGA_HEIGHT,
+	.pixelformat	= V4L2_PIX_FMT_YUYV,
+	.field		= V4L2_FIELD_NONE,
+	.bytesperline	= VGA_WIDTH*2,
+	.sizeimage	= VGA_WIDTH*VGA_HEIGHT*2,
+};
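+
+/*
+ * For reference (assuming the usual 640x480 VGA geometry): at two bytes
+ * per YUYV pixel, bytesperline is 640*2 = 1280 and sizeimage is
+ * 640*480*2 = 614400 bytes.
+ */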
+
+static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
+					V4L2_MBUS_FMT_YUYV8_2X8;
+
+
+/*
+ * The two-word DMA descriptor format used by the Armada 610 and the like.
+ * There is a three-word format as well (set C1_DESC_3WORD) where the third
+ * word is a pointer to the next descriptor, but we don't use it.  Two-word
+ * descriptors have to be contiguous in memory.
+ */
+struct mcam_dma_desc {
+	u32 dma_addr;
+	u32 segment_len;
+};
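+
+/*
+ * Rough illustration (not taken from the code): a 614400-byte VGA YUYV
+ * frame scattered over 4096-byte pages needs about 150 of these
+ * descriptors; mcam_vb_sg_buf_init() below allocates room for
+ * sizeimage/PAGE_SIZE + 1 of them per buffer to be safe.
+ */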
+
+/*
+ * Our buffer type for working with videobuf2.  Note that the vb2
+ * developers have decreed that struct vb2_buffer must be at the
+ * beginning of this structure.
+ */
+struct mcam_vb_buffer {
+	struct vb2_buffer vb_buf;
+	struct list_head queue;
+	struct mcam_dma_desc *dma_desc;	/* Descriptor virtual address */
+	dma_addr_t dma_desc_pa;		/* Descriptor physical address */
+	int dma_desc_nent;		/* Number of mapped descriptors */
+};
+
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
+{
+	return container_of(vb, struct mcam_vb_buffer, vb_buf);
+}
+
+/*
+ * Hand a completed buffer back to user space.
+ */
+static void mcam_buffer_done(struct mcam_camera *cam, int frame,
+		struct vb2_buffer *vbuf)
+{
+	vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
+	vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
+	vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
+	vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
+}
+
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+	dev_err((cam)->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+	dev_warn((cam)->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+	dev_dbg((cam)->dev, fmt, ##arg);
+
+
+/*
+ * Flag manipulation helpers
+ */
+static void mcam_reset_buffers(struct mcam_camera *cam)
+{
+	int i;
+
+	cam->next_buf = -1;
+	for (i = 0; i < cam->nbufs; i++)
+		clear_bit(i, &cam->flags);
+}
+
+static inline int mcam_needs_config(struct mcam_camera *cam)
+{
+	return test_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
+{
+	if (needed)
+		set_bit(CF_CONFIG_NEEDED, &cam->flags);
+	else
+		clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+/* ------------------------------------------------------------------- */
+/*
+ * Make the controller start grabbing images.  Everything must
+ * be set up before doing this.
+ */
+static void mcam_ctlr_start(struct mcam_camera *cam)
+{
+	/* set_bit performs a read, so no other barrier should be
+	   needed here */
+	mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_ctlr_stop(struct mcam_camera *cam)
+{
+	mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+/* ------------------------------------------------------------------- */
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Code specific to the vmalloc buffer mode.
+ */
+
+/*
+ * Allocate in-kernel DMA buffers for vmalloc mode.
+ */
+static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+	int i;
+
+	mcam_set_config_needed(cam, 1);
+	if (loadtime)
+		cam->dma_buf_size = dma_buf_size;
+	else
+		cam->dma_buf_size = cam->pix_format.sizeimage;
+	if (n_dma_bufs > 3)
+		n_dma_bufs = 3;
+
+	cam->nbufs = 0;
+	for (i = 0; i < n_dma_bufs; i++) {
+		cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
+				cam->dma_buf_size, cam->dma_handles + i,
+				GFP_KERNEL);
+		if (cam->dma_bufs[i] == NULL) {
+			cam_warn(cam, "Failed to allocate DMA buffer\n");
+			break;
+		}
+		(cam->nbufs)++;
+	}
+
+	switch (cam->nbufs) {
+	case 1:
+		dma_free_coherent(cam->dev, cam->dma_buf_size,
+				cam->dma_bufs[0], cam->dma_handles[0]);
+		cam->nbufs = 0;
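+		/* Fall through - fewer than two buffers will not do */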
+	case 0:
+		cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
+		return -ENOMEM;
+
+	case 2:
+		if (n_dma_bufs > 2)
+			cam_warn(cam, "Will limp along with only 2 buffers\n");
+		break;
+	}
+	return 0;
+}
+
+static void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+	int i;
+
+	for (i = 0; i < cam->nbufs; i++) {
+		dma_free_coherent(cam->dev, cam->dma_buf_size,
+				cam->dma_bufs[i], cam->dma_handles[i]);
+		cam->dma_bufs[i] = NULL;
+	}
+	cam->nbufs = 0;
+}
+
+
+/*
+ * Set up DMA buffers when operating in vmalloc mode
+ */
+static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
+{
+	/*
+	 * Store the first two Y buffers (we aren't supporting
+	 * planar formats for now, so no UV bufs).  Then either
+	 * set the third if it exists, or tell the controller
+	 * to just use two.
+	 */
+	mcam_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
+	mcam_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
+	if (cam->nbufs > 2) {
+		mcam_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
+		mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
+	} else
+		mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+	if (cam->chip_id == V4L2_IDENT_CAFE)
+		mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
+}
+
+/*
+ * Copy data out to user space in the vmalloc case
+ */
+static void mcam_frame_tasklet(unsigned long data)
+{
+	struct mcam_camera *cam = (struct mcam_camera *) data;
+	int i;
+	unsigned long flags;
+	struct mcam_vb_buffer *buf;
+
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	for (i = 0; i < cam->nbufs; i++) {
+		int bufno = cam->next_buf;
+
+		if (cam->state != S_STREAMING || bufno < 0)
+			break;  /* I/O got stopped */
+		if (++(cam->next_buf) >= cam->nbufs)
+			cam->next_buf = 0;
+		if (!test_bit(bufno, &cam->flags))
+			continue;
+		if (list_empty(&cam->buffers)) {
+			singles++;
+			break;  /* Leave it valid, hope for better later */
+		}
+		delivered++;
+		clear_bit(bufno, &cam->flags);
+		buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+				queue);
+		list_del_init(&buf->queue);
+		/*
+		 * Drop the lock during the big copy.  This *should* be safe...
+		 */
+		spin_unlock_irqrestore(&cam->dev_lock, flags);
+		memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
+				cam->pix_format.sizeimage);
+		mcam_buffer_done(cam, bufno, &buf->vb_buf);
+		spin_lock_irqsave(&cam->dev_lock, flags);
+	}
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Make sure our allocated buffers are up to the task.
+ */
+static int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+	if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
+		mcam_free_dma_bufs(cam);
+	if (cam->nbufs == 0)
+		return mcam_alloc_dma_bufs(cam, 0);
+	return 0;
+}
+
+static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
+{
+	tasklet_schedule(&cam->s_tasklet);
+}
+
+#else /* MCAM_MODE_VMALLOC */
+
+static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+	return 0;
+}
+
+static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+	return;
+}
+
+static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+	return 0;
+}
+
+
+
+#endif /* MCAM_MODE_VMALLOC */
+
+
+#ifdef MCAM_MODE_DMA_CONTIG
+/* ---------------------------------------------------------------------- */
+/*
+ * DMA-contiguous code.
+ */
+/*
+ * Set up a contiguous buffer for the given frame.  Here also is where
+ * the underrun strategy is set: if there is no buffer available, reuse
+ * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
+ * keep the interrupt handler from giving that buffer back to user
+ * space.  In this way, we always have a buffer to DMA to and don't
+ * have to try to play games stopping and restarting the controller.
+ */
+static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
+{
+	struct mcam_vb_buffer *buf;
+	/*
+	 * If there are no available buffers, go into single mode
+	 */
+	if (list_empty(&cam->buffers)) {
+		buf = cam->vb_bufs[frame ^ 0x1];
+		cam->vb_bufs[frame] = buf;
+		mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
+				vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
+		set_bit(CF_SINGLE_BUFFER, &cam->flags);
+		singles++;
+		return;
+	}
+	/*
+	 * OK, we have a buffer we can use.
+	 */
+	buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+	list_del_init(&buf->queue);
+	mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
+			vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
+	cam->vb_bufs[frame] = buf;
+	clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+}
+
+/*
+ * Initial B_DMA_contig setup.
+ */
+static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
+{
+	mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+	cam->nbufs = 2;
+	mcam_set_contig_buffer(cam, 0);
+	mcam_set_contig_buffer(cam, 1);
+}
+
+/*
+ * Frame completion handling.
+ */
+static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
+{
+	struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
+
+	if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
+		delivered++;
+		mcam_buffer_done(cam, frame, &buf->vb_buf);
+	}
+	mcam_set_contig_buffer(cam, frame);
+}
+
+#endif /* MCAM_MODE_DMA_CONTIG */
+
+#ifdef MCAM_MODE_DMA_SG
+/* ---------------------------------------------------------------------- */
+/*
+ * Scatter/gather-specific code.
+ */
+
+/*
+ * Set up the next buffer for S/G I/O; caller should be sure that
+ * the controller is stopped and a buffer is available.
+ */
+static void mcam_sg_next_buffer(struct mcam_camera *cam)
+{
+	struct mcam_vb_buffer *buf;
+
+	buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+	list_del_init(&buf->queue);
+	/*
+	 * Very Bad Not Good Things happen if you don't clear
+	 * C1_DESC_ENA before making any descriptor changes.
+	 */
+	mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
+	mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
+	mcam_reg_write(cam, REG_DESC_LEN_Y,
+			buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+	mcam_reg_write(cam, REG_DESC_LEN_U, 0);
+	mcam_reg_write(cam, REG_DESC_LEN_V, 0);
+	mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+	cam->vb_bufs[0] = buf;
+}
+
+/*
+ * Initial B_DMA_sg setup
+ */
+static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
+{
+	/*
+	 * The list-empty condition can hit us at resume time
+	 * if the buffer list was empty when the system was suspended.
+	 */
+	if (list_empty(&cam->buffers)) {
+		set_bit(CF_SG_RESTART, &cam->flags);
+		return;
+	}
+
+	mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
+	mcam_sg_next_buffer(cam);
+	cam->nbufs = 3;
+}
+
+
+/*
+ * Frame completion with S/G is trickier.  We can't muck with
+ * a descriptor chain on the fly, since the controller buffers it
+ * internally.  So we have to actually stop and restart; Marvell
+ * says this is the way to do it.
+ *
+ * Of course, stopping is easier said than done; experience shows
+ * that the controller can start a frame *after* C0_ENABLE has been
+ * cleared.  So when running in S/G mode, the controller is "stopped"
+ * on receipt of the start-of-frame interrupt.  That means we can
+ * safely change the DMA descriptor array here and restart things
+ * (assuming there's another buffer waiting to go).
+ */
+static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
+{
+	struct mcam_vb_buffer *buf = cam->vb_bufs[0];
+
+	/*
+	 * If we're no longer supposed to be streaming, don't do anything.
+	 */
+	if (cam->state != S_STREAMING)
+		return;
+	/*
+	 * If we have another buffer available, put it in and
+	 * restart the engine.
+	 */
+	if (!list_empty(&cam->buffers)) {
+		mcam_sg_next_buffer(cam);
+		mcam_ctlr_start(cam);
+	/*
+	 * Otherwise set CF_SG_RESTART and the controller will
+	 * be restarted once another buffer shows up.
+	 */
+	} else {
+		set_bit(CF_SG_RESTART, &cam->flags);
+		singles++;
+		cam->vb_bufs[0] = NULL;
+	}
+	/*
+	 * Now we can give the completed frame back to user space.
+	 */
+	delivered++;
+	mcam_buffer_done(cam, frame, &buf->vb_buf);
+}
+
+
+/*
+ * Scatter/gather mode requires stopping the controller between
+ * frames so we can put in a new DMA descriptor array.  If no new
+ * buffer exists at frame completion, the controller is left stopped;
+ * this function is charged with getting things going again.
+ */
+static void mcam_sg_restart(struct mcam_camera *cam)
+{
+	mcam_ctlr_dma_sg(cam);
+	mcam_ctlr_start(cam);
+	clear_bit(CF_SG_RESTART, &cam->flags);
+}
+
+#else /* MCAM_MODE_DMA_SG */
+
+static inline void mcam_sg_restart(struct mcam_camera *cam)
+{
+	return;
+}
+
+#endif /* MCAM_MODE_DMA_SG */
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Buffer-mode-independent controller code.
+ */
+
+/*
+ * Image format setup
+ */
+static void mcam_ctlr_image(struct mcam_camera *cam)
+{
+	int imgsz;
+	struct v4l2_pix_format *fmt = &cam->pix_format;
+
+	imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
+		(fmt->bytesperline & IMGSZ_H_MASK);
+	mcam_reg_write(cam, REG_IMGSIZE, imgsz);
+	mcam_reg_write(cam, REG_IMGOFFSET, 0);
+	/* YPITCH just drops the last two bits */
+	mcam_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
+			IMGP_YP_MASK);
+	/*
+	 * Tell the controller about the image format we are using.
+	 */
+	switch (cam->pix_format.pixelformat) {
+	case V4L2_PIX_FMT_YUYV:
+	    mcam_reg_write_mask(cam, REG_CTRL0,
+			    C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
+			    C0_DF_MASK);
+	    break;
+
+	case V4L2_PIX_FMT_RGB444:
+	    mcam_reg_write_mask(cam, REG_CTRL0,
+			    C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
+			    C0_DF_MASK);
+		/* Alpha value? */
+	    break;
+
+	case V4L2_PIX_FMT_RGB565:
+	    mcam_reg_write_mask(cam, REG_CTRL0,
+			    C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
+			    C0_DF_MASK);
+	    break;
+
+	default:
+	    cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
+	    break;
+	}
+	/*
+	 * Make sure it knows we want to use hsync/vsync.
+	 */
+	mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
+			C0_SIFM_MASK);
+}
+
+
+/*
+ * Configure the controller for operation; caller holds the
+ * device mutex.
+ */
+static int mcam_ctlr_configure(struct mcam_camera *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	clear_bit(CF_SG_RESTART, &cam->flags);
+	cam->dma_setup(cam);
+	mcam_ctlr_image(cam);
+	mcam_set_config_needed(cam, 0);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+	return 0;
+}
+
+static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
+{
+	/*
+	 * Clear any pending interrupts, since we do not
+	 * expect to have I/O active prior to enabling.
+	 */
+	mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
+	mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
+{
+	mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+
+
+static void mcam_ctlr_init(struct mcam_camera *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	/*
+	 * Make sure it's not powered down.
+	 */
+	mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+	/*
+	 * Turn off the enable bit.  It sure should be off anyway,
+	 * but it's good to be sure.
+	 */
+	mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+	/*
+	 * Clock the sensor appropriately.  Controller clock should
+	 * be 48MHz, sensor "typical" value is half that.
+	 */
+	mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Stop the controller, and don't return until we're really sure that no
+ * further DMA is going on.
+ */
+static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
+{
+	unsigned long flags;
+
+	/*
+	 * Theory: stop the camera controller (whether it is operating
+	 * or not).  Delay briefly just in case we race with the SOF
+	 * interrupt, then wait until no DMA is active.
+	 */
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	clear_bit(CF_SG_RESTART, &cam->flags);
+	mcam_ctlr_stop(cam);
+	cam->state = S_IDLE;
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+	/*
+	 * This is a brutally long sleep, but experience shows that
+	 * it can take the controller a while to get the message that
+	 * it needs to stop grabbing frames.  In particular, we can
+	 * sometimes (on mmp) get a frame at the end WITHOUT the
+	 * start-of-frame indication.
+	 */
+	msleep(150);
+	if (test_bit(CF_DMA_ACTIVE, &cam->flags))
+		cam_err(cam, "Timeout waiting for DMA to end\n");
+		/* This would be bad news - what now? */
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	mcam_ctlr_irq_disable(cam);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/*
+ * Power up and down.
+ */
+static void mcam_ctlr_power_up(struct mcam_camera *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	cam->plat_power_up(cam);
+	mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+	msleep(5); /* Just to be sure */
+}
+
+static void mcam_ctlr_power_down(struct mcam_camera *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	/*
+	 * School of hard knocks department: be sure we do any register
+	 * twiddling on the controller *before* calling the platform
+	 * power down routine.
+	 */
+	mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
+	cam->plat_power_down(cam);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/* -------------------------------------------------------------------- */
+/*
+ * Communications with the sensor.
+ */
+
+static int __mcam_cam_reset(struct mcam_camera *cam)
+{
+	return sensor_call(cam, core, reset, 0);
+}
+
+/*
+ * We have found the sensor on the i2c.  Let's try to have a
+ * conversation.
+ */
+static int mcam_cam_init(struct mcam_camera *cam)
+{
+	struct v4l2_dbg_chip_ident chip;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	if (cam->state != S_NOTREADY)
+		cam_warn(cam, "Cam init with device in funky state %d",
+				cam->state);
+	ret = __mcam_cam_reset(cam);
+	if (ret)
+		goto out;
+	chip.ident = V4L2_IDENT_NONE;
+	chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
+	chip.match.addr = cam->sensor_addr;
+	ret = sensor_call(cam, core, g_chip_ident, &chip);
+	if (ret)
+		goto out;
+	cam->sensor_type = chip.ident;
+	if (cam->sensor_type != V4L2_IDENT_OV7670) {
+		cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
+		ret = -EINVAL;
+		goto out;
+	}
+/* Get/set parameters? */
+	ret = 0;
+	cam->state = S_IDLE;
+out:
+	mcam_ctlr_power_down(cam);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+/*
+ * Configure the sensor to match the parameters we have.  Caller should
+ * hold s_mutex
+ */
+static int mcam_cam_set_flip(struct mcam_camera *cam)
+{
+	struct v4l2_control ctrl;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.id = V4L2_CID_VFLIP;
+	ctrl.value = flip;
+	return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+
+static int mcam_cam_configure(struct mcam_camera *cam)
+{
+	struct v4l2_mbus_framefmt mbus_fmt;
+	int ret;
+
+	v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
+	ret = sensor_call(cam, core, init, 0);
+	if (ret == 0)
+		ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+	/*
+	 * OV7670 does weird things if flip is set *before* format...
+	 */
+	ret += mcam_cam_set_flip(cam);
+	return ret;
+}
+
+/*
+ * Get everything ready, and start grabbing frames.
+ */
+static int mcam_read_setup(struct mcam_camera *cam)
+{
+	int ret;
+	unsigned long flags;
+
+	/*
+	 * Configuration.  If we still don't have DMA buffers,
+	 * make one last, desperate attempt.
+	 */
+	if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
+			mcam_alloc_dma_bufs(cam, 0))
+		return -ENOMEM;
+
+	if (mcam_needs_config(cam)) {
+		mcam_cam_configure(cam);
+		ret = mcam_ctlr_configure(cam);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Turn it loose.
+	 */
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	clear_bit(CF_DMA_ACTIVE, &cam->flags);
+	mcam_reset_buffers(cam);
+	mcam_ctlr_irq_enable(cam);
+	cam->state = S_STREAMING;
+	if (!test_bit(CF_SG_RESTART, &cam->flags))
+		mcam_ctlr_start(cam);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+	return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/*
+ * Videobuf2 interface code.
+ */
+
+static int mcam_vb_queue_setup(struct vb2_queue *vq,
+		const struct v4l2_format *fmt, unsigned int *nbufs,
+		unsigned int *num_planes, unsigned int sizes[],
+		void *alloc_ctxs[])
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vq);
+	int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
+
+	sizes[0] = cam->pix_format.sizeimage;
+	*num_planes = 1; /* Someday we'll have to support planar formats... */
+	if (*nbufs < minbufs)
+		*nbufs = minbufs;
+	if (cam->buffer_mode == B_DMA_contig)
+		alloc_ctxs[0] = cam->vb_alloc_ctx;
+	return 0;
+}
+
+
+static void mcam_vb_buf_queue(struct vb2_buffer *vb)
+{
+	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long flags;
+	int start;
+
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
+	list_add(&mvb->queue, &cam->buffers);
+	if (cam->state == S_STREAMING && test_bit(CF_SG_RESTART, &cam->flags))
+		mcam_sg_restart(cam);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+	if (start)
+		mcam_read_setup(cam);
+}
+
+
+/*
+ * vb2 uses these to release the mutex when waiting in dqbuf.  I'm
+ * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
+ * to be called with the mutex held), but better safe than sorry.
+ */
+static void mcam_vb_wait_prepare(struct vb2_queue *vq)
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+	mutex_unlock(&cam->s_mutex);
+}
+
+static void mcam_vb_wait_finish(struct vb2_queue *vq)
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+	mutex_lock(&cam->s_mutex);
+}
+
+/*
+ * These need to be called with the mutex held from vb2
+ */
+static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+	if (cam->state != S_IDLE) {
+		INIT_LIST_HEAD(&cam->buffers);
+		return -EINVAL;
+	}
+	cam->sequence = 0;
+	/*
+	 * Videobuf2 sneakily hoards all the buffers and won't
+	 * give them to us until *after* streaming starts.  But
+	 * we can't actually start streaming until we have a
+	 * destination.  So go into a wait state and hope they
+	 * give us buffers soon.
+	 */
+	if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
+		cam->state = S_BUFWAIT;
+		return 0;
+	}
+	return mcam_read_setup(cam);
+}
+
+static int mcam_vb_stop_streaming(struct vb2_queue *vq)
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vq);
+	unsigned long flags;
+
+	if (cam->state == S_BUFWAIT) {
+		/* They never gave us buffers */
+		cam->state = S_IDLE;
+		return 0;
+	}
+	if (cam->state != S_STREAMING)
+		return -EINVAL;
+	mcam_ctlr_stop_dma(cam);
+	/*
+	 * VB2 reclaims the buffers, so we need to forget
+	 * about them.
+	 */
+	spin_lock_irqsave(&cam->dev_lock, flags);
+	INIT_LIST_HEAD(&cam->buffers);
+	spin_unlock_irqrestore(&cam->dev_lock, flags);
+	return 0;
+}
+
+
+static const struct vb2_ops mcam_vb2_ops = {
+	.queue_setup		= mcam_vb_queue_setup,
+	.buf_queue		= mcam_vb_buf_queue,
+	.start_streaming	= mcam_vb_start_streaming,
+	.stop_streaming		= mcam_vb_stop_streaming,
+	.wait_prepare		= mcam_vb_wait_prepare,
+	.wait_finish		= mcam_vb_wait_finish,
+};
+
+
+#ifdef MCAM_MODE_DMA_SG
+/*
+ * Scatter/gather mode uses all of the above functions plus a
+ * few extras to deal with DMA mapping.
+ */
+static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
+{
+	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+	int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+	mvb->dma_desc = dma_alloc_coherent(cam->dev,
+			ndesc * sizeof(struct mcam_dma_desc),
+			&mvb->dma_desc_pa, GFP_KERNEL);
+	if (mvb->dma_desc == NULL) {
+		cam_err(cam, "Unable to get DMA descriptor array\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
+{
+	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+	struct mcam_dma_desc *desc = mvb->dma_desc;
+	struct scatterlist *sg;
+	int i;
+
+	mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
+			DMA_FROM_DEVICE);
+	if (mvb->dma_desc_nent <= 0)
+		return -EIO;  /* Not sure what's right here */
+	for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+		desc->dma_addr = sg_dma_address(sg);
+		desc->segment_len = sg_dma_len(sg);
+		desc++;
+	}
+	return 0;
+}
+
+static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+
+	dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+	return 0;
+}
+
+static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
+{
+	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+	int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+	dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
+			mvb->dma_desc, mvb->dma_desc_pa);
+}
+
+
+static const struct vb2_ops mcam_vb2_sg_ops = {
+	.queue_setup		= mcam_vb_queue_setup,
+	.buf_init		= mcam_vb_sg_buf_init,
+	.buf_prepare		= mcam_vb_sg_buf_prepare,
+	.buf_queue		= mcam_vb_buf_queue,
+	.buf_finish		= mcam_vb_sg_buf_finish,
+	.buf_cleanup		= mcam_vb_sg_buf_cleanup,
+	.start_streaming	= mcam_vb_start_streaming,
+	.stop_streaming		= mcam_vb_stop_streaming,
+	.wait_prepare		= mcam_vb_wait_prepare,
+	.wait_finish		= mcam_vb_wait_finish,
+};
+
+#endif /* MCAM_MODE_DMA_SG */
+
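+/*
+ * Set up the vb2 queue for the buffer mode chosen at registration time.
+ * mccic_register() has already rejected modes that are not compiled in,
+ * so the matching #ifdef branch below is always present.
+ */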
+static int mcam_setup_vb2(struct mcam_camera *cam)
+{
+	struct vb2_queue *vq = &cam->vb_queue;
+
+	memset(vq, 0, sizeof(*vq));
+	vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	vq->drv_priv = cam;
+	INIT_LIST_HEAD(&cam->buffers);
+	switch (cam->buffer_mode) {
+	case B_DMA_contig:
+#ifdef MCAM_MODE_DMA_CONTIG
+		vq->ops = &mcam_vb2_ops;
+		vq->mem_ops = &vb2_dma_contig_memops;
+		cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
+		vq->io_modes = VB2_MMAP | VB2_USERPTR;
+		cam->dma_setup = mcam_ctlr_dma_contig;
+		cam->frame_complete = mcam_dma_contig_done;
+#endif
+		break;
+	case B_DMA_sg:
+#ifdef MCAM_MODE_DMA_SG
+		vq->ops = &mcam_vb2_sg_ops;
+		vq->mem_ops = &vb2_dma_sg_memops;
+		vq->io_modes = VB2_MMAP | VB2_USERPTR;
+		cam->dma_setup = mcam_ctlr_dma_sg;
+		cam->frame_complete = mcam_dma_sg_done;
+#endif
+		break;
+	case B_vmalloc:
+#ifdef MCAM_MODE_VMALLOC
+		tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
+				(unsigned long) cam);
+		vq->ops = &mcam_vb2_ops;
+		vq->mem_ops = &vb2_vmalloc_memops;
+		vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+		vq->io_modes = VB2_MMAP;
+		cam->dma_setup = mcam_ctlr_dma_vmalloc;
+		cam->frame_complete = mcam_vmalloc_done;
+#endif
+		break;
+	}
+	return vb2_queue_init(vq);
+}
+
+static void mcam_cleanup_vb2(struct mcam_camera *cam)
+{
+	vb2_queue_release(&cam->vb_queue);
+#ifdef MCAM_MODE_DMA_CONTIG
+	if (cam->buffer_mode == B_DMA_contig)
+		vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
+#endif
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The long list of V4L2 ioctl() operations.
+ */
+
+static int mcam_vidioc_streamon(struct file *filp, void *priv,
+		enum v4l2_buf_type type)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_streamon(&cam->vb_queue, type);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_vidioc_streamoff(struct file *filp, void *priv,
+		enum v4l2_buf_type type)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_streamoff(&cam->vb_queue, type);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_vidioc_reqbufs(struct file *filp, void *priv,
+		struct v4l2_requestbuffers *req)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_reqbufs(&cam->vb_queue, req);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_vidioc_querybuf(struct file *filp, void *priv,
+		struct v4l2_buffer *buf)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_querybuf(&cam->vb_queue, buf);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+static int mcam_vidioc_qbuf(struct file *filp, void *priv,
+		struct v4l2_buffer *buf)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_qbuf(&cam->vb_queue, buf);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
+		struct v4l2_buffer *buf)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+
+static int mcam_vidioc_queryctrl(struct file *filp, void *priv,
+		struct v4l2_queryctrl *qc)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, core, queryctrl, qc);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_vidioc_g_ctrl(struct file *filp, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, core, g_ctrl, ctrl);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_vidioc_s_ctrl(struct file *filp, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, core, s_ctrl, ctrl);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_vidioc_querycap(struct file *file, void *priv,
+		struct v4l2_capability *cap)
+{
+	strcpy(cap->driver, "marvell_ccic");
+	strcpy(cap->card, "marvell_ccic");
+	cap->version = 1;
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+		V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+
+static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
+		void *priv, struct v4l2_fmtdesc *fmt)
+{
+	if (fmt->index >= N_MCAM_FMTS)
+		return -EINVAL;
+	strlcpy(fmt->description, mcam_formats[fmt->index].desc,
+			sizeof(fmt->description));
+	fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
+	return 0;
+}
+
+static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_format *fmt)
+{
+	struct mcam_camera *cam = priv;
+	struct mcam_format_struct *f;
+	struct v4l2_pix_format *pix = &fmt->fmt.pix;
+	struct v4l2_mbus_framefmt mbus_fmt;
+	int ret;
+
+	f = mcam_find_format(pix->pixelformat);
+	pix->pixelformat = f->pixelformat;
+	v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+	mutex_unlock(&cam->s_mutex);
+	v4l2_fill_pix_format(pix, &mbus_fmt);
+	pix->bytesperline = pix->width * f->bpp;
+	pix->sizeimage = pix->height * pix->bytesperline;
+	return ret;
+}
+
+static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_format *fmt)
+{
+	struct mcam_camera *cam = priv;
+	struct mcam_format_struct *f;
+	int ret;
+
+	/*
+	 * Can't do anything if the device is not idle.  Also can't if
+	 * there are streaming buffers in place.
+	 */
+	if (cam->state != S_IDLE || cam->vb_queue.num_buffers > 0)
+		return -EBUSY;
+
+	f = mcam_find_format(fmt->fmt.pix.pixelformat);
+
+	/*
+	 * See if the formatting works in principle.
+	 */
+	ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
+	if (ret)
+		return ret;
+	/*
+	 * Now we start to change things for real, so let's do it
+	 * under lock.
+	 */
+	mutex_lock(&cam->s_mutex);
+	cam->pix_format = fmt->fmt.pix;
+	cam->mbus_code = f->mbus_code;
+
+	/*
+	 * Make sure we have appropriate DMA buffers.
+	 */
+	if (cam->buffer_mode == B_vmalloc) {
+		ret = mcam_check_dma_buffers(cam);
+		if (ret)
+			goto out;
+	}
+	mcam_set_config_needed(cam, 1);
+out:
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+/*
+ * Return our stored notion of how the camera is/should be configured.
+ * The V4L2 spec wants us to be smarter, and actually get this from
+ * the camera (and not mess with it at open time).  Someday.
+ */
+static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_format *f)
+{
+	struct mcam_camera *cam = priv;
+
+	f->fmt.pix = cam->pix_format;
+	return 0;
+}
+
+/*
+ * We only have one input - the sensor - so minimize the nonsense here.
+ */
+static int mcam_vidioc_enum_input(struct file *filp, void *priv,
+		struct v4l2_input *input)
+{
+	if (input->index != 0)
+		return -EINVAL;
+
+	input->type = V4L2_INPUT_TYPE_CAMERA;
+	input->std = V4L2_STD_ALL; /* Not sure what should go here */
+	strcpy(input->name, "Camera");
+	return 0;
+}
+
+static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
+{
+	if (i != 0)
+		return -EINVAL;
+	return 0;
+}
+
+/* from vivi.c */
+static int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
+{
+	return 0;
+}
+
+/*
+ * G/S_PARM.  Most of this is done by the sensor, but we are
+ * the level which controls the number of read buffers.
+ */
+static int mcam_vidioc_g_parm(struct file *filp, void *priv,
+		struct v4l2_streamparm *parms)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, video, g_parm, parms);
+	mutex_unlock(&cam->s_mutex);
+	parms->parm.capture.readbuffers = n_dma_bufs;
+	return ret;
+}
+
+static int mcam_vidioc_s_parm(struct file *filp, void *priv,
+		struct v4l2_streamparm *parms)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, video, s_parm, parms);
+	mutex_unlock(&cam->s_mutex);
+	parms->parm.capture.readbuffers = n_dma_bufs;
+	return ret;
+}
+
+static int mcam_vidioc_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	struct mcam_camera *cam = priv;
+
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (v4l2_chip_match_host(&chip->match)) {
+		chip->ident = cam->chip_id;
+		return 0;
+	}
+	return sensor_call(cam, core, g_chip_ident, chip);
+}
+
+static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
+		struct v4l2_frmsizeenum *sizes)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, video, enum_framesizes, sizes);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
+		struct v4l2_frmivalenum *interval)
+{
+	struct mcam_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = sensor_call(cam, video, enum_frameintervals, interval);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mcam_vidioc_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct mcam_camera *cam = priv;
+
+	if (v4l2_chip_match_host(&reg->match)) {
+		reg->val = mcam_reg_read(cam, reg->reg);
+		reg->size = 4;
+		return 0;
+	}
+	return sensor_call(cam, core, g_register, reg);
+}
+
+static int mcam_vidioc_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct mcam_camera *cam = priv;
+
+	if (v4l2_chip_match_host(&reg->match)) {
+		mcam_reg_write(cam, reg->reg, reg->val);
+		return 0;
+	}
+	return sensor_call(cam, core, s_register, reg);
+}
+#endif
+
+static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
+	.vidioc_querycap	= mcam_vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	= mcam_vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= mcam_vidioc_s_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= mcam_vidioc_g_fmt_vid_cap,
+	.vidioc_enum_input	= mcam_vidioc_enum_input,
+	.vidioc_g_input		= mcam_vidioc_g_input,
+	.vidioc_s_input		= mcam_vidioc_s_input,
+	.vidioc_s_std		= mcam_vidioc_s_std,
+	.vidioc_reqbufs		= mcam_vidioc_reqbufs,
+	.vidioc_querybuf	= mcam_vidioc_querybuf,
+	.vidioc_qbuf		= mcam_vidioc_qbuf,
+	.vidioc_dqbuf		= mcam_vidioc_dqbuf,
+	.vidioc_streamon	= mcam_vidioc_streamon,
+	.vidioc_streamoff	= mcam_vidioc_streamoff,
+	.vidioc_queryctrl	= mcam_vidioc_queryctrl,
+	.vidioc_g_ctrl		= mcam_vidioc_g_ctrl,
+	.vidioc_s_ctrl		= mcam_vidioc_s_ctrl,
+	.vidioc_g_parm		= mcam_vidioc_g_parm,
+	.vidioc_s_parm		= mcam_vidioc_s_parm,
+	.vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
+	.vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
+	.vidioc_g_chip_ident	= mcam_vidioc_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register	= mcam_vidioc_g_register,
+	.vidioc_s_register	= mcam_vidioc_s_register,
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Our various file operations.
+ */
+static int mcam_v4l_open(struct file *filp)
+{
+	struct mcam_camera *cam = video_drvdata(filp);
+	int ret = 0;
+
+	filp->private_data = cam;
+
+	frames = singles = delivered = 0;
+	mutex_lock(&cam->s_mutex);
+	if (cam->users == 0) {
+		ret = mcam_setup_vb2(cam);
+		if (ret)
+			goto out;
+		mcam_ctlr_power_up(cam);
+		__mcam_cam_reset(cam);
+		mcam_set_config_needed(cam, 1);
+	}
+	(cam->users)++;
+out:
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_v4l_release(struct file *filp)
+{
+	struct mcam_camera *cam = filp->private_data;
+
+	cam_dbg(cam, "Release, %d frames, %d singles, %d delivered\n", frames,
+			singles, delivered);
+	mutex_lock(&cam->s_mutex);
+	(cam->users)--;
+	if (cam->users == 0) {
+		mcam_ctlr_stop_dma(cam);
+		mcam_cleanup_vb2(cam);
+		mcam_ctlr_power_down(cam);
+		if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
+			mcam_free_dma_bufs(cam);
+	}
+	mutex_unlock(&cam->s_mutex);
+	return 0;
+}
+
+static ssize_t mcam_v4l_read(struct file *filp,
+		char __user *buffer, size_t len, loff_t *pos)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_read(&cam->vb_queue, buffer, len, pos,
+			filp->f_flags & O_NONBLOCK);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+
+static unsigned int mcam_v4l_poll(struct file *filp,
+		struct poll_table_struct *pt)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_poll(&cam->vb_queue, filp, pt);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+static int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mcam_camera *cam = filp->private_data;
+	int ret;
+
+	mutex_lock(&cam->s_mutex);
+	ret = vb2_mmap(&cam->vb_queue, vma);
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+}
+
+
+
+static const struct v4l2_file_operations mcam_v4l_fops = {
+	.owner = THIS_MODULE,
+	.open = mcam_v4l_open,
+	.release = mcam_v4l_release,
+	.read = mcam_v4l_read,
+	.poll = mcam_v4l_poll,
+	.mmap = mcam_v4l_mmap,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+
+/*
+ * This template device holds all of those v4l2 methods; we
+ * clone it for specific real devices.
+ */
+static struct video_device mcam_v4l_template = {
+	.name = "mcam",
+	.tvnorms = V4L2_STD_NTSC_M,
+	.current_norm = V4L2_STD_NTSC_M,  /* make mplayer happy */
+
+	.fops = &mcam_v4l_fops,
+	.ioctl_ops = &mcam_v4l_ioctl_ops,
+	.release = video_device_release_empty,
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt handler stuff
+ */
+static void mcam_frame_complete(struct mcam_camera *cam, int frame)
+{
+	/*
+	 * Basic frame housekeeping.
+	 */
+	set_bit(frame, &cam->flags);
+	clear_bit(CF_DMA_ACTIVE, &cam->flags);
+	cam->next_buf = frame;
+	cam->buf_seq[frame] = ++(cam->sequence);
+	frames++;
+	/*
+	 * "This should never happen"
+	 */
+	if (cam->state != S_STREAMING)
+		return;
+	/*
+	 * Process the frame and set up the next one.
+	 */
+	cam->frame_complete(cam, frame);
+}
+
+
+/*
+ * The interrupt handler; this needs to be called from the
+ * platform irq handler with the lock held.
+ */
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
+{
+	unsigned int frame, handled = 0;
+
+	mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
+	/*
+	 * Handle any frame completions.  There really should
+	 * not be more than one of these, or we have fallen
+	 * far behind.
+	 *
+	 * When running in S/G mode, the frame number lacks any
+	 * real meaning - there's only one descriptor array - but
+	 * the controller still picks a different one to signal
+	 * each time.
+	 */
+	for (frame = 0; frame < cam->nbufs; frame++)
+		if (irqs & (IRQ_EOF0 << frame)) {
+			mcam_frame_complete(cam, frame);
+			handled = 1;
+			if (cam->buffer_mode == B_DMA_sg)
+				break;
+		}
+	/*
+	 * If a frame starts, note that we have DMA active.  This
+	 * code assumes that we won't get multiple frame interrupts
+	 * at once; may want to rethink that.
+	 */
+	if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2)) {
+		set_bit(CF_DMA_ACTIVE, &cam->flags);
+		handled = 1;
+		if (cam->buffer_mode == B_DMA_sg)
+			mcam_ctlr_stop(cam);
+	}
+	return handled;
+}
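+
+/*
+ * A typical caller is cafe_irq() in cafe-driver.c: the platform handler
+ * reads REG_IRQSTAT with dev_lock held and hands the bits to this
+ * function.
+ */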
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Registration and such.
+ */
+static struct ov7670_config sensor_cfg = {
+	/*
+	 * Exclude QCIF mode, because it only captures a tiny portion
+	 * of the sensor FOV
+	 */
+	.min_width = 320,
+	.min_height = 240,
+};
+
+
+int mccic_register(struct mcam_camera *cam)
+{
+	struct i2c_board_info ov7670_info = {
+		.type = "ov7670",
+		.addr = 0x42 >> 1,
+		.platform_data = &sensor_cfg,
+	};
+	int ret;
+
+	/*
+	 * Validate the requested buffer mode.
+	 */
+	if (buffer_mode >= 0)
+		cam->buffer_mode = buffer_mode;
+	if (cam->buffer_mode == B_DMA_sg &&
+			cam->chip_id == V4L2_IDENT_CAFE) {
+		printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
+			"attempting vmalloc mode instead\n");
+		cam->buffer_mode = B_vmalloc;
+	}
+	if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
+		printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
+				cam->buffer_mode);
+		return -EINVAL;
+	}
+	/*
+	 * Register with V4L
+	 */
+	ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
+	if (ret)
+		return ret;
+
+	mutex_init(&cam->s_mutex);
+	cam->state = S_NOTREADY;
+	mcam_set_config_needed(cam, 1);
+	cam->pix_format = mcam_def_pix_format;
+	cam->mbus_code = mcam_def_mbus_code;
+	INIT_LIST_HEAD(&cam->buffers);
+	mcam_ctlr_init(cam);
+
+	/*
+	 * Try to find the sensor.
+	 */
+	sensor_cfg.clock_speed = cam->clock_speed;
+	sensor_cfg.use_smbus = cam->use_smbus;
+	cam->sensor_addr = ov7670_info.addr;
+	cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
+			cam->i2c_adapter, &ov7670_info, NULL);
+	if (cam->sensor == NULL) {
+		ret = -ENODEV;
+		goto out_unregister;
+	}
+
+	ret = mcam_cam_init(cam);
+	if (ret)
+		goto out_unregister;
+	/*
+	 * Get the v4l2 setup done.
+	 */
+	mutex_lock(&cam->s_mutex);
+	cam->vdev = mcam_v4l_template;
+	cam->vdev.debug = 0;
+	cam->vdev.v4l2_dev = &cam->v4l2_dev;
+	ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto out;
+	video_set_drvdata(&cam->vdev, cam);
+
+	/*
+	 * If so requested, try to get our DMA buffers now.
+	 */
+	if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
+		if (mcam_alloc_dma_bufs(cam, 1))
+			cam_warn(cam, "Unable to alloc DMA buffers at load; "
+					"will try again later.\n");
+	}
+
+out:
+	mutex_unlock(&cam->s_mutex);
+	return ret;
+out_unregister:
+	v4l2_device_unregister(&cam->v4l2_dev);
+	return ret;
+}
+
+
+void mccic_shutdown(struct mcam_camera *cam)
+{
+	/*
+	 * If we have no users (and we really, really should have no
+	 * users) the device will already be powered down.  Trying to
+	 * take it down again will wedge the machine, which is frowned
+	 * upon.
+	 */
+	if (cam->users > 0) {
+		cam_warn(cam, "Removing a device with users!\n");
+		mcam_ctlr_power_down(cam);
+	}
+	vb2_queue_release(&cam->vb_queue);
+	if (cam->buffer_mode == B_vmalloc)
+		mcam_free_dma_bufs(cam);
+	video_unregister_device(&cam->vdev);
+	v4l2_device_unregister(&cam->v4l2_dev);
+}
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+void mccic_suspend(struct mcam_camera *cam)
+{
+	mutex_lock(&cam->s_mutex);
+	if (cam->users > 0) {
+		enum mcam_state cstate = cam->state;
+
+		mcam_ctlr_stop_dma(cam);
+		mcam_ctlr_power_down(cam);
+		cam->state = cstate;
+	}
+	mutex_unlock(&cam->s_mutex);
+}
+
+int mccic_resume(struct mcam_camera *cam)
+{
+	int ret = 0;
+
+	mutex_lock(&cam->s_mutex);
+	if (cam->users > 0) {
+		mcam_ctlr_power_up(cam);
+		__mcam_cam_reset(cam);
+	} else {
+		mcam_ctlr_power_down(cam);
+	}
+	mutex_unlock(&cam->s_mutex);
+
+	set_bit(CF_CONFIG_NEEDED, &cam->flags);
+	if (cam->state == S_STREAMING) {
+		/*
+		 * If there was a buffer in the DMA engine at suspend
+		 * time, put it back on the queue or we'll forget about it.
+		 */
+		if (cam->buffer_mode == B_DMA_sg && cam->vb_bufs[0])
+			list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
+		ret = mcam_read_setup(cam);
+	}
+	return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
new file mode 100644
index 0000000..bd6acba
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -0,0 +1,322 @@
+/*
+ * Marvell camera core structures.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#ifndef _MCAM_CORE_H
+#define _MCAM_CORE_H
+
+#include <linux/list.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+
+/*
+ * Create our own symbols for the supported buffer modes, but, for now,
+ * base them entirely on which videobuf2 options have been selected.
+ */
+#if defined(CONFIG_VIDEOBUF2_VMALLOC) || defined(CONFIG_VIDEOBUF2_VMALLOC_MODULE)
+#define MCAM_MODE_VMALLOC 1
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) || defined(CONFIG_VIDEOBUF2_DMA_CONTIG_MODULE)
+#define MCAM_MODE_DMA_CONTIG 1
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_DMA_SG) || defined(CONFIG_VIDEOBUF2_DMA_SG_MODULE)
+#define MCAM_MODE_DMA_SG 1
+#endif
+
+#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
+	!defined(MCAM_MODE_DMA_SG)
+#error One of the videobuf buffer modes must be selected in the config
+#endif
+
+
+enum mcam_state {
+	S_NOTREADY,	/* Not yet initialized */
+	S_IDLE,		/* Just hanging around */
+	S_FLAKED,	/* Some sort of problem */
+	S_STREAMING,	/* Streaming data */
+	S_BUFWAIT	/* streaming requested but no buffers yet */
+};
+#define MAX_DMA_BUFS 3
+
+/*
+ * Different platforms work best with different buffer modes, so we
+ * let the platform pick.
+ */
+enum mcam_buffer_mode {
+	B_vmalloc = 0,
+	B_DMA_contig = 1,
+	B_DMA_sg = 2
+};
+
+/*
+ * Is a given buffer mode supported by the current kernel configuration?
+ */
+static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
+{
+	switch (mode) {
+#ifdef MCAM_MODE_VMALLOC
+	case B_vmalloc:
+#endif
+#ifdef MCAM_MODE_DMA_CONTIG
+	case B_DMA_contig:
+#endif
+#ifdef MCAM_MODE_DMA_SG
+	case B_DMA_sg:
+#endif
+		return 1;
+	default:
+		return 0;
+	}
+}
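+
+/*
+ * mccic_register() uses this to refuse buffer modes that the current
+ * kernel configuration cannot back.
+ */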
+
+
+/*
+ * A description of one of our devices.
+ * Locking: controlled by s_mutex.  Certain fields, however, require
+ *          the dev_lock spinlock; they are marked as such by comments.
+ *          dev_lock is also required for access to device registers.
+ */
+struct mcam_camera {
+	/*
+	 * These fields should be set by the platform code prior to
+	 * calling mcam_register().
+	 */
+	struct i2c_adapter *i2c_adapter;
+	unsigned char __iomem *regs;
+	spinlock_t dev_lock;
+	struct device *dev; /* For messages, dma alloc */
+	unsigned int chip_id;
+	short int clock_speed;	/* Sensor clock speed, default 30 */
+	short int use_smbus;	/* SMBus or straight I2C? */
+	enum mcam_buffer_mode buffer_mode;
+	/*
+	 * Callbacks from the core to the platform code.
+	 */
+	void (*plat_power_up) (struct mcam_camera *cam);
+	void (*plat_power_down) (struct mcam_camera *cam);
+
+	/*
+	 * Everything below here is private to the mcam core and
+	 * should not be touched by the platform code.
+	 */
+	struct v4l2_device v4l2_dev;
+	enum mcam_state state;
+	unsigned long flags;		/* Buffer status, mainly (dev_lock) */
+	int users;			/* How many open FDs */
+
+	/*
+	 * Subsystem structures.
+	 */
+	struct video_device vdev;
+	struct v4l2_subdev *sensor;
+	unsigned short sensor_addr;
+
+	/* Videobuf2 stuff */
+	struct vb2_queue vb_queue;
+	struct list_head buffers;	/* Available frames */
+
+	unsigned int nbufs;		/* How many are alloc'd */
+	int next_buf;			/* Next to consume (dev_lock) */
+
+	/* DMA buffers - vmalloc mode */
+#ifdef MCAM_MODE_VMALLOC
+	unsigned int dma_buf_size;	/* allocated size */
+	void *dma_bufs[MAX_DMA_BUFS];	/* Internal buffer addresses */
+	dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
+	struct tasklet_struct s_tasklet;
+#endif
+	unsigned int sequence;		/* Frame sequence number */
+	unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
+
+	/* DMA buffers - DMA modes */
+	struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
+	struct vb2_alloc_ctx *vb_alloc_ctx;
+
+	/* Mode-specific ops, set at open time */
+	void (*dma_setup)(struct mcam_camera *cam);
+	void (*frame_complete)(struct mcam_camera *cam, int frame);
+
+	/* Current operating parameters */
+	u32 sensor_type;		/* Currently ov7670 only */
+	struct v4l2_pix_format pix_format;
+	enum v4l2_mbus_pixelcode mbus_code;
+
+	/* Locks */
+	struct mutex s_mutex; /* Access to this structure */
+};
+
+
+/*
+ * Register I/O functions.  These are here because the platform code
+ * may legitimately need to mess with the register space.
+ */
+/*
+ * Device register I/O
+ */
+static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
+		unsigned int val)
+{
+	iowrite32(val, cam->regs + reg);
+}
+
+static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
+		unsigned int reg)
+{
+	return ioread32(cam->regs + reg);
+}
+
+
+static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
+		unsigned int val, unsigned int mask)
+{
+	unsigned int v = mcam_reg_read(cam, reg);
+
+	v = (v & ~mask) | (val & mask);
+	mcam_reg_write(cam, reg, v);
+}
+
+static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
+		unsigned int reg, unsigned int val)
+{
+	mcam_reg_write_mask(cam, reg, 0, val);
+}
+
+static inline void mcam_reg_set_bit(struct mcam_camera *cam,
+		unsigned int reg, unsigned int val)
+{
+	mcam_reg_write_mask(cam, reg, val, val);
+}
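+
+/*
+ * Typical (illustrative) use of the helpers above from platform code,
+ * mirroring what mmp-driver.c does at power-up:
+ *
+ *	mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
+ *	mcam_reg_clear_bit(mcam, REG_CTRL1, C1_PWRDWN);
+ */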
+
+/*
+ * Functions for use by platform code.
+ */
+int mccic_register(struct mcam_camera *cam);
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
+void mccic_shutdown(struct mcam_camera *cam);
+#ifdef CONFIG_PM
+void mccic_suspend(struct mcam_camera *cam);
+int mccic_resume(struct mcam_camera *cam);
+#endif
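+
+/*
+ * Expected calling pattern for a platform driver (see mmp-driver.c for an
+ * example): fill in the platform fields of struct mcam_camera, call
+ * mccic_register(), forward controller interrupts to mccic_irq() from the
+ * platform IRQ handler, and call mccic_shutdown() on removal.
+ */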
+
+/*
+ * Register definitions for the m88alp01 camera interface.  Offsets in bytes
+ * as given in the spec.
+ */
+#define REG_Y0BAR	0x00
+#define REG_Y1BAR	0x04
+#define REG_Y2BAR	0x08
+/* ... */
+
+#define REG_IMGPITCH	0x24	/* Image pitch register */
+#define   IMGP_YP_SHFT	  2		/* Y pitch params */
+#define   IMGP_YP_MASK	  0x00003ffc	/* Y pitch field */
+#define	  IMGP_UVP_SHFT	  18		/* UV pitch (planar) */
+#define   IMGP_UVP_MASK   0x3ffc0000
+#define REG_IRQSTATRAW	0x28	/* RAW IRQ Status */
+#define   IRQ_EOF0	  0x00000001	/* End of frame 0 */
+#define   IRQ_EOF1	  0x00000002	/* End of frame 1 */
+#define   IRQ_EOF2	  0x00000004	/* End of frame 2 */
+#define   IRQ_SOF0	  0x00000008	/* Start of frame 0 */
+#define   IRQ_SOF1	  0x00000010	/* Start of frame 1 */
+#define   IRQ_SOF2	  0x00000020	/* Start of frame 2 */
+#define   IRQ_OVERFLOW	  0x00000040	/* FIFO overflow */
+#define   IRQ_TWSIW	  0x00010000	/* TWSI (smbus) write */
+#define   IRQ_TWSIR	  0x00020000	/* TWSI read */
+#define   IRQ_TWSIE	  0x00040000	/* TWSI error */
+#define   TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
+#define   FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
+#define   ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
+#define REG_IRQMASK	0x2c	/* IRQ mask - same bits as IRQSTAT */
+#define REG_IRQSTAT	0x30	/* IRQ status / clear */
+
+#define REG_IMGSIZE	0x34	/* Image size */
+#define  IMGSZ_V_MASK	  0x1fff0000
+#define  IMGSZ_V_SHIFT	  16
+#define	 IMGSZ_H_MASK	  0x00003fff
+#define REG_IMGOFFSET	0x38	/* Image offset */
+
+#define REG_CTRL0	0x3c	/* Control 0 */
+#define   C0_ENABLE	  0x00000001	/* Makes the whole thing go */
+
+/* Mask for all the format bits */
+#define   C0_DF_MASK	  0x00fffffc    /* Bits 2-23 */
+
+/* RGB ordering */
+#define	  C0_RGB4_RGBX	  0x00000000
+#define	  C0_RGB4_XRGB	  0x00000004
+#define	  C0_RGB4_BGRX	  0x00000008
+#define	  C0_RGB4_XBGR	  0x0000000c
+#define	  C0_RGB5_RGGB	  0x00000000
+#define	  C0_RGB5_GRBG	  0x00000004
+#define	  C0_RGB5_GBRG	  0x00000008
+#define	  C0_RGB5_BGGR	  0x0000000c
+
+/* Spec has two fields for DIN and DOUT, but they must match, so
+   combine them here. */
+#define	  C0_DF_YUV	  0x00000000	/* Data is YUV	    */
+#define	  C0_DF_RGB	  0x000000a0	/* ... RGB		    */
+#define	  C0_DF_BAYER	  0x00000140	/* ... Bayer		    */
+/* 8-8-8 must be missing from the below - ask */
+#define	  C0_RGBF_565	  0x00000000
+#define	  C0_RGBF_444	  0x00000800
+#define	  C0_RGB_BGR	  0x00001000	/* Blue comes first */
+#define	  C0_YUV_PLANAR	  0x00000000	/* YUV 422 planar format */
+#define	  C0_YUV_PACKED	  0x00008000	/* YUV 422 packed	*/
+#define	  C0_YUV_420PL	  0x0000a000	/* YUV 420 planar	*/
+/* Think that 420 packed must be 111 - ask */
+#define	  C0_YUVE_YUYV	  0x00000000	/* Y1CbY0Cr		*/
+#define	  C0_YUVE_YVYU	  0x00010000	/* Y1CrY0Cb		*/
+#define	  C0_YUVE_VYUY	  0x00020000	/* CrY1CbY0		*/
+#define	  C0_YUVE_UYVY	  0x00030000	/* CbY1CrY0		*/
+#define	  C0_YUVE_XYUV	  0x00000000	/* 420: .YUV		*/
+#define	  C0_YUVE_XYVU	  0x00010000	/* 420: .YVU		*/
+#define	  C0_YUVE_XUVY	  0x00020000	/* 420: .UVY		*/
+#define	  C0_YUVE_XVUY	  0x00030000	/* 420: .VUY		*/
+/* Bayer bits 18,19 if needed */
+#define	  C0_HPOL_LOW	  0x01000000	/* HSYNC polarity active low */
+#define	  C0_VPOL_LOW	  0x02000000	/* VSYNC polarity active low */
+#define	  C0_VCLK_LOW	  0x04000000	/* VCLK on falling edge */
+#define	  C0_DOWNSCALE	  0x08000000	/* Enable downscaler */
+#define	  C0_SIFM_MASK	  0xc0000000	/* SIF mode bits */
+#define	  C0_SIF_HVSYNC	  0x00000000	/* Use H/VSYNC */
+#define	  CO_SOF_NOSYNC	  0x40000000	/* Use inband active signaling */
+
+/* Bits below C1_444ALPHA are not present in Cafe */
+#define REG_CTRL1	0x40	/* Control 1 */
+#define	  C1_CLKGATE	  0x00000001	/* Sensor clock gate */
+#define   C1_DESC_ENA	  0x00000100	/* DMA descriptor enable */
+#define   C1_DESC_3WORD   0x00000200	/* Three-word descriptors used */
+#define	  C1_444ALPHA	  0x00f00000	/* Alpha field in RGB444 */
+#define	  C1_ALPHA_SHFT	  20
+#define	  C1_DMAB32	  0x00000000	/* 32-byte DMA burst */
+#define	  C1_DMAB16	  0x02000000	/* 16-byte DMA burst */
+#define	  C1_DMAB64	  0x04000000	/* 64-byte DMA burst */
+#define	  C1_DMAB_MASK	  0x06000000
+#define	  C1_TWOBUFS	  0x08000000	/* Use only two DMA buffers */
+#define	  C1_PWRDWN	  0x10000000	/* Power down */
+
+#define REG_CLKCTRL	0x88	/* Clock control */
+#define	  CLK_DIV_MASK	  0x0000ffff	/* Upper bits RW "reserved" */
+
+/* This appears to be a Cafe-only register */
+#define REG_UBAR	0xc4	/* Upper base address register */
+
+/* Armada 610 DMA descriptor registers */
+#define	REG_DMA_DESC_Y	0x200
+#define	REG_DMA_DESC_U	0x204
+#define	REG_DMA_DESC_V	0x208
+#define REG_DESC_LEN_Y	0x20c	/* Lengths are in bytes */
+#define	REG_DESC_LEN_U	0x210
+#define REG_DESC_LEN_V	0x214
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH	640
+#define VGA_HEIGHT	480
+
+#endif /* _MCAM_CORE_H */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
new file mode 100644
index 0000000..c4c17fe
--- /dev/null
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -0,0 +1,380 @@
+/*
+ * Support for the camera device found on Marvell MMP processors; known
+ * to work with the Armada 610 as used in the OLPC 1.75 system.
+ *
+ * Copyright 2011 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/mmp-camera.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+
+#include "mcam-core.h"
+
+MODULE_ALIAS("platform:mmp-camera");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_LICENSE("GPL");
+
+struct mmp_camera {
+	void *power_regs;
+	struct platform_device *pdev;
+	struct mcam_camera mcam;
+	struct list_head devlist;
+	int irq;
+};
+
+static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
+{
+	return container_of(mcam, struct mmp_camera, mcam);
+}
+
+/*
+ * A silly little infrastructure so we can keep track of our devices.
+ * Chances are that we will never have more than one of them, but
+ * the Armada 610 *does* have two controllers...
+ */
+
+static LIST_HEAD(mmpcam_devices);
+static struct mutex mmpcam_devices_lock;
+
+static void mmpcam_add_device(struct mmp_camera *cam)
+{
+	mutex_lock(&mmpcam_devices_lock);
+	list_add(&cam->devlist, &mmpcam_devices);
+	mutex_unlock(&mmpcam_devices_lock);
+}
+
+static void mmpcam_remove_device(struct mmp_camera *cam)
+{
+	mutex_lock(&mmpcam_devices_lock);
+	list_del(&cam->devlist);
+	mutex_unlock(&mmpcam_devices_lock);
+}
+
+/*
+ * Platform dev remove passes us a platform_device, and there's
+ * no handy unused drvdata to stash a backpointer in.  So just
+ * dig it out of our list.
+ */
+static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
+{
+	struct mmp_camera *cam;
+
+	mutex_lock(&mmpcam_devices_lock);
+	list_for_each_entry(cam, &mmpcam_devices, devlist) {
+		if (cam->pdev == pdev) {
+			mutex_unlock(&mmpcam_devices_lock);
+			return cam;
+		}
+	}
+	mutex_unlock(&mmpcam_devices_lock);
+	return NULL;
+}
+
+
+
+
+/*
+ * Power-related registers; this almost certainly belongs
+ * somewhere else.
+ *
+ * ARMADA 610 register manual, sec 7.2.1, p1842.
+ */
+#define CPU_SUBSYS_PMU_BASE	0xd4282800
+#define REG_CCIC_DCGCR		0x28	/* CCIC dyn clock gate ctrl reg */
+#define REG_CCIC_CRCR		0x50	/* CCIC clk reset ctrl reg	*/
+
+/*
+ * Power control.
+ */
+static void mmpcam_power_up_ctlr(struct mmp_camera *cam)
+{
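+	/*
+	 * Ungate the CCIC clocks and release its clock/reset controls; the
+	 * exact bit meanings are in the Armada 610 manual cited above.
+	 */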
+	iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
+	iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
+	mdelay(1);
+}
+
+static void mmpcam_power_up(struct mcam_camera *mcam)
+{
+	struct mmp_camera *cam = mcam_to_cam(mcam);
+	struct mmp_camera_platform_data *pdata;
+/*
+ * Turn on power and clocks to the controller.
+ */
+	mmpcam_power_up_ctlr(cam);
+/*
+ * Provide power to the sensor.
+ */
+	mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
+	pdata = cam->pdev->dev.platform_data;
+	gpio_set_value(pdata->sensor_power_gpio, 1);
+	mdelay(5);
+	mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
+	gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
+	mdelay(5);
+	gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
+	mdelay(5);
+}
+
+static void mmpcam_power_down(struct mcam_camera *mcam)
+{
+	struct mmp_camera *cam = mcam_to_cam(mcam);
+	struct mmp_camera_platform_data *pdata;
+/*
+ * Turn off clocks and set reset lines
+ */
+	iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
+	iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
+/*
+ * Shut down the sensor.
+ */
+	pdata = cam->pdev->dev.platform_data;
+	gpio_set_value(pdata->sensor_power_gpio, 0);
+	gpio_set_value(pdata->sensor_reset_gpio, 0);
+}
+
+
+static irqreturn_t mmpcam_irq(int irq, void *data)
+{
+	struct mcam_camera *mcam = data;
+	unsigned int irqs, handled;
+
+	spin_lock(&mcam->dev_lock);
+	irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+	handled = mccic_irq(mcam, irqs);
+	spin_unlock(&mcam->dev_lock);
+	return IRQ_RETVAL(handled);
+}
+
+
+static int mmpcam_probe(struct platform_device *pdev)
+{
+	struct mmp_camera *cam;
+	struct mcam_camera *mcam;
+	struct resource *res;
+	struct mmp_camera_platform_data *pdata;
+	int ret;
+
+	cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+	if (cam == NULL)
+		return -ENOMEM;
+	cam->pdev = pdev;
+	INIT_LIST_HEAD(&cam->devlist);
+
+	mcam = &cam->mcam;
+	mcam->plat_power_up = mmpcam_power_up;
+	mcam->plat_power_down = mmpcam_power_down;
+	mcam->dev = &pdev->dev;
+	mcam->use_smbus = 0;
+	mcam->chip_id = V4L2_IDENT_ARMADA610;
+	mcam->buffer_mode = B_DMA_sg;
+	spin_lock_init(&mcam->dev_lock);
+	/*
+	 * Get our I/O memory.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "no iomem resource!\n");
+		ret = -ENODEV;
+		goto out_free;
+	}
+	mcam->regs = ioremap(res->start, resource_size(res));
+	if (mcam->regs == NULL) {
+		dev_err(&pdev->dev, "MMIO ioremap fail\n");
+		ret = -ENODEV;
+		goto out_free;
+	}
+	/*
+	 * Power/clock memory is elsewhere; get it too.  Perhaps this
+	 * should really be managed outside of this driver?
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "no power resource!\n");
+		ret = -ENODEV;
+		goto out_unmap1;
+	}
+	cam->power_regs = ioremap(res->start, resource_size(res));
+	if (cam->power_regs == NULL) {
+		dev_err(&pdev->dev, "power MMIO ioremap fail\n");
+		ret = -ENODEV;
+		goto out_unmap1;
+	}
+	/*
+	 * Find the i2c adapter.  This assumes, of course, that the
+	 * i2c bus is already up and functioning.
+	 */
+	pdata = pdev->dev.platform_data;
+	mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
+	if (mcam->i2c_adapter == NULL) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "No i2c adapter\n");
+		goto out_unmap2;
+	}
+	/*
+	 * Sensor GPIO pins.
+	 */
+	ret = gpio_request(pdata->sensor_power_gpio, "cam-power");
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get sensor power gpio %d",
+				pdata->sensor_power_gpio);
+		goto out_unmap2;
+	}
+	gpio_direction_output(pdata->sensor_power_gpio, 0);
+	ret = gpio_request(pdata->sensor_reset_gpio, "cam-reset");
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
+				pdata->sensor_reset_gpio);
+		goto out_gpio;
+	}
+	gpio_direction_output(pdata->sensor_reset_gpio, 0);
+	/*
+	 * Power the device up and hand it off to the core.
+	 */
+	mmpcam_power_up(mcam);
+	ret = mccic_register(mcam);
+	if (ret)
+		goto out_gpio2;
+	/*
+	 * Finally, set up our IRQ now that the core is ready to
+	 * deal with it.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		ret = -ENODEV;
+		goto out_unregister;
+	}
+	cam->irq = res->start;
+	ret = request_irq(cam->irq, mmpcam_irq, IRQF_SHARED,
+			"mmp-camera", mcam);
+	if (ret == 0) {
+		mmpcam_add_device(cam);
+		return 0;
+	}
+
+out_unregister:
+	mccic_shutdown(mcam);
+out_gpio2:
+	mmpcam_power_down(mcam);
+	gpio_free(pdata->sensor_reset_gpio);
+out_gpio:
+	gpio_free(pdata->sensor_power_gpio);
+out_unmap2:
+	iounmap(cam->power_regs);
+out_unmap1:
+	iounmap(mcam->regs);
+out_free:
+	kfree(cam);
+	return ret;
+}
+
+
+static int mmpcam_remove(struct mmp_camera *cam)
+{
+	struct mcam_camera *mcam = &cam->mcam;
+	struct mmp_camera_platform_data *pdata;
+
+	mmpcam_remove_device(cam);
+	free_irq(cam->irq, mcam);
+	mccic_shutdown(mcam);
+	mmpcam_power_down(mcam);
+	pdata = cam->pdev->dev.platform_data;
+	gpio_free(pdata->sensor_reset_gpio);
+	gpio_free(pdata->sensor_power_gpio);
+	iounmap(cam->power_regs);
+	iounmap(mcam->regs);
+	kfree(cam);
+	return 0;
+}
+
+static int mmpcam_platform_remove(struct platform_device *pdev)
+{
+	struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+	if (cam == NULL)
+		return -ENODEV;
+	return mmpcam_remove(cam);
+}
+
+/*
+ * Suspend/resume support.
+ */
+#ifdef CONFIG_PM
+
+static int mmpcam_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+	if (state.event != PM_EVENT_SUSPEND)
+		return 0;
+	mccic_suspend(&cam->mcam);
+	return 0;
+}
+
+static int mmpcam_resume(struct platform_device *pdev)
+{
+	struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+	/*
+	 * Power up unconditionally just in case the core tries to
+	 * touch a register even if nothing was active before; trust
+	 * me, it's better this way.
+	 */
+	mmpcam_power_up_ctlr(cam);
+	return mccic_resume(&cam->mcam);
+}
+
+#endif
+
+
+static struct platform_driver mmpcam_driver = {
+	.probe		= mmpcam_probe,
+	.remove		= mmpcam_platform_remove,
+#ifdef CONFIG_PM
+	.suspend	= mmpcam_suspend,
+	.resume		= mmpcam_resume,
+#endif
+	.driver = {
+		.name	= "mmp-camera",
+		.owner	= THIS_MODULE
+	}
+};
+
+
+static int __init mmpcam_init_module(void)
+{
+	mutex_init(&mmpcam_devices_lock);
+	return platform_driver_register(&mmpcam_driver);
+}
+
+static void __exit mmpcam_exit_module(void)
+{
+	platform_driver_unregister(&mmpcam_driver);
+	/*
+	 * platform_driver_unregister() should have emptied the list
+	 */
+	if (!list_empty(&mmpcam_devices))
+		printk(KERN_ERR "mmp_camera leaving devices behind\n");
+}
+
+module_init(mmpcam_init_module);
+module_exit(mmpcam_exit_module);
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
new file mode 100644
index 0000000..6d0d2fb
--- /dev/null
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -0,0 +1,1131 @@
+/*
+ * A virtual v4l2-mem2mem example device.
+ *
+ * This is a virtual device driver for testing mem-to-mem videobuf framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination, processes the data and issues an "irq" (simulated by a timer).
+ * The device is capable of multi-instance, multi-buffer-per-transaction
+ * operation (via the mem2mem framework).
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
+
+MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
+MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.1");
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 640
+#define MAX_H 480
+#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE	(1 << 0)
+#define MEM2MEM_OUTPUT	(1 << 1)
+
+#define MEM2MEM_NAME		"m2m-testdev"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS	VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT	(16 * 1024 * 1024)
+
+/* Default transaction time in msec */
+#define MEM2MEM_DEF_TRANSTIME	1000
+/* Default number of buffers per transaction */
+#define MEM2MEM_DEF_TRANSLEN	1
+#define MEM2MEM_COLOR_STEP	(0xff >> 4)
+#define MEM2MEM_NUM_TILES	8
+
+/* Flags that indicate processing mode */
+#define MEM2MEM_HFLIP	(1 << 0)
+#define MEM2MEM_VFLIP	(1 << 1)
+
+#define dprintk(dev, fmt, arg...) \
+	v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+void m2mtest_dev_release(struct device *dev)
+{}
+
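+/*
+ * There is no real hardware behind this driver; m2mtest_init() registers
+ * the platform device below itself so that the platform driver has
+ * something to bind to.
+ */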
+static struct platform_device m2mtest_pdev = {
+	.name		= MEM2MEM_NAME,
+	.dev.release	= m2mtest_dev_release,
+};
+
+struct m2mtest_fmt {
+	char	*name;
+	u32	fourcc;
+	int	depth;
+	/* Types the format can be used for */
+	u32	types;
+};
+
+static struct m2mtest_fmt formats[] = {
+	{
+		.name	= "RGB565 (BE)",
+		.fourcc	= V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+		.depth	= 16,
+		/* Both capture and output format */
+		.types	= MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+	},
+	{
+		.name	= "4:2:2, packed, YUYV",
+		.fourcc	= V4L2_PIX_FMT_YUYV,
+		.depth	= 16,
+		/* Output-only format */
+		.types	= MEM2MEM_OUTPUT,
+	},
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Per-queue, driver-specific private data */
+struct m2mtest_q_data {
+	unsigned int		width;
+	unsigned int		height;
+	unsigned int		sizeimage;
+	struct m2mtest_fmt	*fmt;
+};
+
+enum {
+	V4L2_M2M_SRC = 0,
+	V4L2_M2M_DST = 1,
+};
+
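+/* Driver-private controls, registered via v4l2_ctrl_new_custom() in open() */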
+#define V4L2_CID_TRANS_TIME_MSEC	(V4L2_CID_USER_BASE + 0x1000)
+#define V4L2_CID_TRANS_NUM_BUFS		(V4L2_CID_USER_BASE + 0x1001)
+
+static struct m2mtest_fmt *find_format(struct v4l2_format *f)
+{
+	struct m2mtest_fmt *fmt;
+	unsigned int k;
+
+	for (k = 0; k < NUM_FORMATS; k++) {
+		fmt = &formats[k];
+		if (fmt->fourcc == f->fmt.pix.pixelformat)
+			break;
+	}
+
+	if (k == NUM_FORMATS)
+		return NULL;
+
+	return &formats[k];
+}
+
+struct m2mtest_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vfd;
+
+	atomic_t		num_inst;
+	struct mutex		dev_mutex;
+	spinlock_t		irqlock;
+
+	struct timer_list	timer;
+
+	struct v4l2_m2m_dev	*m2m_dev;
+};
+
+struct m2mtest_ctx {
+	struct v4l2_fh		fh;
+	struct m2mtest_dev	*dev;
+
+	struct v4l2_ctrl_handler hdl;
+
+	/* Processed buffers in this transaction */
+	u8			num_processed;
+
+	/* Transaction length (i.e. how many buffers per transaction) */
+	u32			translen;
+	/* Transaction time (i.e. simulated processing time) in milliseconds */
+	u32			transtime;
+
+	/* Abort requested by m2m */
+	int			aborting;
+
+	/* Processing mode */
+	int			mode;
+
+	enum v4l2_colorspace	colorspace;
+
+	struct v4l2_m2m_ctx	*m2m_ctx;
+
+	/* Source and destination queue data */
+	struct m2mtest_q_data   q_data[2];
+};
+
+static inline struct m2mtest_ctx *file2ctx(struct file *file)
+{
+	return container_of(file->private_data, struct m2mtest_ctx, fh);
+}
+
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+					 enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &ctx->q_data[V4L2_M2M_SRC];
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &ctx->q_data[V4L2_M2M_DST];
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+
+static int device_process(struct m2mtest_ctx *ctx,
+			  struct vb2_buffer *in_vb,
+			  struct vb2_buffer *out_vb)
+{
+	struct m2mtest_dev *dev = ctx->dev;
+	struct m2mtest_q_data *q_data;
+	u8 *p_in, *p_out;
+	int x, y, t, w;
+	int tile_w, bytes_left;
+	int width, height, bytesperline;
+
+	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+	width	= q_data->width;
+	height	= q_data->height;
+	bytesperline	= (q_data->width * q_data->fmt->depth) >> 3;
+
+	p_in = vb2_plane_vaddr(in_vb, 0);
+	p_out = vb2_plane_vaddr(out_vb, 0);
+	if (!p_in || !p_out) {
+		v4l2_err(&dev->v4l2_dev,
+			 "Acquiring kernel pointers to buffers failed\n");
+		return -EFAULT;
+	}
+
+	if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) {
+		v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
+		return -EINVAL;
+	}
+
+	tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+		/ MEM2MEM_NUM_TILES;
+	bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
+	w = 0;
+
+	switch (ctx->mode) {
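+	/*
+	 * Copy the frame line by line in MEM2MEM_NUM_TILES chunks, alternately
+	 * adding and subtracting MEM2MEM_COLOR_STEP so the result shows a
+	 * visible tile pattern; the pointer walk direction implements the
+	 * HFLIP/VFLIP combinations.
+	 */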
+	case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
+		p_out += bytesperline * height - bytes_left;
+		for (y = 0; y < height; ++y) {
+			for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+				if (w & 0x1) {
+					for (x = 0; x < tile_w; ++x)
+						*--p_out = *p_in++ +
+							MEM2MEM_COLOR_STEP;
+				} else {
+					for (x = 0; x < tile_w; ++x)
+						*--p_out = *p_in++ -
+							MEM2MEM_COLOR_STEP;
+				}
+				++w;
+			}
+			p_in += bytes_left;
+			p_out -= bytes_left;
+		}
+		break;
+
+	case MEM2MEM_HFLIP:
+		for (y = 0; y < height; ++y) {
+			p_out += MEM2MEM_NUM_TILES * tile_w;
+			for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+				if (w & 0x01) {
+					for (x = 0; x < tile_w; ++x)
+						*--p_out = *p_in++ +
+							MEM2MEM_COLOR_STEP;
+				} else {
+					for (x = 0; x < tile_w; ++x)
+						*--p_out = *p_in++ -
+							MEM2MEM_COLOR_STEP;
+				}
+				++w;
+			}
+			p_in += bytes_left;
+			p_out += bytesperline;
+		}
+		break;
+
+	case MEM2MEM_VFLIP:
+		p_out += bytesperline * (height - 1);
+		for (y = 0; y < height; ++y) {
+			for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+				if (w & 0x1) {
+					for (x = 0; x < tile_w; ++x)
+						*p_out++ = *p_in++ +
+							MEM2MEM_COLOR_STEP;
+				} else {
+					for (x = 0; x < tile_w; ++x)
+						*p_out++ = *p_in++ -
+							MEM2MEM_COLOR_STEP;
+				}
+				++w;
+			}
+			p_in += bytes_left;
+			p_out += bytes_left - 2 * bytesperline;
+		}
+		break;
+
+	default:
+		for (y = 0; y < height; ++y) {
+			for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+				if (w & 0x1) {
+					for (x = 0; x < tile_w; ++x)
+						*p_out++ = *p_in++ +
+							MEM2MEM_COLOR_STEP;
+				} else {
+					for (x = 0; x < tile_w; ++x)
+						*p_out++ = *p_in++ -
+							MEM2MEM_COLOR_STEP;
+				}
+				++w;
+			}
+			p_in += bytes_left;
+			p_out += bytes_left;
+		}
+	}
+
+	return 0;
+}
+
+static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
+{
+	dprintk(dev, "Scheduling a simulated irq\n");
+	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+	struct m2mtest_ctx *ctx = priv;
+
+	if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
+	    || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+		dprintk(ctx->dev, "Not enough buffers available\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+static void job_abort(void *priv)
+{
+	struct m2mtest_ctx *ctx = priv;
+
+	/* Will cancel the transaction in the next interrupt handler */
+	ctx->aborting = 1;
+}
+
+static void m2mtest_lock(void *priv)
+{
+	struct m2mtest_ctx *ctx = priv;
+	struct m2mtest_dev *dev = ctx->dev;
+	mutex_lock(&dev->dev_mutex);
+}
+
+static void m2mtest_unlock(void *priv)
+{
+	struct m2mtest_ctx *ctx = priv;
+	struct m2mtest_dev *dev = ctx->dev;
+	mutex_unlock(&dev->dev_mutex);
+}
+
+
+/* device_run() - prepares and starts the device
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.
+ */
+static void device_run(void *priv)
+{
+	struct m2mtest_ctx *ctx = priv;
+	struct m2mtest_dev *dev = ctx->dev;
+	struct vb2_buffer *src_buf, *dst_buf;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+	device_process(ctx, src_buf, dst_buf);
+
+	/* Run a timer, which simulates a hardware irq  */
+	schedule_irq(dev, ctx->transtime);
+}
+
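+/*
+ * device_isr() - simulated interrupt handler, run from the timer armed in
+ * device_run().  It completes the current source/destination buffers and
+ * either finishes the transaction or schedules the next "run".
+ */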
+static void device_isr(unsigned long priv)
+{
+	struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
+	struct m2mtest_ctx *curr_ctx;
+	struct vb2_buffer *src_vb, *dst_vb;
+	unsigned long flags;
+
+	curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
+
+	if (NULL == curr_ctx) {
+		printk(KERN_ERR
+			"Instance released before the end of transaction\n");
+		return;
+	}
+
+	src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+	curr_ctx->num_processed++;
+
+	spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+	v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+	v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+	spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+
+	if (curr_ctx->num_processed == curr_ctx->translen
+	    || curr_ctx->aborting) {
+		dprintk(curr_ctx->dev, "Finishing transaction\n");
+		curr_ctx->num_processed = 0;
+		v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+	} else {
+		device_run(curr_ctx);
+	}
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+	strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+	strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
+	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+	int i, num;
+	struct m2mtest_fmt *fmt;
+
+	num = 0;
+
+	for (i = 0; i < NUM_FORMATS; ++i) {
+		if (formats[i].types & type) {
+			/* Is this the index-th format of the requested type? */
+			if (num == f->index)
+				break;
+			/* Correct type but haven't reached our index yet,
+			 * just increment per-type index */
+			++num;
+		}
+	}
+
+	if (i < NUM_FORMATS) {
+		/* Format found */
+		fmt = &formats[i];
+		strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+		f->pixelformat = fmt->fourcc;
+		return 0;
+	}
+
+	/* Format not found */
+	return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct m2mtest_q_data *q_data;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+
+	f->fmt.pix.width	= q_data->width;
+	f->fmt.pix.height	= q_data->height;
+	f->fmt.pix.field	= V4L2_FIELD_NONE;
+	f->fmt.pix.pixelformat	= q_data->fmt->fourcc;
+	f->fmt.pix.bytesperline	= (q_data->width * q_data->fmt->depth) >> 3;
+	f->fmt.pix.sizeimage	= q_data->sizeimage;
+	f->fmt.pix.colorspace	= ctx->colorspace;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
+{
+	enum v4l2_field field;
+
+	field = f->fmt.pix.field;
+
+	if (field == V4L2_FIELD_ANY)
+		field = V4L2_FIELD_NONE;
+	else if (V4L2_FIELD_NONE != field)
+		return -EINVAL;
+
+	/* V4L2 specification suggests the driver corrects the format struct
+	 * if any of the dimensions is unsupported */
+	f->fmt.pix.field = field;
+
+	if (f->fmt.pix.height < MIN_H)
+		f->fmt.pix.height = MIN_H;
+	else if (f->fmt.pix.height > MAX_H)
+		f->fmt.pix.height = MAX_H;
+
+	if (f->fmt.pix.width < MIN_W)
+		f->fmt.pix.width = MIN_W;
+	else if (f->fmt.pix.width > MAX_W)
+		f->fmt.pix.width = MAX_W;
+
+	f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+	f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct m2mtest_fmt *fmt;
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	fmt = find_format(f);
+	if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+	f->fmt.pix.colorspace = ctx->colorspace;
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct m2mtest_fmt *fmt;
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	fmt = find_format(f);
+	if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+	if (!f->fmt.pix.colorspace)
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+	struct m2mtest_q_data *q_data;
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	q_data->fmt		= find_format(f);
+	q_data->width		= f->fmt.pix.width;
+	q_data->height		= f->fmt.pix.height;
+	q_data->sizeimage	= q_data->width * q_data->height
+				* q_data->fmt->depth >> 3;
+
+	dprintk(ctx->dev,
+		"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+		f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+	return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = vidioc_try_fmt_vid_cap(file, priv, f);
+	if (ret)
+		return ret;
+
+	return vidioc_s_fmt(file2ctx(file), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+	int ret;
+
+	ret = vidioc_try_fmt_vid_out(file, priv, f);
+	if (ret)
+		return ret;
+
+	ret = vidioc_s_fmt(file2ctx(file), f);
+	if (!ret)
+		ctx->colorspace = f->fmt.pix.colorspace;
+	return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct m2mtest_ctx *ctx =
+		container_of(ctrl->handler, struct m2mtest_ctx, hdl);
+
+	switch (ctrl->id) {
+	case V4L2_CID_HFLIP:
+		if (ctrl->val)
+			ctx->mode |= MEM2MEM_HFLIP;
+		else
+			ctx->mode &= ~MEM2MEM_HFLIP;
+		break;
+
+	case V4L2_CID_VFLIP:
+		if (ctrl->val)
+			ctx->mode |= MEM2MEM_VFLIP;
+		else
+			ctx->mode &= ~MEM2MEM_VFLIP;
+		break;
+
+	case V4L2_CID_TRANS_TIME_MSEC:
+		ctx->transtime = ctrl->val;
+		break;
+
+	case V4L2_CID_TRANS_NUM_BUFS:
+		ctx->translen = ctrl->val;
+		break;
+
+	default:
+		v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops m2mtest_ctrl_ops = {
+	.s_ctrl = m2mtest_s_ctrl,
+};
+
+
+static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	= vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= vidioc_s_fmt_vid_cap,
+
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out	= vidioc_g_fmt_vid_out,
+	.vidioc_try_fmt_vid_out	= vidioc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out	= vidioc_s_fmt_vid_out,
+
+	.vidioc_reqbufs		= vidioc_reqbufs,
+	.vidioc_querybuf	= vidioc_querybuf,
+
+	.vidioc_qbuf		= vidioc_qbuf,
+	.vidioc_dqbuf		= vidioc_dqbuf,
+
+	.vidioc_streamon	= vidioc_streamon,
+	.vidioc_streamoff	= vidioc_streamoff,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static int m2mtest_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq);
+	struct m2mtest_q_data *q_data;
+	unsigned int size, count = *nbuffers;
+
+	q_data = get_q_data(ctx, vq->type);
+
+	size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+
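+	/* Shrink the buffer count until the queue fits in MEM2MEM_VID_MEM_LIMIT */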
+	while (size * count > MEM2MEM_VID_MEM_LIMIT)
+		count--;
+
+	*nplanes = 1;
+	*nbuffers = count;
+	sizes[0] = size;
+
+	/*
+	 * videobuf2-vmalloc allocator is context-less so no need to set
+	 * alloc_ctxs array.
+	 */
+
+	dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+	return 0;
+}
+
+static int m2mtest_buf_prepare(struct vb2_buffer *vb)
+{
+	struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct m2mtest_q_data *q_data;
+
+	dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+	if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+		dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
+				__func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+	return 0;
+}
+
+static void m2mtest_buf_queue(struct vb2_buffer *vb)
+{
+	struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void m2mtest_wait_prepare(struct vb2_queue *q)
+{
+	struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+	m2mtest_unlock(ctx);
+}
+
+static void m2mtest_wait_finish(struct vb2_queue *q)
+{
+	struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+	m2mtest_lock(ctx);
+}
+
+static struct vb2_ops m2mtest_qops = {
+	.queue_setup	 = m2mtest_queue_setup,
+	.buf_prepare	 = m2mtest_buf_prepare,
+	.buf_queue	 = m2mtest_buf_queue,
+	.wait_prepare	 = m2mtest_wait_prepare,
+	.wait_finish	 = m2mtest_wait_finish,
+};
+
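+/*
+ * The "source" queue carries OUTPUT buffers (data fed to the device) and
+ * the "destination" queue carries CAPTURE buffers (processed results).
+ */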
+static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+	struct m2mtest_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &m2mtest_qops;
+	src_vq->mem_ops = &vb2_vmalloc_memops;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &m2mtest_qops;
+	dst_vq->mem_ops = &vb2_vmalloc_memops;
+
+	return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
+	.ops = &m2mtest_ctrl_ops,
+	.id = V4L2_CID_TRANS_TIME_MSEC,
+	.name = "Transaction Time (msec)",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.def = 1001,
+	.min = 1,
+	.max = 10001,
+	.step = 100,
+};
+
+static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
+	.ops = &m2mtest_ctrl_ops,
+	.id = V4L2_CID_TRANS_NUM_BUFS,
+	.name = "Buffers Per Transaction",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.def = 1,
+	.min = 1,
+	.max = MEM2MEM_DEF_NUM_BUFS,
+	.step = 1,
+};
+
+/*
+ * File operations
+ */
+static int m2mtest_open(struct file *file)
+{
+	struct m2mtest_dev *dev = video_drvdata(file);
+	struct m2mtest_ctx *ctx = NULL;
+	struct v4l2_ctrl_handler *hdl;
+	int rc = 0;
+
+	if (mutex_lock_interruptible(&dev->dev_mutex))
+		return -ERESTARTSYS;
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto open_unlock;
+	}
+
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	ctx->dev = dev;
+	hdl = &ctx->hdl;
+	v4l2_ctrl_handler_init(hdl, 4);
+	v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+	v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+	v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_time_msec, NULL);
+	v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_num_bufs, NULL);
+	if (hdl->error) {
+		rc = hdl->error;
+		v4l2_ctrl_handler_free(hdl);
+		kfree(ctx);
+		goto open_unlock;
+	}
+	ctx->fh.ctrl_handler = hdl;
+	v4l2_ctrl_handler_setup(hdl);
+
+	ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+	ctx->q_data[V4L2_M2M_SRC].width = 640;
+	ctx->q_data[V4L2_M2M_SRC].height = 480;
+	ctx->q_data[V4L2_M2M_SRC].sizeimage =
+		ctx->q_data[V4L2_M2M_SRC].width *
+		ctx->q_data[V4L2_M2M_SRC].height *
+		(ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3);
+	ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+	ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+	if (IS_ERR(ctx->m2m_ctx)) {
+		rc = PTR_ERR(ctx->m2m_ctx);
+
+		v4l2_ctrl_handler_free(hdl);
+		kfree(ctx);
+		goto open_unlock;
+	}
+
+	v4l2_fh_add(&ctx->fh);
+	atomic_inc(&dev->num_inst);
+
+	dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+open_unlock:
+	mutex_unlock(&dev->dev_mutex);
+	return rc;
+}
+
+static int m2mtest_release(struct file *file)
+{
+	struct m2mtest_dev *dev = video_drvdata(file);
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	dprintk(dev, "Releasing instance %p\n", ctx);
+
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	v4l2_ctrl_handler_free(&ctx->hdl);
+	mutex_lock(&dev->dev_mutex);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	mutex_unlock(&dev->dev_mutex);
+	kfree(ctx);
+
+	atomic_dec(&dev->num_inst);
+
+	return 0;
+}
+
+static unsigned int m2mtest_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct m2mtest_ctx *ctx = file2ctx(file);
+
+	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct m2mtest_dev *dev = video_drvdata(file);
+	struct m2mtest_ctx *ctx = file2ctx(file);
+	int res;
+
+	if (mutex_lock_interruptible(&dev->dev_mutex))
+		return -ERESTARTSYS;
+	res = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+	mutex_unlock(&dev->dev_mutex);
+	return res;
+}
+
+static const struct v4l2_file_operations m2mtest_fops = {
+	.owner		= THIS_MODULE,
+	.open		= m2mtest_open,
+	.release	= m2mtest_release,
+	.poll		= m2mtest_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= m2mtest_mmap,
+};
+
+static struct video_device m2mtest_videodev = {
+	.name		= MEM2MEM_NAME,
+	.fops		= &m2mtest_fops,
+	.ioctl_ops	= &m2mtest_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= device_run,
+	.job_ready	= job_ready,
+	.job_abort	= job_abort,
+	.lock		= m2mtest_lock,
+	.unlock		= m2mtest_unlock,
+};
+
+static int m2mtest_probe(struct platform_device *pdev)
+{
+	struct m2mtest_dev *dev;
+	struct video_device *vfd;
+	int ret;
+
+	dev = kzalloc(sizeof *dev, GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->irqlock);
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret)
+		goto free_dev;
+
+	atomic_set(&dev->num_inst, 0);
+	mutex_init(&dev->dev_mutex);
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto unreg_dev;
+	}
+
+	*vfd = m2mtest_videodev;
+	vfd->lock = &dev->dev_mutex;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+		goto rel_vdev;
+	}
+
+	video_set_drvdata(vfd, dev);
+	snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
+	dev->vfd = vfd;
+	v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+			" Device registered as /dev/video%d\n", vfd->num);
+
+	setup_timer(&dev->timer, device_isr, (long)dev);
+	platform_set_drvdata(pdev, dev);
+
+	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(dev->m2m_dev)) {
+		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(dev->m2m_dev);
+		goto err_m2m;
+	}
+
+	return 0;
+
+	v4l2_m2m_release(dev->m2m_dev);
+err_m2m:
+	video_unregister_device(dev->vfd);
+rel_vdev:
+	video_device_release(vfd);
+unreg_dev:
+	v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+static int m2mtest_remove(struct platform_device *pdev)
+{
+	struct m2mtest_dev *dev =
+		(struct m2mtest_dev *)platform_get_drvdata(pdev);
+
+	v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+	v4l2_m2m_release(dev->m2m_dev);
+	del_timer_sync(&dev->timer);
+	video_unregister_device(dev->vfd);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	kfree(dev);
+
+	return 0;
+}
+
+static struct platform_driver m2mtest_pdrv = {
+	.probe		= m2mtest_probe,
+	.remove		= m2mtest_remove,
+	.driver		= {
+		.name	= MEM2MEM_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static void __exit m2mtest_exit(void)
+{
+	platform_driver_unregister(&m2mtest_pdrv);
+	platform_device_unregister(&m2mtest_pdev);
+}
+
+static int __init m2mtest_init(void)
+{
+	int ret;
+
+	ret = platform_device_register(&m2mtest_pdev);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&m2mtest_pdrv);
+	if (ret)
+		platform_device_unregister(&m2mtest_pdev);
+
+	return ret;
+}
+
+module_init(m2mtest_init);
+module_exit(m2mtest_exit);
+
diff --git a/drivers/media/platform/mx1_camera.c b/drivers/media/platform/mx1_camera.c
new file mode 100644
index 0000000..d2e6f82
--- /dev/null
+++ b/drivers/media/platform/mx1_camera.c
@@ -0,0 +1,889 @@
+/*
+ * V4L2 Driver for the i.MX1/i.MXL camera (CSI) host
+ *
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * Based on PXA SoC camera driver
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/soc_mediabus.h>
+
+#include <asm/dma.h>
+#include <asm/fiq.h>
+#include <mach/dma-mx1-mx2.h>
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/mx1_camera.h>
+
+/*
+ * CSI registers
+ */
+#define CSICR1		0x00			/* CSI Control Register 1 */
+#define CSISR		0x08			/* CSI Status Register */
+#define CSIRXR		0x10			/* CSI RxFIFO Register */
+
+#define CSICR1_RXFF_LEVEL(x)	(((x) & 0x3) << 19)
+#define CSICR1_SOF_POL		(1 << 17)
+#define CSICR1_SOF_INTEN	(1 << 16)
+#define CSICR1_MCLKDIV(x)	(((x) & 0xf) << 12)
+#define CSICR1_MCLKEN		(1 << 9)
+#define CSICR1_FCC		(1 << 8)
+#define CSICR1_BIG_ENDIAN	(1 << 7)
+#define CSICR1_CLR_RXFIFO	(1 << 5)
+#define CSICR1_GCLK_MODE	(1 << 4)
+#define CSICR1_DATA_POL		(1 << 2)
+#define CSICR1_REDGE		(1 << 1)
+#define CSICR1_EN		(1 << 0)
+
+#define CSISR_SFF_OR_INT	(1 << 25)
+#define CSISR_RFF_OR_INT	(1 << 24)
+#define CSISR_STATFF_INT	(1 << 21)
+#define CSISR_RXFF_INT		(1 << 18)
+#define CSISR_SOF_INT		(1 << 16)
+#define CSISR_DRDY		(1 << 0)
+
+#define DRIVER_VERSION "0.0.2"
+#define DRIVER_NAME "mx1-camera"
+
+#define CSI_IRQ_MASK	(CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \
+			CSISR_STATFF_INT | CSISR_RXFF_INT | CSISR_SOF_INT)
+
+#define CSI_BUS_FLAGS	(V4L2_MBUS_MASTER | V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+			V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+			V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+			V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_LOW)
+
+#define MAX_VIDEO_MEM 16	/* Video memory limit in megabytes */
+
+/*
+ * Structures
+ */
+
+/* buffer for one video frame */
+struct mx1_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct videobuf_buffer		vb;
+	enum v4l2_mbus_pixelcode	code;
+	int				inwork;
+};
+
+/*
+ * i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor
+ * Interface. If anyone ever builds hardware to enable more than
+ * one camera, they will have to modify this driver too
+ */
+struct mx1_camera_dev {
+	struct soc_camera_host		soc_host;
+	struct soc_camera_device	*icd;
+	struct mx1_camera_pdata		*pdata;
+	struct mx1_buffer		*active;
+	struct resource			*res;
+	struct clk			*clk;
+	struct list_head		capture;
+
+	void __iomem			*base;
+	int				dma_chan;
+	unsigned int			irq;
+	unsigned long			mclk;
+
+	spinlock_t			lock;
+};
+
+/*
+ *  Videobuf operations
+ */
+static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+			      unsigned int *size)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+
+	*size = icd->sizeimage;
+
+	if (!*count)
+		*count = 32;
+
+	if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+		*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
+
+	dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
+
+	return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct videobuf_buffer *vb = &buf->vb;
+
+	BUG_ON(in_interrupt());
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	/*
+	 * This waits until this buffer is out of danger, i.e., until it is no
+	 * longer in STATE_QUEUED or STATE_ACTIVE
+	 */
+	videobuf_waiton(vq, vb, 0, 0);
+	videobuf_dma_contig_free(vq, vb);
+
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int mx1_videobuf_prepare(struct videobuf_queue *vq,
+		struct videobuf_buffer *vb, enum v4l2_field field)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
+	int ret;
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	/* Added list head initialization on alloc */
+	WARN_ON(!list_empty(&vb->queue));
+
+	BUG_ON(NULL == icd->current_fmt);
+
+	/*
+	 * In buf_prepare you should only have to protect global data;
+	 * the actual buffer is yours.
+	 */
+	buf->inwork = 1;
+
+	if (buf->code	!= icd->current_fmt->code ||
+	    vb->width	!= icd->user_width ||
+	    vb->height	!= icd->user_height ||
+	    vb->field	!= field) {
+		buf->code	= icd->current_fmt->code;
+		vb->width	= icd->user_width;
+		vb->height	= icd->user_height;
+		vb->field	= field;
+		vb->state	= VIDEOBUF_NEEDS_INIT;
+	}
+
+	vb->size = icd->sizeimage;
+	if (0 != vb->baddr && vb->bsize < vb->size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		ret = videobuf_iolock(vq, vb, NULL);
+		if (ret)
+			goto fail;
+
+		vb->state = VIDEOBUF_PREPARED;
+	}
+
+	buf->inwork = 0;
+
+	return 0;
+
+fail:
+	free_buffer(vq, buf);
+out:
+	buf->inwork = 0;
+	return ret;
+}
+
+static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
+{
+	struct videobuf_buffer *vbuf = &pcdev->active->vb;
+	struct device *dev = pcdev->icd->parent;
+	int ret;
+
+	if (unlikely(!pcdev->active)) {
+		dev_err(dev, "DMA End IRQ with no active buffer\n");
+		return -EFAULT;
+	}
+
+	/* Set up the single contiguous DMA transfer for this buffer */
+	ret = imx_dma_setup_single(pcdev->dma_chan,
+		videobuf_to_dma_contig(vbuf),
+		vbuf->size, pcdev->res->start +
+		CSIRXR, DMA_MODE_READ);
+	if (unlikely(ret))
+		dev_err(dev, "Failed to set up DMA\n");
+
+	return ret;
+}
+
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
+static void mx1_videobuf_queue(struct videobuf_queue *vq,
+						struct videobuf_buffer *vb)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx1_camera_dev *pcdev = ici->priv;
+	struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	list_add_tail(&vb->queue, &pcdev->capture);
+
+	vb->state = VIDEOBUF_ACTIVE;
+
+	if (!pcdev->active) {
+		pcdev->active = buf;
+
+		/* Set up DMA for the new active buffer */
+		if (!mx1_camera_setup_dma(pcdev)) {
+			unsigned int temp;
+			/* enable SOF irq */
+			temp = __raw_readl(pcdev->base + CSICR1) |
+							CSICR1_SOF_INTEN;
+			__raw_writel(temp, pcdev->base + CSICR1);
+		}
+	}
+}
+
+static void mx1_videobuf_release(struct videobuf_queue *vq,
+				 struct videobuf_buffer *vb)
+{
+	struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
+#ifdef DEBUG
+	struct soc_camera_device *icd = vq->priv_data;
+	struct device *dev = icd->parent;
+
+	dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	switch (vb->state) {
+	case VIDEOBUF_ACTIVE:
+		dev_dbg(dev, "%s (active)\n", __func__);
+		break;
+	case VIDEOBUF_QUEUED:
+		dev_dbg(dev, "%s (queued)\n", __func__);
+		break;
+	case VIDEOBUF_PREPARED:
+		dev_dbg(dev, "%s (prepared)\n", __func__);
+		break;
+	default:
+		dev_dbg(dev, "%s (unknown)\n", __func__);
+		break;
+	}
+#endif
+
+	free_buffer(vq, buf);
+}
+
+static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
+			      struct videobuf_buffer *vb,
+			      struct mx1_buffer *buf)
+{
+	/* _init is used to debug races, see comment in mx1_camera_reqbufs() */
+	list_del_init(&vb->queue);
+	vb->state = VIDEOBUF_DONE;
+	do_gettimeofday(&vb->ts);
+	vb->field_count++;
+	wake_up(&vb->done);
+
+	if (list_empty(&pcdev->capture)) {
+		pcdev->active = NULL;
+		return;
+	}
+
+	pcdev->active = list_entry(pcdev->capture.next,
+				   struct mx1_buffer, vb.queue);
+
+	/* Set up DMA for the next queued buffer */
+	if (likely(!mx1_camera_setup_dma(pcdev))) {
+		unsigned int temp;
+
+		/* enable SOF irq */
+		temp = __raw_readl(pcdev->base + CSICR1) | CSICR1_SOF_INTEN;
+		__raw_writel(temp, pcdev->base + CSICR1);
+	}
+}
+
+static void mx1_camera_dma_irq(int channel, void *data)
+{
+	struct mx1_camera_dev *pcdev = data;
+	struct device *dev = pcdev->icd->parent;
+	struct mx1_buffer *buf;
+	struct videobuf_buffer *vb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	imx_dma_disable(channel);
+
+	if (unlikely(!pcdev->active)) {
+		dev_err(dev, "DMA End IRQ with no active buffer\n");
+		goto out;
+	}
+
+	vb = &pcdev->active->vb;
+	buf = container_of(vb, struct mx1_buffer, vb);
+	WARN_ON(buf->inwork || list_empty(&vb->queue));
+	dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	mx1_camera_wakeup(pcdev, vb, buf);
+out:
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static struct videobuf_queue_ops mx1_videobuf_ops = {
+	.buf_setup	= mx1_videobuf_setup,
+	.buf_prepare	= mx1_videobuf_prepare,
+	.buf_queue	= mx1_videobuf_queue,
+	.buf_release	= mx1_videobuf_release,
+};
+
+static void mx1_camera_init_videobuf(struct videobuf_queue *q,
+				     struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx1_camera_dev *pcdev = ici->priv;
+
+	videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
+				&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+				V4L2_FIELD_NONE,
+				sizeof(struct mx1_buffer), icd, &icd->video_lock);
+}
+
+static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
+{
+	unsigned int mclk = pcdev->mclk;
+	unsigned long div;
+	unsigned long lcdclk;
+
+	lcdclk = clk_get_rate(pcdev->clk);
+
+	/*
+	 * We verify platform_mclk_10khz != 0, so if anyone breaks it, here
+	 * they get a nice Oops
+	 */
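+	/*
+	 * Round up so that, assuming the hardware derives MCLK as
+	 * lcdclk / (2 * (div + 1)), the resulting MCLK never exceeds the
+	 * requested rate.
+	 */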
+	div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
+
+	dev_dbg(pcdev->icd->parent,
+		"System clock %lukHz, target freq %dkHz, divisor %lu\n",
+		lcdclk / 1000, mclk / 1000, div);
+
+	return div;
+}
+
+static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
+{
+	unsigned int csicr1 = CSICR1_EN;
+
+	dev_dbg(pcdev->icd->parent, "Activate device\n");
+
+	clk_enable(pcdev->clk);
+
+	/* enable CSI before doing anything else */
+	__raw_writel(csicr1, pcdev->base + CSICR1);
+
+	csicr1 |= CSICR1_MCLKEN | CSICR1_FCC | CSICR1_GCLK_MODE;
+	csicr1 |= CSICR1_MCLKDIV(mclk_get_divisor(pcdev));
+	csicr1 |= CSICR1_RXFF_LEVEL(2); /* 16 words */
+
+	__raw_writel(csicr1, pcdev->base + CSICR1);
+}
+
+static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
+{
+	dev_dbg(pcdev->icd->parent, "Deactivate device\n");
+
+	/* Disable the CSI interface */
+	__raw_writel(0x00, pcdev->base + CSICR1);
+
+	clk_disable(pcdev->clk);
+}
+
+/*
+ * The following two functions absolutely depend on the fact, that
+ * there can be only one camera on i.MX1/i.MXL camera sensor interface
+ */
+static int mx1_camera_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx1_camera_dev *pcdev = ici->priv;
+
+	if (pcdev->icd)
+		return -EBUSY;
+
+	dev_info(icd->parent, "MX1 Camera driver attached to camera %d\n",
+		 icd->devnum);
+
+	mx1_camera_activate(pcdev);
+
+	pcdev->icd = icd;
+
+	return 0;
+}
+
+static void mx1_camera_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx1_camera_dev *pcdev = ici->priv;
+	unsigned int csicr1;
+
+	BUG_ON(icd != pcdev->icd);
+
+	/* disable interrupts */
+	csicr1 = __raw_readl(pcdev->base + CSICR1) & ~CSI_IRQ_MASK;
+	__raw_writel(csicr1, pcdev->base + CSICR1);
+
+	/* Stop DMA engine */
+	imx_dma_disable(pcdev->dma_chan);
+
+	dev_info(icd->parent, "MX1 Camera driver detached from camera %d\n",
+		 icd->devnum);
+
+	mx1_camera_deactivate(pcdev);
+
+	pcdev->icd = NULL;
+}
+
+static int mx1_camera_set_crop(struct soc_camera_device *icd,
+			       struct v4l2_crop *a)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+	return v4l2_subdev_call(sd, video, s_crop, a);
+}
+
+static int mx1_camera_set_bus_param(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx1_camera_dev *pcdev = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long common_flags;
+	unsigned int csicr1;
+	int ret;
+
+	/* MX1 supports only 8bit buswidth */
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg, CSI_BUS_FLAGS);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%x\n",
+				 cfg.flags, CSI_BUS_FLAGS);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = CSI_BUS_FLAGS;
+	}
+
+	/* Make choices based on the platform configuration */
+	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+		(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+			if (!pcdev->pdata ||
+			     pcdev->pdata->flags & MX1_CAMERA_VSYNC_HIGH)
+				common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+			else
+				common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+	}
+
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+		(common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+			if (!pcdev->pdata ||
+			     pcdev->pdata->flags & MX1_CAMERA_PCLK_RISING)
+				common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+			else
+				common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+	}
+
+	if ((common_flags & V4L2_MBUS_DATA_ACTIVE_HIGH) &&
+		(common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)) {
+			if (!pcdev->pdata ||
+			     pcdev->pdata->flags & MX1_CAMERA_DATA_HIGH)
+				common_flags &= ~V4L2_MBUS_DATA_ACTIVE_LOW;
+			else
+				common_flags &= ~V4L2_MBUS_DATA_ACTIVE_HIGH;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	csicr1 = __raw_readl(pcdev->base + CSICR1);
+
+	if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+		csicr1 |= CSICR1_REDGE;
+	if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+		csicr1 |= CSICR1_SOF_POL;
+	if (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)
+		csicr1 |= CSICR1_DATA_POL;
+
+	__raw_writel(csicr1, pcdev->base + CSICR1);
+
+	return 0;
+}
+
+static int mx1_camera_set_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret, buswidth;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %x not found\n",
+			 pix->pixelformat);
+		return -EINVAL;
+	}
+
+	buswidth = xlate->host_fmt->bits_per_sample;
+	if (buswidth > 8) {
+		dev_warn(icd->parent,
+			 "bits-per-sample %d for format %x unsupported\n",
+			 buswidth, pix->pixelformat);
+		return -EINVAL;
+	}
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
+	pix->width		= mf.width;
+	pix->height		= mf.height;
+	pix->field		= mf.field;
+	pix->colorspace		= mf.colorspace;
+	icd->current_fmt	= xlate;
+
+	return ret;
+}
+
+static int mx1_camera_try_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+	/* TODO: limit to mx1 hardware capabilities */
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %x not found\n",
+			 pix->pixelformat);
+		return -EINVAL;
+	}
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	/* limit to sensor capabilities */
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->field	= mf.field;
+	pix->colorspace	= mf.colorspace;
+
+	return 0;
+}
+
+static int mx1_camera_reqbufs(struct soc_camera_device *icd,
+			      struct v4l2_requestbuffers *p)
+{
+	int i;
+
+	/*
+	 * This is for locking debugging only. I removed spinlocks and now I
+	 * check whether .prepare is ever called on a linked buffer, or whether
+	 * a DMA IRQ can occur for an in-work or unlinked buffer. So far it
+	 * has not triggered.
+	 */
+	for (i = 0; i < p->count; i++) {
+		struct mx1_buffer *buf = container_of(icd->vb_vidq.bufs[i],
+						      struct mx1_buffer, vb);
+		buf->inwork = 0;
+		INIT_LIST_HEAD(&buf->vb.queue);
+	}
+
+	return 0;
+}
+
+static unsigned int mx1_camera_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct mx1_buffer *buf;
+
+	buf = list_entry(icd->vb_vidq.stream.next, struct mx1_buffer,
+			 vb.stream);
+
+	poll_wait(file, &buf->vb.done, pt);
+
+	if (buf->vb.state == VIDEOBUF_DONE ||
+	    buf->vb.state == VIDEOBUF_ERROR)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static int mx1_camera_querycap(struct soc_camera_host *ici,
+			       struct v4l2_capability *cap)
+{
+	/* cap->name is set by the friendly caller:-> */
+	strlcpy(cap->card, "i.MX1/i.MXL Camera", sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static struct soc_camera_host_ops mx1_soc_camera_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= mx1_camera_add_device,
+	.remove		= mx1_camera_remove_device,
+	.set_bus_param	= mx1_camera_set_bus_param,
+	.set_crop	= mx1_camera_set_crop,
+	.set_fmt	= mx1_camera_set_fmt,
+	.try_fmt	= mx1_camera_try_fmt,
+	.init_videobuf	= mx1_camera_init_videobuf,
+	.reqbufs	= mx1_camera_reqbufs,
+	.poll		= mx1_camera_poll,
+	.querycap	= mx1_camera_querycap,
+};
+
+static struct fiq_handler fh = {
+	.name		= "csi_sof"
+};
+
+static int __init mx1_camera_probe(struct platform_device *pdev)
+{
+	struct mx1_camera_dev *pcdev;
+	struct resource *res;
+	struct pt_regs regs;
+	struct clk *clk;
+	void __iomem *base;
+	unsigned int irq;
+	int err = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || (int)irq <= 0) {
+		err = -ENODEV;
+		goto exit;
+	}
+
+	clk = clk_get(&pdev->dev, "csi_clk");
+	if (IS_ERR(clk)) {
+		err = PTR_ERR(clk);
+		goto exit;
+	}
+
+	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+	if (!pcdev) {
+		dev_err(&pdev->dev, "Could not allocate pcdev\n");
+		err = -ENOMEM;
+		goto exit_put_clk;
+	}
+
+	pcdev->res = res;
+	pcdev->clk = clk;
+
+	pcdev->pdata = pdev->dev.platform_data;
+
+	if (pcdev->pdata)
+		pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+
+	if (!pcdev->mclk) {
+		dev_warn(&pdev->dev,
+			 "mclk_10khz == 0! Please, fix your platform data. "
+			 "Using default 20MHz\n");
+		pcdev->mclk = 20000000;
+	}
+
+	INIT_LIST_HEAD(&pcdev->capture);
+	spin_lock_init(&pcdev->lock);
+
+	/*
+	 * Request the regions.
+	 */
+	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
+		err = -EBUSY;
+		goto exit_kfree;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+	pcdev->irq = irq;
+	pcdev->base = base;
+
+	/* request dma */
+	pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
+	if (pcdev->dma_chan < 0) {
+		dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
+		err = -EBUSY;
+		goto exit_iounmap;
+	}
+	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);
+
+	imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
+			       pcdev);
+
+	imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO,
+			       IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0);
+	/* burst length : 16 words = 64 bytes */
+	imx_dma_config_burstlen(pcdev->dma_chan, 0);
+
+	/* request irq */
+	err = claim_fiq(&fh);
+	if (err) {
+		dev_err(&pdev->dev, "Camera interrupt registration failed\n");
+		goto exit_free_dma;
+	}
+
+	set_fiq_handler(&mx1_camera_sof_fiq_start, &mx1_camera_sof_fiq_end -
+						   &mx1_camera_sof_fiq_start);
+
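+	/*
+	 * The FIQ handler takes its parameters from the banked FIQ registers,
+	 * as set up below: r8 = DMA interrupt mask register, r9 = DMA channel
+	 * control register, r10 = CSI control register, fp = CSI status
+	 * register, sp = the DMA channel bit.
+	 */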
+	regs.ARM_r8 = (long)MX1_DMA_DIMR;
+	regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan);
+	regs.ARM_r10 = (long)pcdev->base + CSICR1;
+	regs.ARM_fp = (long)pcdev->base + CSISR;
+	regs.ARM_sp = 1 << pcdev->dma_chan;
+	set_fiq_regs(&regs);
+
+	mxc_set_irq_fiq(irq, 1);
+	enable_fiq(irq);
+
+	pcdev->soc_host.drv_name	= DRIVER_NAME;
+	pcdev->soc_host.ops		= &mx1_soc_camera_host_ops;
+	pcdev->soc_host.priv		= pcdev;
+	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
+	pcdev->soc_host.nr		= pdev->id;
+	err = soc_camera_host_register(&pcdev->soc_host);
+	if (err)
+		goto exit_free_irq;
+
+	dev_info(&pdev->dev, "MX1 Camera driver loaded\n");
+
+	return 0;
+
+exit_free_irq:
+	disable_fiq(irq);
+	mxc_set_irq_fiq(irq, 0);
+	release_fiq(&fh);
+exit_free_dma:
+	imx_dma_free(pcdev->dma_chan);
+exit_iounmap:
+	iounmap(base);
+exit_release:
+	release_mem_region(res->start, resource_size(res));
+exit_kfree:
+	kfree(pcdev);
+exit_put_clk:
+	clk_put(clk);
+exit:
+	return err;
+}
+
+static int __exit mx1_camera_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct mx1_camera_dev *pcdev = container_of(soc_host,
+					struct mx1_camera_dev, soc_host);
+	struct resource *res;
+
+	imx_dma_free(pcdev->dma_chan);
+	disable_fiq(pcdev->irq);
+	mxc_set_irq_fiq(pcdev->irq, 0);
+	release_fiq(&fh);
+
+	clk_put(pcdev->clk);
+
+	soc_camera_host_unregister(soc_host);
+
+	iounmap(pcdev->base);
+
+	res = pcdev->res;
+	release_mem_region(res->start, resource_size(res));
+
+	kfree(pcdev);
+
+	dev_info(&pdev->dev, "MX1 Camera driver unloaded\n");
+
+	return 0;
+}
+
+static struct platform_driver mx1_camera_driver = {
+	.driver 	= {
+		.name	= DRIVER_NAME,
+	},
+	.remove		= __exit_p(mx1_camera_remove),
+};
+
+static int __init mx1_camera_init(void)
+{
+	return platform_driver_probe(&mx1_camera_driver, mx1_camera_probe);
+}
+
+static void __exit mx1_camera_exit(void)
+{
+	return platform_driver_unregister(&mx1_camera_driver);
+}
+
+module_init(mx1_camera_init);
+module_exit(mx1_camera_exit);
+
+MODULE_DESCRIPTION("i.MX1/i.MXL SoC Camera Host driver");
+MODULE_AUTHOR("Paulius Zaleckas <paulius.zaleckas@teltonika.lt>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/mx2_camera.c b/drivers/media/platform/mx2_camera.c
new file mode 100644
index 0000000..637bde8
--- /dev/null
+++ b/drivers/media/platform/mx2_camera.c
@@ -0,0 +1,1869 @@
+/*
+ * V4L2 Driver for i.MX27/i.MX25 camera host
+ *
+ * Copyright (C) 2008, Sascha Hauer, Pengutronix
+ * Copyright (C) 2010, Baruch Siach, Orex Computed Radiography
+ * Copyright (C) 2012, Javier Martin, Vista Silicon S.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+
+#include <linux/videodev2.h>
+
+#include <mach/mx2_cam.h>
+#include <mach/hardware.h>
+
+#include <asm/dma.h>
+
+#define MX2_CAM_DRV_NAME "mx2-camera"
+#define MX2_CAM_VERSION "0.0.6"
+#define MX2_CAM_DRIVER_DESCRIPTION "i.MX2x_Camera"
+
+/* reset values */
+#define CSICR1_RESET_VAL	0x40000800
+#define CSICR2_RESET_VAL	0x0
+#define CSICR3_RESET_VAL	0x0
+
+/* csi control reg 1 */
+#define CSICR1_SWAP16_EN	(1 << 31)
+#define CSICR1_EXT_VSYNC	(1 << 30)
+#define CSICR1_EOF_INTEN	(1 << 29)
+#define CSICR1_PRP_IF_EN	(1 << 28)
+#define CSICR1_CCIR_MODE	(1 << 27)
+#define CSICR1_COF_INTEN	(1 << 26)
+#define CSICR1_SF_OR_INTEN	(1 << 25)
+#define CSICR1_RF_OR_INTEN	(1 << 24)
+#define CSICR1_STATFF_LEVEL	(3 << 22)
+#define CSICR1_STATFF_INTEN	(1 << 21)
+#define CSICR1_RXFF_LEVEL(l)	(((l) & 3) << 19)	/* MX27 */
+#define CSICR1_FB2_DMA_INTEN	(1 << 20)		/* MX25 */
+#define CSICR1_FB1_DMA_INTEN	(1 << 19)		/* MX25 */
+#define CSICR1_RXFF_INTEN	(1 << 18)
+#define CSICR1_SOF_POL		(1 << 17)
+#define CSICR1_SOF_INTEN	(1 << 16)
+#define CSICR1_MCLKDIV(d)	(((d) & 0xF) << 12)
+#define CSICR1_HSYNC_POL	(1 << 11)
+#define CSICR1_CCIR_EN		(1 << 10)
+#define CSICR1_MCLKEN		(1 << 9)
+#define CSICR1_FCC		(1 << 8)
+#define CSICR1_PACK_DIR		(1 << 7)
+#define CSICR1_CLR_STATFIFO	(1 << 6)
+#define CSICR1_CLR_RXFIFO	(1 << 5)
+#define CSICR1_GCLK_MODE	(1 << 4)
+#define CSICR1_INV_DATA		(1 << 3)
+#define CSICR1_INV_PCLK		(1 << 2)
+#define CSICR1_REDGE		(1 << 1)
+#define CSICR1_FMT_MASK		(CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
+
+#define SHIFT_STATFF_LEVEL	22
+#define SHIFT_RXFF_LEVEL	19
+#define SHIFT_MCLKDIV		12
+
+/* control reg 3 */
+#define CSICR3_FRMCNT		(0xFFFF << 16)
+#define CSICR3_FRMCNT_RST	(1 << 15)
+#define CSICR3_DMA_REFLASH_RFF	(1 << 14)
+#define CSICR3_DMA_REFLASH_SFF	(1 << 13)
+#define CSICR3_DMA_REQ_EN_RFF	(1 << 12)
+#define CSICR3_DMA_REQ_EN_SFF	(1 << 11)
+#define CSICR3_RXFF_LEVEL(l)	(((l) & 7) << 4)	/* MX25 */
+#define CSICR3_CSI_SUP		(1 << 3)
+#define CSICR3_ZERO_PACK_EN	(1 << 2)
+#define CSICR3_ECC_INT_EN	(1 << 1)
+#define CSICR3_ECC_AUTO_EN	(1 << 0)
+
+#define SHIFT_FRMCNT		16
+
+/* csi status reg */
+#define CSISR_SFF_OR_INT	(1 << 25)
+#define CSISR_RFF_OR_INT	(1 << 24)
+#define CSISR_STATFF_INT	(1 << 21)
+#define CSISR_DMA_TSF_FB2_INT	(1 << 20)	/* MX25 */
+#define CSISR_DMA_TSF_FB1_INT	(1 << 19)	/* MX25 */
+#define CSISR_RXFF_INT		(1 << 18)
+#define CSISR_EOF_INT		(1 << 17)
+#define CSISR_SOF_INT		(1 << 16)
+#define CSISR_F2_INT		(1 << 15)
+#define CSISR_F1_INT		(1 << 14)
+#define CSISR_COF_INT		(1 << 13)
+#define CSISR_ECC_INT		(1 << 1)
+#define CSISR_DRDY		(1 << 0)
+
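+/*
+ * CSI register offsets; note that CSISR and CSICR3 sit at different
+ * offsets on i.MX25 and i.MX27.
+ */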
+#define CSICR1			0x00
+#define CSICR2			0x04
+#define CSISR			(cpu_is_mx27() ? 0x08 : 0x18)
+#define CSISTATFIFO		0x0c
+#define CSIRFIFO		0x10
+#define CSIRXCNT		0x14
+#define CSICR3			(cpu_is_mx27() ? 0x1C : 0x08)
+#define CSIDMASA_STATFIFO	0x20
+#define CSIDMATA_STATFIFO	0x24
+#define CSIDMASA_FB1		0x28
+#define CSIDMASA_FB2		0x2c
+#define CSIFBUF_PARA		0x30
+#define CSIIMAG_PARA		0x34
+
+/* EMMA PrP */
+#define PRP_CNTL			0x00
+#define PRP_INTR_CNTL			0x04
+#define PRP_INTRSTATUS			0x08
+#define PRP_SOURCE_Y_PTR		0x0c
+#define PRP_SOURCE_CB_PTR		0x10
+#define PRP_SOURCE_CR_PTR		0x14
+#define PRP_DEST_RGB1_PTR		0x18
+#define PRP_DEST_RGB2_PTR		0x1c
+#define PRP_DEST_Y_PTR			0x20
+#define PRP_DEST_CB_PTR			0x24
+#define PRP_DEST_CR_PTR			0x28
+#define PRP_SRC_FRAME_SIZE		0x2c
+#define PRP_DEST_CH1_LINE_STRIDE	0x30
+#define PRP_SRC_PIXEL_FORMAT_CNTL	0x34
+#define PRP_CH1_PIXEL_FORMAT_CNTL	0x38
+#define PRP_CH1_OUT_IMAGE_SIZE		0x3c
+#define PRP_CH2_OUT_IMAGE_SIZE		0x40
+#define PRP_SRC_LINE_STRIDE		0x44
+#define PRP_CSC_COEF_012		0x48
+#define PRP_CSC_COEF_345		0x4c
+#define PRP_CSC_COEF_678		0x50
+#define PRP_CH1_RZ_HORI_COEF1		0x54
+#define PRP_CH1_RZ_HORI_COEF2		0x58
+#define PRP_CH1_RZ_HORI_VALID		0x5c
+#define PRP_CH1_RZ_VERT_COEF1		0x60
+#define PRP_CH1_RZ_VERT_COEF2		0x64
+#define PRP_CH1_RZ_VERT_VALID		0x68
+#define PRP_CH2_RZ_HORI_COEF1		0x6c
+#define PRP_CH2_RZ_HORI_COEF2		0x70
+#define PRP_CH2_RZ_HORI_VALID		0x74
+#define PRP_CH2_RZ_VERT_COEF1		0x78
+#define PRP_CH2_RZ_VERT_COEF2		0x7c
+#define PRP_CH2_RZ_VERT_VALID		0x80
+
+#define PRP_CNTL_CH1EN		(1 << 0)
+#define PRP_CNTL_CH2EN		(1 << 1)
+#define PRP_CNTL_CSIEN		(1 << 2)
+#define PRP_CNTL_DATA_IN_YUV420	(0 << 3)
+#define PRP_CNTL_DATA_IN_YUV422	(1 << 3)
+#define PRP_CNTL_DATA_IN_RGB16	(2 << 3)
+#define PRP_CNTL_DATA_IN_RGB32	(3 << 3)
+#define PRP_CNTL_CH1_OUT_RGB8	(0 << 5)
+#define PRP_CNTL_CH1_OUT_RGB16	(1 << 5)
+#define PRP_CNTL_CH1_OUT_RGB32	(2 << 5)
+#define PRP_CNTL_CH1_OUT_YUV422	(3 << 5)
+#define PRP_CNTL_CH2_OUT_YUV420	(0 << 7)
+#define PRP_CNTL_CH2_OUT_YUV422 (1 << 7)
+#define PRP_CNTL_CH2_OUT_YUV444	(2 << 7)
+#define PRP_CNTL_CH1_LEN	(1 << 9)
+#define PRP_CNTL_CH2_LEN	(1 << 10)
+#define PRP_CNTL_SKIP_FRAME	(1 << 11)
+#define PRP_CNTL_SWRST		(1 << 12)
+#define PRP_CNTL_CLKEN		(1 << 13)
+#define PRP_CNTL_WEN		(1 << 14)
+#define PRP_CNTL_CH1BYP		(1 << 15)
+#define PRP_CNTL_IN_TSKIP(x)	((x) << 16)
+#define PRP_CNTL_CH1_TSKIP(x)	((x) << 19)
+#define PRP_CNTL_CH2_TSKIP(x)	((x) << 22)
+#define PRP_CNTL_INPUT_FIFO_LEVEL(x)	((x) << 25)
+#define PRP_CNTL_RZ_FIFO_LEVEL(x)	((x) << 27)
+#define PRP_CNTL_CH2B1EN	(1 << 29)
+#define PRP_CNTL_CH2B2EN	(1 << 30)
+#define PRP_CNTL_CH2FEN		(1 << 31)
+
+/* IRQ Enable and status register */
+#define PRP_INTR_RDERR		(1 << 0)
+#define PRP_INTR_CH1WERR	(1 << 1)
+#define PRP_INTR_CH2WERR	(1 << 2)
+#define PRP_INTR_CH1FC		(1 << 3)
+#define PRP_INTR_CH2FC		(1 << 5)
+#define PRP_INTR_LBOVF		(1 << 7)
+#define PRP_INTR_CH2OVF		(1 << 8)
+
+/* Resizing registers */
+#define PRP_RZ_VALID_TBL_LEN(x)	((x) << 24)
+#define PRP_RZ_VALID_BILINEAR	(1 << 31)
+
+#define MAX_VIDEO_MEM	16
+
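+/*
+ * PrP resize coefficients are 3-bit (eighth-step) fixed-point fractions;
+ * the bit above the fraction (SZ_COEF) is gathered into the
+ * PRP_CH*_RZ_*_VALID registers by mx2_prp_resize_commit() and presumably
+ * tells the resizer when to emit an output pixel.
+ */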
+#define RESIZE_NUM_MIN	1
+#define RESIZE_NUM_MAX	20
+#define BC_COEF		3
+#define SZ_COEF		(1 << BC_COEF)
+
+#define RESIZE_DIR_H	0
+#define RESIZE_DIR_V	1
+
+#define RESIZE_ALGO_BILINEAR 0
+#define RESIZE_ALGO_AVERAGING 1
+
+struct mx2_prp_cfg {
+	int channel;
+	u32 in_fmt;
+	u32 out_fmt;
+	u32 src_pixel;
+	u32 ch1_pixel;
+	u32 irq_flags;
+	u32 csicr1;
+};
+
+/* prp resizing parameters */
+struct emma_prp_resize {
+	int		algo; /* type of algorithm used */
+	int		len; /* number of coefficients */
+	unsigned char	s[RESIZE_NUM_MAX]; /* table of coefficients */
+};
+
+/* prp configuration for a client-host fmt pair */
+struct mx2_fmt_cfg {
+	enum v4l2_mbus_pixelcode	in_fmt;
+	u32				out_fmt;
+	struct mx2_prp_cfg		cfg;
+};
+
+enum mx2_buffer_state {
+	MX2_STATE_QUEUED,
+	MX2_STATE_ACTIVE,
+	MX2_STATE_DONE,
+};
+
+struct mx2_buf_internal {
+	struct list_head	queue;
+	int			bufnum;
+	bool			discard;
+};
+
+/* buffer for one video frame */
+struct mx2_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_buffer		vb;
+	enum mx2_buffer_state		state;
+	struct mx2_buf_internal		internal;
+};
+
+struct mx2_camera_dev {
+	struct device		*dev;
+	struct soc_camera_host	soc_host;
+	struct soc_camera_device *icd;
+	struct clk		*clk_csi, *clk_emma;
+
+	unsigned int		irq_csi, irq_emma;
+	void __iomem		*base_csi, *base_emma;
+	unsigned long		base_dma;
+
+	struct mx2_camera_platform_data *pdata;
+	struct resource		*res_csi, *res_emma;
+	unsigned long		platform_flags;
+
+	struct list_head	capture;
+	struct list_head	active_bufs;
+	struct list_head	discard;
+
+	spinlock_t		lock;
+
+	int			dma;
+	struct mx2_buffer	*active;
+	struct mx2_buffer	*fb1_active;
+	struct mx2_buffer	*fb2_active;
+
+	u32			csicr1;
+
+	struct mx2_buf_internal buf_discard[2];
+	void			*discard_buffer;
+	dma_addr_t		discard_buffer_dma;
+	size_t			discard_size;
+	struct mx2_fmt_cfg	*emma_prp;
+	struct emma_prp_resize	resizing[2];
+	unsigned int		s_width, s_height;
+	u32			frame_count;
+	struct vb2_alloc_ctx	*alloc_ctx;
+};
+
+static struct mx2_buffer *mx2_ibuf_to_buf(struct mx2_buf_internal *int_buf)
+{
+	return container_of(int_buf, struct mx2_buffer, internal);
+}
+
+static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
+	/*
+	 * This is a generic configuration which is valid for most
+	 * prp input-output format combinations.
+	 * We set the incoming and outgoing pixel format to a
+	 * 16 bit wide format and adjust the bytesperline
+	 * accordingly. With this configuration the input data
+	 * will not be changed by the eMMA and can be any type
+	 * of 16 bit pixel format.
+	 */
+	{
+		.in_fmt		= 0,
+		.out_fmt	= 0,
+		.cfg		= {
+			.channel	= 1,
+			.in_fmt		= PRP_CNTL_DATA_IN_RGB16,
+			.out_fmt	= PRP_CNTL_CH1_OUT_RGB16,
+			.src_pixel	= 0x2ca00565, /* RGB565 */
+			.ch1_pixel	= 0x2ca00565, /* RGB565 */
+			.irq_flags	= PRP_INTR_RDERR | PRP_INTR_CH1WERR |
+						PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+			.csicr1		= 0,
+		}
+	},
+	{
+		.in_fmt		= V4L2_MBUS_FMT_YUYV8_2X8,
+		.out_fmt	= V4L2_PIX_FMT_YUV420,
+		.cfg		= {
+			.channel	= 2,
+			.in_fmt		= PRP_CNTL_DATA_IN_YUV422,
+			.out_fmt	= PRP_CNTL_CH2_OUT_YUV420,
+			.src_pixel	= 0x22000888, /* YUV422 (YUYV) */
+			.irq_flags	= PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+					PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+					PRP_INTR_CH2OVF,
+			.csicr1		= CSICR1_PACK_DIR,
+		}
+	},
+	{
+		.in_fmt		= V4L2_MBUS_FMT_UYVY8_2X8,
+		.out_fmt	= V4L2_PIX_FMT_YUV420,
+		.cfg		= {
+			.channel	= 2,
+			.in_fmt		= PRP_CNTL_DATA_IN_YUV422,
+			.out_fmt	= PRP_CNTL_CH2_OUT_YUV420,
+			.src_pixel	= 0x22000888, /* YUV422 (YUYV) */
+			.irq_flags	= PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+					PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+					PRP_INTR_CH2OVF,
+			.csicr1		= CSICR1_SWAP16_EN,
+		}
+	},
+};
+
+static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
+					enum v4l2_mbus_pixelcode in_fmt,
+					u32 out_fmt)
+{
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(mx27_emma_prp_table); i++)
+		if ((mx27_emma_prp_table[i].in_fmt == in_fmt) &&
+				(mx27_emma_prp_table[i].out_fmt == out_fmt)) {
+			return &mx27_emma_prp_table[i];
+		}
+	/* If no match return the most generic configuration */
+	return &mx27_emma_prp_table[0];
+}
+
+static void mx27_update_emma_buf(struct mx2_camera_dev *pcdev,
+				 unsigned long phys, int bufnum)
+{
+	struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+
+	if (prp->cfg.channel == 1) {
+		writel(phys, pcdev->base_emma +
+				PRP_DEST_RGB1_PTR + 4 * bufnum);
+	} else {
+		writel(phys, pcdev->base_emma +
+			PRP_DEST_Y_PTR - 0x14 * bufnum);
+		if (prp->out_fmt == V4L2_PIX_FMT_YUV420) {
+			u32 imgsize = pcdev->icd->user_height *
+					pcdev->icd->user_width;
+
+			writel(phys + imgsize, pcdev->base_emma +
+				PRP_DEST_CB_PTR - 0x14 * bufnum);
+			writel(phys + ((5 * imgsize) / 4), pcdev->base_emma +
+				PRP_DEST_CR_PTR - 0x14 * bufnum);
+		}
+	}
+}
+
+static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
+{
+	unsigned long flags;
+
+	clk_disable(pcdev->clk_csi);
+	writel(0, pcdev->base_csi + CSICR1);
+	if (cpu_is_mx27()) {
+		writel(0, pcdev->base_emma + PRP_CNTL);
+	} else if (cpu_is_mx25()) {
+		spin_lock_irqsave(&pcdev->lock, flags);
+		pcdev->fb1_active = NULL;
+		pcdev->fb2_active = NULL;
+		writel(0, pcdev->base_csi + CSIDMASA_FB1);
+		writel(0, pcdev->base_csi + CSIDMASA_FB2);
+		spin_unlock_irqrestore(&pcdev->lock, flags);
+	}
+}
+
+/*
+ * The following two functions absolutely depend on the fact, that
+ * there can be only one camera on mx2 camera sensor interface
+ */
+static int mx2_camera_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	int ret;
+	u32 csicr1;
+
+	if (pcdev->icd)
+		return -EBUSY;
+
+	ret = clk_enable(pcdev->clk_csi);
+	if (ret < 0)
+		return ret;
+
+	csicr1 = CSICR1_MCLKEN;
+
+	if (cpu_is_mx27()) {
+		csicr1 |= CSICR1_PRP_IF_EN | CSICR1_FCC |
+			CSICR1_RXFF_LEVEL(0);
+	} else if (cpu_is_mx25())
+		csicr1 |= CSICR1_SOF_INTEN | CSICR1_RXFF_LEVEL(2);
+
+	pcdev->csicr1 = csicr1;
+	writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+
+	pcdev->icd = icd;
+	pcdev->frame_count = 0;
+
+	dev_info(icd->parent, "Camera driver attached to camera %d\n",
+		 icd->devnum);
+
+	return 0;
+}
+
+static void mx2_camera_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+
+	BUG_ON(icd != pcdev->icd);
+
+	dev_info(icd->parent, "Camera driver detached from camera %d\n",
+		 icd->devnum);
+
+	mx2_camera_deactivate(pcdev);
+
+	pcdev->icd = NULL;
+}
+
+static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
+		int state)
+{
+	struct vb2_buffer *vb;
+	struct mx2_buffer *buf;
+	struct mx2_buffer **fb_active = fb == 1 ? &pcdev->fb1_active :
+		&pcdev->fb2_active;
+	u32 fb_reg = fb == 1 ? CSIDMASA_FB1 : CSIDMASA_FB2;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	if (*fb_active == NULL)
+		goto out;
+
+	vb = &(*fb_active)->vb;
+	dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+	do_gettimeofday(&vb->v4l2_buf.timestamp);
+	vb->v4l2_buf.sequence++;
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+
+	if (list_empty(&pcdev->capture)) {
+		buf = NULL;
+		writel(0, pcdev->base_csi + fb_reg);
+	} else {
+		buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+				internal.queue);
+		vb = &buf->vb;
+		list_del(&buf->internal.queue);
+		buf->state = MX2_STATE_ACTIVE;
+		writel(vb2_dma_contig_plane_dma_addr(vb, 0),
+		       pcdev->base_csi + fb_reg);
+	}
+
+	*fb_active = buf;
+
+out:
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
+{
+	struct mx2_camera_dev *pcdev = data;
+	u32 status = readl(pcdev->base_csi + CSISR);
+
+	if (status & CSISR_DMA_TSF_FB1_INT)
+		mx25_camera_frame_done(pcdev, 1, MX2_STATE_DONE);
+	else if (status & CSISR_DMA_TSF_FB2_INT)
+		mx25_camera_frame_done(pcdev, 2, MX2_STATE_DONE);
+
+	/* FIXME: handle CSISR_RFF_OR_INT */
+
+	writel(status, pcdev->base_csi + CSISR);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Videobuf operations
+ */
+static int mx2_videobuf_setup(struct vb2_queue *vq,
+			const struct v4l2_format *fmt,
+			unsigned int *count, unsigned int *num_planes,
+			unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+
+	dev_dbg(icd->parent, "count=%d, size=%d\n", *count, sizes[0]);
+
+	/* TODO: support for VIDIOC_CREATE_BUFS not ready */
+	if (fmt != NULL)
+		return -ENOTTY;
+
+	alloc_ctxs[0] = pcdev->alloc_ctx;
+
+	sizes[0] = icd->sizeimage;
+
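+	/* Default to 32 buffers and cap total buffer memory at MAX_VIDEO_MEM MiB */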
+	if (0 == *count)
+		*count = 32;
+	if (!*num_planes &&
+	    sizes[0] * *count > MAX_VIDEO_MEM * 1024 * 1024)
+		*count = (MAX_VIDEO_MEM * 1024 * 1024) / sizes[0];
+
+	*num_planes = 1;
+
+	return 0;
+}
+
+static int mx2_videobuf_prepare(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	int ret = 0;
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+#ifdef DEBUG
+	/*
+	 * This can be useful if you want to see if we actually fill
+	 * the buffer with something
+	 */
+	memset((void *)vb2_plane_vaddr(vb, 0),
+	       0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+	vb2_set_plane_payload(vb, 0, icd->sizeimage);
+	if (vb2_plane_vaddr(vb, 0) &&
+	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static void mx2_videobuf_queue(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici =
+		to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
+	unsigned long flags;
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	buf->state = MX2_STATE_QUEUED;
+	list_add_tail(&buf->internal.queue, &pcdev->capture);
+
+	if (cpu_is_mx25()) {
+		u32 csicr3, dma_inten = 0;
+
+		if (pcdev->fb1_active == NULL) {
+			writel(vb2_dma_contig_plane_dma_addr(vb, 0),
+					pcdev->base_csi + CSIDMASA_FB1);
+			pcdev->fb1_active = buf;
+			dma_inten = CSICR1_FB1_DMA_INTEN;
+		} else if (pcdev->fb2_active == NULL) {
+			writel(vb2_dma_contig_plane_dma_addr(vb, 0),
+					pcdev->base_csi + CSIDMASA_FB2);
+			pcdev->fb2_active = buf;
+			dma_inten = CSICR1_FB2_DMA_INTEN;
+		}
+
+		if (dma_inten) {
+			list_del(&buf->internal.queue);
+			buf->state = MX2_STATE_ACTIVE;
+
+			csicr3 = readl(pcdev->base_csi + CSICR3);
+
+			/* Reflash DMA */
+			writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
+					pcdev->base_csi + CSICR3);
+
+			/* clear & enable interrupts */
+			writel(dma_inten, pcdev->base_csi + CSISR);
+			pcdev->csicr1 |= dma_inten;
+			writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+
+			/* enable DMA */
+			csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
+			writel(csicr3, pcdev->base_csi + CSICR3);
+		}
+	}
+
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static void mx2_videobuf_release(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
+	unsigned long flags;
+
+#ifdef DEBUG
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+	switch (buf->state) {
+	case MX2_STATE_ACTIVE:
+		dev_info(icd->parent, "%s (active)\n", __func__);
+		break;
+	case MX2_STATE_QUEUED:
+		dev_info(icd->parent, "%s (queued)\n", __func__);
+		break;
+	default:
+		dev_info(icd->parent, "%s (unknown) %d\n", __func__,
+				buf->state);
+		break;
+	}
+#endif
+
+	/*
+	 * Terminate only queued but inactive buffers. Active buffers are
+	 * released when they become inactive after videobuf_waiton().
+	 *
+	 * FIXME: implement forced termination of active buffers for mx27 and
+	 * mx27 eMMA, so that the user won't get stuck in an uninterruptible
+	 * state. This requires specific handling for each of these DMA
+	 * types.
+	 */
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+	if (cpu_is_mx25() && buf->state == MX2_STATE_ACTIVE) {
+		if (pcdev->fb1_active == buf) {
+			pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
+			writel(0, pcdev->base_csi + CSIDMASA_FB1);
+			pcdev->fb1_active = NULL;
+		} else if (pcdev->fb2_active == buf) {
+			pcdev->csicr1 &= ~CSICR1_FB2_DMA_INTEN;
+			writel(0, pcdev->base_csi + CSIDMASA_FB2);
+			pcdev->fb2_active = NULL;
+		}
+		writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+	}
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
+		int bytesperline)
+{
+	struct soc_camera_host *ici =
+		to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+
+	writel((pcdev->s_width << 16) | pcdev->s_height,
+	       pcdev->base_emma + PRP_SRC_FRAME_SIZE);
+	writel(prp->cfg.src_pixel,
+	       pcdev->base_emma + PRP_SRC_PIXEL_FORMAT_CNTL);
+	if (prp->cfg.channel == 1) {
+		writel((icd->user_width << 16) | icd->user_height,
+			pcdev->base_emma + PRP_CH1_OUT_IMAGE_SIZE);
+		writel(bytesperline,
+			pcdev->base_emma + PRP_DEST_CH1_LINE_STRIDE);
+		writel(prp->cfg.ch1_pixel,
+			pcdev->base_emma + PRP_CH1_PIXEL_FORMAT_CNTL);
+	} else { /* channel 2 */
+		writel((icd->user_width << 16) | icd->user_height,
+			pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE);
+	}
+
+	/* Enable interrupts */
+	writel(prp->cfg.irq_flags, pcdev->base_emma + PRP_INTR_CNTL);
+}
+
+static void mx2_prp_resize_commit(struct mx2_camera_dev *pcdev)
+{
+	int dir;
+
+	for (dir = RESIZE_DIR_H; dir <= RESIZE_DIR_V; dir++) {
+		unsigned char *s = pcdev->resizing[dir].s;
+		int len = pcdev->resizing[dir].len;
+		unsigned int coeff[2] = {0, 0};
+		unsigned int valid  = 0;
+		int i;
+
+		if (len == 0)
+			continue;
+
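+		/*
+		 * Pack the 3-bit fraction of each tap into the two COEF
+		 * registers (taps 0-9 into coeff[0], taps 10-19 into coeff[1])
+		 * and collect the per-tap flag bits into 'valid'; the extra
+		 * shift at taps 5 and 15 presumably accounts for a one-bit
+		 * gap in the hardware register layout.
+		 */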
+		for (i = RESIZE_NUM_MAX - 1; i >= 0; i--) {
+			int j;
+
+			j = i > 9 ? 1 : 0;
+			coeff[j] = (coeff[j] << BC_COEF) |
+					(s[i] & (SZ_COEF - 1));
+
+			if (i == 5 || i == 15)
+				coeff[j] <<= 1;
+
+			valid = (valid << 1) | (s[i] >> BC_COEF);
+		}
+
+		valid |= PRP_RZ_VALID_TBL_LEN(len);
+
+		if (pcdev->resizing[dir].algo == RESIZE_ALGO_BILINEAR)
+			valid |= PRP_RZ_VALID_BILINEAR;
+
+		if (pcdev->emma_prp->cfg.channel == 1) {
+			if (dir == RESIZE_DIR_H) {
+				writel(coeff[0], pcdev->base_emma +
+							PRP_CH1_RZ_HORI_COEF1);
+				writel(coeff[1], pcdev->base_emma +
+							PRP_CH1_RZ_HORI_COEF2);
+				writel(valid, pcdev->base_emma +
+							PRP_CH1_RZ_HORI_VALID);
+			} else {
+				writel(coeff[0], pcdev->base_emma +
+							PRP_CH1_RZ_VERT_COEF1);
+				writel(coeff[1], pcdev->base_emma +
+							PRP_CH1_RZ_VERT_COEF2);
+				writel(valid, pcdev->base_emma +
+							PRP_CH1_RZ_VERT_VALID);
+			}
+		} else {
+			if (dir == RESIZE_DIR_H) {
+				writel(coeff[0], pcdev->base_emma +
+							PRP_CH2_RZ_HORI_COEF1);
+				writel(coeff[1], pcdev->base_emma +
+							PRP_CH2_RZ_HORI_COEF2);
+				writel(valid, pcdev->base_emma +
+							PRP_CH2_RZ_HORI_VALID);
+			} else {
+				writel(coeff[0], pcdev->base_emma +
+							PRP_CH2_RZ_VERT_COEF1);
+				writel(coeff[1], pcdev->base_emma +
+							PRP_CH2_RZ_VERT_COEF2);
+				writel(valid, pcdev->base_emma +
+							PRP_CH2_RZ_VERT_VALID);
+			}
+		}
+	}
+}
+
+static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+	struct soc_camera_host *ici =
+		to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+	struct vb2_buffer *vb;
+	struct mx2_buffer *buf;
+	unsigned long phys;
+	int bytesperline;
+
+	if (cpu_is_mx27()) {
+		unsigned long flags;
+		if (count < 2)
+			return -EINVAL;
+
+		spin_lock_irqsave(&pcdev->lock, flags);
+
+		buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+				       internal.queue);
+		buf->internal.bufnum = 0;
+		vb = &buf->vb;
+		buf->state = MX2_STATE_ACTIVE;
+
+		phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+		mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+		list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+
+		buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+				       internal.queue);
+		buf->internal.bufnum = 1;
+		vb = &buf->vb;
+		buf->state = MX2_STATE_ACTIVE;
+
+		phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+		mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+		list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+
+		bytesperline = soc_mbus_bytes_per_line(icd->user_width,
+				icd->current_fmt->host_fmt);
+		if (bytesperline < 0)
+			return bytesperline;
+
+		/*
+		 * I didn't manage to properly enable/disable the prp
+		 * on a per frame basis during running transfers,
+		 * thus we allocate a buffer here and use it to
+		 * discard frames when no buffer is available.
+		 * Feel free to work on this ;)
+		 */
+		pcdev->discard_size = icd->user_height * bytesperline;
+		pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
+				pcdev->discard_size, &pcdev->discard_buffer_dma,
+				GFP_KERNEL);
+		if (!pcdev->discard_buffer)
+			return -ENOMEM;
+
+		pcdev->buf_discard[0].discard = true;
+		list_add_tail(&pcdev->buf_discard[0].queue,
+				      &pcdev->discard);
+
+		pcdev->buf_discard[1].discard = true;
+		list_add_tail(&pcdev->buf_discard[1].queue,
+				      &pcdev->discard);
+
+		mx2_prp_resize_commit(pcdev);
+
+		mx27_camera_emma_buf_init(icd, bytesperline);
+
+		if (prp->cfg.channel == 1) {
+			writel(PRP_CNTL_CH1EN |
+				PRP_CNTL_CSIEN |
+				prp->cfg.in_fmt |
+				prp->cfg.out_fmt |
+				PRP_CNTL_CH1_LEN |
+				PRP_CNTL_CH1BYP |
+				PRP_CNTL_CH1_TSKIP(0) |
+				PRP_CNTL_IN_TSKIP(0),
+				pcdev->base_emma + PRP_CNTL);
+		} else {
+			writel(PRP_CNTL_CH2EN |
+				PRP_CNTL_CSIEN |
+				prp->cfg.in_fmt |
+				prp->cfg.out_fmt |
+				PRP_CNTL_CH2_LEN |
+				PRP_CNTL_CH2_TSKIP(0) |
+				PRP_CNTL_IN_TSKIP(0),
+				pcdev->base_emma + PRP_CNTL);
+		}
+		spin_unlock_irqrestore(&pcdev->lock, flags);
+	}
+
+	return 0;
+}
+
+static int mx2_stop_streaming(struct vb2_queue *q)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+	struct soc_camera_host *ici =
+		to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+	unsigned long flags;
+	void *b;
+	u32 cntl;
+
+	if (cpu_is_mx27()) {
+		spin_lock_irqsave(&pcdev->lock, flags);
+
+		cntl = readl(pcdev->base_emma + PRP_CNTL);
+		if (prp->cfg.channel == 1) {
+			writel(cntl & ~PRP_CNTL_CH1EN,
+			       pcdev->base_emma + PRP_CNTL);
+		} else {
+			writel(cntl & ~PRP_CNTL_CH2EN,
+			       pcdev->base_emma + PRP_CNTL);
+		}
+		INIT_LIST_HEAD(&pcdev->capture);
+		INIT_LIST_HEAD(&pcdev->active_bufs);
+		INIT_LIST_HEAD(&pcdev->discard);
+
+		b = pcdev->discard_buffer;
+		pcdev->discard_buffer = NULL;
+
+		spin_unlock_irqrestore(&pcdev->lock, flags);
+
+		dma_free_coherent(ici->v4l2_dev.dev,
+			pcdev->discard_size, b, pcdev->discard_buffer_dma);
+	}
+
+	return 0;
+}
+
+static struct vb2_ops mx2_videobuf_ops = {
+	.queue_setup	 = mx2_videobuf_setup,
+	.buf_prepare	 = mx2_videobuf_prepare,
+	.buf_queue	 = mx2_videobuf_queue,
+	.buf_cleanup	 = mx2_videobuf_release,
+	.start_streaming = mx2_start_streaming,
+	.stop_streaming	 = mx2_stop_streaming,
+};
+
+static int mx2_camera_init_videobuf(struct vb2_queue *q,
+			      struct soc_camera_device *icd)
+{
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->drv_priv = icd;
+	q->ops = &mx2_videobuf_ops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct mx2_buffer);
+
+	return vb2_queue_init(q);
+}
+
+#define MX2_BUS_FLAGS	(V4L2_MBUS_MASTER | \
+			V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+			V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+			V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+			V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+			V4L2_MBUS_PCLK_SAMPLE_RISING | \
+			V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+			V4L2_MBUS_DATA_ACTIVE_HIGH | \
+			V4L2_MBUS_DATA_ACTIVE_LOW)
+
+static int mx27_camera_emma_prp_reset(struct mx2_camera_dev *pcdev)
+{
+	u32 cntl;
+	int count = 0;
+
+	cntl = readl(pcdev->base_emma + PRP_CNTL);
+	writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
+	while (count++ < 100) {
+		if (!(readl(pcdev->base_emma + PRP_CNTL) & PRP_CNTL_SWRST))
+			return 0;
+		barrier();
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long common_flags;
+	int ret;
+	int bytesperline;
+	u32 csicr1 = pcdev->csicr1;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg, MX2_BUS_FLAGS);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%x\n",
+				 cfg.flags, MX2_BUS_FLAGS);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = MX2_BUS_FLAGS;
+	}
+
+	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+		if (pcdev->platform_flags & MX2_CAMERA_HSYNC_HIGH)
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+		else
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+	}
+
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+	    (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+		if (pcdev->platform_flags & MX2_CAMERA_PCLK_SAMPLE_RISING)
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+		else
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
+
+	if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+		csicr1 |= CSICR1_REDGE;
+	if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+		csicr1 |= CSICR1_SOF_POL;
+	if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+		csicr1 |= CSICR1_HSYNC_POL;
+	if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
+		csicr1 |= CSICR1_EXT_VSYNC;
+	if (pcdev->platform_flags & MX2_CAMERA_CCIR)
+		csicr1 |= CSICR1_CCIR_EN;
+	if (pcdev->platform_flags & MX2_CAMERA_CCIR_INTERLACE)
+		csicr1 |= CSICR1_CCIR_MODE;
+	if (pcdev->platform_flags & MX2_CAMERA_GATED_CLOCK)
+		csicr1 |= CSICR1_GCLK_MODE;
+	if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
+		csicr1 |= CSICR1_INV_DATA;
+
+	pcdev->csicr1 = csicr1;
+
+	bytesperline = soc_mbus_bytes_per_line(icd->user_width,
+			icd->current_fmt->host_fmt);
+	if (bytesperline < 0)
+		return bytesperline;
+
+	if (cpu_is_mx27()) {
+		ret = mx27_camera_emma_prp_reset(pcdev);
+		if (ret)
+			return ret;
+	} else if (cpu_is_mx25()) {
+		writel((bytesperline * icd->user_height) >> 2,
+				pcdev->base_csi + CSIRXCNT);
+		writel((bytesperline << 16) | icd->user_height,
+				pcdev->base_csi + CSIIMAG_PARA);
+	}
+
+	writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+
+	return 0;
+}
+
+static int mx2_camera_set_crop(struct soc_camera_device *icd,
+				struct v4l2_crop *a)
+{
+	struct v4l2_rect *rect = &a->c;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096);
+	soc_camera_limit_side(&rect->top, &rect->height, 0, 2, 4096);
+
+	ret = v4l2_subdev_call(sd, video, s_crop, a);
+	if (ret < 0)
+		return ret;
+
+	/* The capture device might have changed its output sizes */
+	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
+		mf.width, mf.height);
+
+	icd->user_width		= mf.width;
+	icd->user_height	= mf.height;
+
+	return ret;
+}
+
+static int mx2_camera_get_formats(struct soc_camera_device *icd,
+				  unsigned int idx,
+				  struct soc_camera_format_xlate *xlate)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_mbus_pixelfmt *fmt;
+	struct device *dev = icd->parent;
+	enum v4l2_mbus_pixelcode code;
+	int ret, formats = 0;
+
+	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+	if (ret < 0)
+		/* no more formats */
+		return 0;
+
+	fmt = soc_mbus_get_fmtdesc(code);
+	if (!fmt) {
+		dev_err(dev, "Invalid format code #%u: %d\n", idx, code);
+		return 0;
+	}
+
+	if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+	    code == V4L2_MBUS_FMT_UYVY8_2X8) {
+		formats++;
+		if (xlate) {
+			/*
+			 * CH2 can output YUV420 which is a standard format in
+			 * soc_mediabus.c
+			 */
+			xlate->host_fmt =
+				soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_1_5X8);
+			xlate->code	= code;
+			dev_dbg(dev, "Providing host format %s for sensor code %d\n",
+			       xlate->host_fmt->name, code);
+			xlate++;
+		}
+	}
+
+	/* Generic pass-through */
+	formats++;
+	if (xlate) {
+		xlate->host_fmt = fmt;
+		xlate->code	= code;
+		xlate++;
+	}
+	return formats;
+}
+
+static int mx2_emmaprp_resize(struct mx2_camera_dev *pcdev,
+			      struct v4l2_mbus_framefmt *mf_in,
+			      struct v4l2_pix_format *pix_out, bool apply)
+{
+	int num, den;
+	unsigned long m;
+	int i, dir;
+
+	for (dir = RESIZE_DIR_H; dir <= RESIZE_DIR_V; dir++) {
+		struct emma_prp_resize tmprsz;
+		unsigned char *s = tmprsz.s;
+		int len = 0;
+		int in, out;
+
+		if (dir == RESIZE_DIR_H) {
+			in = mf_in->width;
+			out = pix_out->width;
+		} else {
+			in = mf_in->height;
+			out = pix_out->height;
+		}
+
+		if (in < out)
+			return -EINVAL;
+		else if (in == out)
+			continue;
+
+		/* Calculate ratio */
+		m = gcd(in, out);
+		num = in / m;
+		den = out / m;
+		if (num > RESIZE_NUM_MAX)
+			return -EINVAL;
+
+		if ((num >= 2 * den) && (den == 1) &&
+		    (num < 9) && (!(num & 0x01))) {
+			int sum = 0;
+			int j;
+
+			/* Average scaling for >= 2:1 ratios */
+			/* Support can be added for num >=9 and odd values */
+
+			tmprsz.algo = RESIZE_ALGO_AVERAGING;
+			len = num;
+
+			for (i = 0; i < (len / 2); i++)
+				s[i] = 8;
+
+			do {
+				for (i = 0; i < (len / 2); i++) {
+					s[i] = s[i] >> 1;
+					sum = 0;
+					for (j = 0; j < (len / 2); j++)
+						sum += s[j];
+					if (sum == 4)
+						break;
+				}
+			} while (sum != 4);
+
+			for (i = (len / 2); i < len; i++)
+				s[i] = s[len - i - 1];
+
+			s[len - 1] |= SZ_COEF;
+		} else {
+			/* bilinear scaling for < 2:1 ratios */
+			int v; /* overflow counter */
+			int coeff, nxt; /* table output */
+			int in_pos_inc = 2 * den;
+			int out_pos = num;
+			int out_pos_inc = 2 * num;
+			int init_carry = num - den;
+			int carry = init_carry;
+
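+			/*
+			 * Bresenham-style walk over the input (step 2 * den)
+			 * and output (step 2 * num) sample grids: each output
+			 * sample gets one weighted tap plus zero taps for any
+			 * skipped input samples, and the loop stops once the
+			 * phase accumulator 'carry' wraps back to its initial
+			 * value.
+			 */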
+			tmprsz.algo = RESIZE_ALGO_BILINEAR;
+			v = den + in_pos_inc;
+			do {
+				coeff = v - out_pos;
+				out_pos += out_pos_inc;
+				carry += out_pos_inc;
+				for (nxt = 0; v < out_pos; nxt++) {
+					v += in_pos_inc;
+					carry -= in_pos_inc;
+				}
+
+				if (len > RESIZE_NUM_MAX)
+					return -EINVAL;
+
+				coeff = ((coeff << BC_COEF) +
+					(in_pos_inc >> 1)) / in_pos_inc;
+
+				if (coeff >= (SZ_COEF - 1))
+					coeff--;
+
+				coeff |= SZ_COEF;
+				s[len] = (unsigned char)coeff;
+				len++;
+
+				for (i = 1; i < nxt; i++) {
+					if (len >= RESIZE_NUM_MAX)
+						return -EINVAL;
+					s[len] = 0;
+					len++;
+				}
+			} while (carry != init_carry);
+		}
+		tmprsz.len = len;
+		if (dir == RESIZE_DIR_H)
+			mf_in->width = pix_out->width;
+		else
+			mf_in->height = pix_out->height;
+
+		if (apply)
+			memcpy(&pcdev->resizing[dir], &tmprsz, sizeof(tmprsz));
+	}
+	return 0;
+}
+
+static int mx2_camera_set_fmt(struct soc_camera_device *icd,
+			       struct v4l2_format *f)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	dev_dbg(icd->parent, "%s: requested params: width = %d, height = %d\n",
+		__func__, pix->width, pix->height);
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %x not found\n",
+				pix->pixelformat);
+		return -EINVAL;
+	}
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+	if (ret < 0 && ret != -ENOIOCTLCMD)
+		return ret;
+
+	/* Store width and height returned by the sensor for resizing */
+	pcdev->s_width = mf.width;
+	pcdev->s_height = mf.height;
+	dev_dbg(icd->parent, "%s: sensor params: width = %d, height = %d\n",
+		__func__, pcdev->s_width, pcdev->s_height);
+
+	pcdev->emma_prp = mx27_emma_prp_get_format(xlate->code,
+						   xlate->host_fmt->fourcc);
+
+	memset(pcdev->resizing, 0, sizeof(pcdev->resizing));
+	if ((mf.width != pix->width || mf.height != pix->height) &&
+		pcdev->emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
+		if (mx2_emmaprp_resize(pcdev, &mf, pix, true) < 0)
+			dev_dbg(icd->parent, "%s: can't resize\n", __func__);
+	}
+
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
+	pix->width		= mf.width;
+	pix->height		= mf.height;
+	pix->field		= mf.field;
+	pix->colorspace		= mf.colorspace;
+	icd->current_fmt	= xlate;
+
+	dev_dbg(icd->parent, "%s: returned params: width = %d, height = %d\n",
+		__func__, pix->width, pix->height);
+
+	return 0;
+}
+
+static int mx2_camera_try_fmt(struct soc_camera_device *icd,
+				  struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	__u32 pixfmt = pix->pixelformat;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx2_camera_dev *pcdev = ici->priv;
+	unsigned int width_limit;
+	int ret;
+
+	dev_dbg(icd->parent, "%s: requested params: width = %d, height = %d\n",
+		__func__, pix->width, pix->height);
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (pixfmt && !xlate) {
+		dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	/* FIXME: implement MX27 limits */
+
+	/* limit to MX25 hardware capabilities */
+	if (cpu_is_mx25()) {
+		if (xlate->host_fmt->bits_per_sample <= 8)
+			width_limit = 0xffff * 4;
+		else
+			width_limit = 0xffff * 2;
+		/* CSIIMAG_PARA limit */
+		if (pix->width > width_limit)
+			pix->width = width_limit;
+		if (pix->height > 0xffff)
+			pix->height = 0xffff;
+
+		pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+				xlate->host_fmt);
+		if (pix->bytesperline < 0)
+			return pix->bytesperline;
+		pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+						pix->bytesperline, pix->height);
+		/* Check against the CSIRXCNT limit */
+		if (pix->sizeimage > 4 * 0x3ffff) {
+			/* Adjust geometry, preserve aspect ratio */
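+			/*
+			 * With the aspect ratio kept constant the ratio
+			 * bytesperline / height does not change, so (assuming
+			 * sizeimage is roughly bytesperline * height) the
+			 * largest height still meeting the 4 * 0x3ffff limit
+			 * is sqrt(4 * 0x3ffff * height / bytesperline).
+			 */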
+			unsigned int new_height = int_sqrt(div_u64(0x3ffffULL *
+					4 * pix->height, pix->bytesperline));
+			pix->width = new_height * pix->width / pix->height;
+			pix->height = new_height;
+			pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+							xlate->host_fmt);
+			BUG_ON(pix->bytesperline < 0);
+			pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+						pix->bytesperline, pix->height);
+		}
+	}
+
+	/* limit to sensor capabilities */
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(icd->parent, "%s: sensor params: width = %d, height = %d\n",
+		__func__, pcdev->s_width, pcdev->s_height);
+
+	/* If the sensor does not support image size try PrP resizing */
+	pcdev->emma_prp = mx27_emma_prp_get_format(xlate->code,
+						   xlate->host_fmt->fourcc);
+
+	memset(pcdev->resizing, 0, sizeof(pcdev->resizing));
+	if ((mf.width != pix->width || mf.height != pix->height) &&
+		pcdev->emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
+		if (mx2_emmaprp_resize(pcdev, &mf, pix, false) < 0)
+			dev_dbg(icd->parent, "%s: can't resize\n", __func__);
+	}
+
+	if (mf.field == V4L2_FIELD_ANY)
+		mf.field = V4L2_FIELD_NONE;
+	/*
+	 * Driver supports interlaced images provided they have
+	 * both fields so that they can be processed as if they
+	 * were progressive.
+	 */
+	if (mf.field != V4L2_FIELD_NONE && !V4L2_FIELD_HAS_BOTH(mf.field)) {
+		dev_err(icd->parent, "Field type %d unsupported.\n",
+				mf.field);
+		return -EINVAL;
+	}
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->field	= mf.field;
+	pix->colorspace	= mf.colorspace;
+
+	dev_dbg(icd->parent, "%s: returned params: width = %d, height = %d\n",
+		__func__, pix->width, pix->height);
+
+	return 0;
+}
+
+static int mx2_camera_querycap(struct soc_camera_host *ici,
+			       struct v4l2_capability *cap)
+{
+	/* cap->name is set by the friendly caller:-> */
+	strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static unsigned int mx2_camera_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+
+	return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static struct soc_camera_host_ops mx2_soc_camera_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= mx2_camera_add_device,
+	.remove		= mx2_camera_remove_device,
+	.set_fmt	= mx2_camera_set_fmt,
+	.set_crop	= mx2_camera_set_crop,
+	.get_formats	= mx2_camera_get_formats,
+	.try_fmt	= mx2_camera_try_fmt,
+	.init_videobuf2	= mx2_camera_init_videobuf,
+	.poll		= mx2_camera_poll,
+	.querycap	= mx2_camera_querycap,
+	.set_bus_param	= mx2_camera_set_bus_param,
+};
+
+static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
+		int bufnum, bool err)
+{
+#ifdef DEBUG
+	struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+#endif
+	struct mx2_buf_internal *ibuf;
+	struct mx2_buffer *buf;
+	struct vb2_buffer *vb;
+	unsigned long phys;
+
+	ibuf = list_first_entry(&pcdev->active_bufs, struct mx2_buf_internal,
+			       queue);
+
+	BUG_ON(ibuf->bufnum != bufnum);
+
+	if (ibuf->discard) {
+		/*
+		 * Discard buffer must not be returned to user space.
+		 * Just return it to the discard queue.
+		 */
+		list_move_tail(pcdev->active_bufs.next, &pcdev->discard);
+	} else {
+		buf = mx2_ibuf_to_buf(ibuf);
+
+		vb = &buf->vb;
+#ifdef DEBUG
+		phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+		if (prp->cfg.channel == 1) {
+			if (readl(pcdev->base_emma + PRP_DEST_RGB1_PTR +
+				4 * bufnum) != phys) {
+				dev_err(pcdev->dev, "%lx != %x\n", phys,
+					readl(pcdev->base_emma +
+					PRP_DEST_RGB1_PTR + 4 * bufnum));
+			}
+		} else {
+			if (readl(pcdev->base_emma + PRP_DEST_Y_PTR -
+				0x14 * bufnum) != phys) {
+				dev_err(pcdev->dev, "%lx != %x\n", phys,
+					readl(pcdev->base_emma +
+					PRP_DEST_Y_PTR - 0x14 * bufnum));
+			}
+		}
+#endif
+		dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%p %lu\n", __func__, vb,
+				vb2_plane_vaddr(vb, 0),
+				vb2_get_plane_payload(vb, 0));
+
+		list_del_init(&buf->internal.queue);
+		do_gettimeofday(&vb->v4l2_buf.timestamp);
+		vb->v4l2_buf.sequence = pcdev->frame_count;
+		if (err)
+			vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+		else
+			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	}
+
+	pcdev->frame_count++;
+
+	if (list_empty(&pcdev->capture)) {
+		if (list_empty(&pcdev->discard)) {
+			dev_warn(pcdev->dev, "%s: trying to access empty discard list\n",
+				 __func__);
+			return;
+		}
+
+		ibuf = list_first_entry(&pcdev->discard,
+					struct mx2_buf_internal, queue);
+		ibuf->bufnum = bufnum;
+
+		list_move_tail(pcdev->discard.next, &pcdev->active_bufs);
+		mx27_update_emma_buf(pcdev, pcdev->discard_buffer_dma, bufnum);
+		return;
+	}
+
+	buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+			       internal.queue);
+
+	buf->internal.bufnum = bufnum;
+
+	list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+
+	vb = &buf->vb;
+	buf->state = MX2_STATE_ACTIVE;
+
+	phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+	mx27_update_emma_buf(pcdev, phys, bufnum);
+}
+
+static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
+{
+	struct mx2_camera_dev *pcdev = data;
+	unsigned int status = readl(pcdev->base_emma + PRP_INTRSTATUS);
+	struct mx2_buf_internal *ibuf;
+
+	spin_lock(&pcdev->lock);
+
+	if (list_empty(&pcdev->active_bufs)) {
+		dev_warn(pcdev->dev, "%s: called while active list is empty\n",
+			__func__);
+
+		if (!status) {
+			spin_unlock(&pcdev->lock);
+			return IRQ_NONE;
+		}
+	}
+
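+	/*
+	 * Status bit layout (same PrP register block as the PRP_INTR_ST_*
+	 * definitions used by mx2_emmaprp.c): bit 7 flags a line-buffer
+	 * overflow, bits 6/5 signal completion of channel-1 buffers 1/2 and
+	 * bits 4/3 the same for channel 2.  Buffer 1 corresponds to bufnum 0,
+	 * buffer 2 to bufnum 1.
+	 */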
+	if (status & (1 << 7)) { /* overflow */
+		u32 cntl = readl(pcdev->base_emma + PRP_CNTL);
+		writel(cntl & ~(PRP_CNTL_CH1EN | PRP_CNTL_CH2EN),
+		       pcdev->base_emma + PRP_CNTL);
+		writel(cntl, pcdev->base_emma + PRP_CNTL);
+
+		ibuf = list_first_entry(&pcdev->active_bufs,
+					struct mx2_buf_internal, queue);
+		mx27_camera_frame_done_emma(pcdev,
+					ibuf->bufnum, true);
+
+		status &= ~(1 << 7);
+	} else if (((status & (3 << 5)) == (3 << 5)) ||
+		((status & (3 << 3)) == (3 << 3))) {
+		/*
+		 * Both buffers have triggered, process the one we're expecting
+		 * to first
+		 */
+		ibuf = list_first_entry(&pcdev->active_bufs,
+					struct mx2_buf_internal, queue);
+		mx27_camera_frame_done_emma(pcdev, ibuf->bufnum, false);
+		status &= ~(1 << (6 - ibuf->bufnum)); /* mark processed */
+	} else if ((status & (1 << 6)) || (status & (1 << 4))) {
+		mx27_camera_frame_done_emma(pcdev, 0, false);
+	} else if ((status & (1 << 5)) || (status & (1 << 3))) {
+		mx27_camera_frame_done_emma(pcdev, 1, false);
+	}
+
+	spin_unlock(&pcdev->lock);
+	writel(status, pcdev->base_emma + PRP_INTRSTATUS);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev)
+{
+	struct resource *res_emma = pcdev->res_emma;
+	int err = 0;
+
+	if (!request_mem_region(res_emma->start, resource_size(res_emma),
+				MX2_CAM_DRV_NAME)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	pcdev->base_emma = ioremap(res_emma->start, resource_size(res_emma));
+	if (!pcdev->base_emma) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+
+	err = request_irq(pcdev->irq_emma, mx27_camera_emma_irq, 0,
+			MX2_CAM_DRV_NAME, pcdev);
+	if (err) {
+		dev_err(pcdev->dev, "Camera EMMA interrupt register failed \n");
+		goto exit_iounmap;
+	}
+
+	pcdev->clk_emma = clk_get(NULL, "emma");
+	if (IS_ERR(pcdev->clk_emma)) {
+		err = PTR_ERR(pcdev->clk_emma);
+		goto exit_free_irq;
+	}
+
+	clk_enable(pcdev->clk_emma);
+
+	err = mx27_camera_emma_prp_reset(pcdev);
+	if (err)
+		goto exit_clk_emma_put;
+
+	return err;
+
+exit_clk_emma_put:
+	clk_disable(pcdev->clk_emma);
+	clk_put(pcdev->clk_emma);
+exit_free_irq:
+	free_irq(pcdev->irq_emma, pcdev);
+exit_iounmap:
+	iounmap(pcdev->base_emma);
+exit_release:
+	release_mem_region(res_emma->start, resource_size(res_emma));
+out:
+	return err;
+}
+
+static int __devinit mx2_camera_probe(struct platform_device *pdev)
+{
+	struct mx2_camera_dev *pcdev;
+	struct resource *res_csi, *res_emma;
+	void __iomem *base_csi;
+	int irq_csi, irq_emma;
+	int err = 0;
+
+	dev_dbg(&pdev->dev, "initialising\n");
+
+	res_csi = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq_csi = platform_get_irq(pdev, 0);
+	if (res_csi == NULL || irq_csi < 0) {
+		dev_err(&pdev->dev, "Missing platform resources data\n");
+		err = -ENODEV;
+		goto exit;
+	}
+
+	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+	if (!pcdev) {
+		dev_err(&pdev->dev, "Could not allocate pcdev\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	pcdev->clk_csi = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pcdev->clk_csi)) {
+		dev_err(&pdev->dev, "Could not get csi clock\n");
+		err = PTR_ERR(pcdev->clk_csi);
+		goto exit_kfree;
+	}
+
+	pcdev->res_csi = res_csi;
+	pcdev->pdata = pdev->dev.platform_data;
+	if (pcdev->pdata) {
+		long rate;
+
+		pcdev->platform_flags = pcdev->pdata->flags;
+
+		rate = clk_round_rate(pcdev->clk_csi, pcdev->pdata->clk * 2);
+		if (rate <= 0) {
+			err = -ENODEV;
+			goto exit_dma_free;
+		}
+		err = clk_set_rate(pcdev->clk_csi, rate);
+		if (err < 0)
+			goto exit_dma_free;
+	}
+
+	INIT_LIST_HEAD(&pcdev->capture);
+	INIT_LIST_HEAD(&pcdev->active_bufs);
+	INIT_LIST_HEAD(&pcdev->discard);
+	spin_lock_init(&pcdev->lock);
+
+	/*
+	 * Request the regions.
+	 */
+	if (!request_mem_region(res_csi->start, resource_size(res_csi),
+				MX2_CAM_DRV_NAME)) {
+		err = -EBUSY;
+		goto exit_dma_free;
+	}
+
+	base_csi = ioremap(res_csi->start, resource_size(res_csi));
+	if (!base_csi) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+	pcdev->irq_csi = irq_csi;
+	pcdev->base_csi = base_csi;
+	pcdev->base_dma = res_csi->start;
+	pcdev->dev = &pdev->dev;
+
+	if (cpu_is_mx25()) {
+		err = request_irq(pcdev->irq_csi, mx25_camera_irq, 0,
+				MX2_CAM_DRV_NAME, pcdev);
+		if (err) {
+			dev_err(pcdev->dev, "Camera interrupt register failed \n");
+			goto exit_iounmap;
+		}
+	}
+
+	if (cpu_is_mx27()) {
+		/* EMMA support */
+		res_emma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		irq_emma = platform_get_irq(pdev, 1);
+
+		if (!res_emma || irq_emma < 0) {
+			dev_err(&pdev->dev, "no EMMA resources\n");
+			err = -ENODEV;
+			goto exit_free_irq;
+		}
+
+		pcdev->res_emma = res_emma;
+		pcdev->irq_emma = irq_emma;
+		err = mx27_camera_emma_init(pcdev);
+		if (err)
+			goto exit_free_irq;
+	}
+
+	pcdev->soc_host.drv_name	= MX2_CAM_DRV_NAME;
+	pcdev->soc_host.ops		= &mx2_soc_camera_host_ops;
+	pcdev->soc_host.priv		= pcdev;
+	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
+	pcdev->soc_host.nr		= pdev->id;
+	if (cpu_is_mx25())
+		pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
+
+	pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(pcdev->alloc_ctx)) {
+		err = PTR_ERR(pcdev->alloc_ctx);
+		goto eallocctx;
+	}
+	err = soc_camera_host_register(&pcdev->soc_host);
+	if (err)
+		goto exit_free_emma;
+
+	dev_info(&pdev->dev, "MX2 Camera (CSI) driver probed, clock frequency: %ld\n",
+			clk_get_rate(pcdev->clk_csi));
+
+	return 0;
+
+exit_free_emma:
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+eallocctx:
+	if (cpu_is_mx27()) {
+		free_irq(pcdev->irq_emma, pcdev);
+		clk_disable(pcdev->clk_emma);
+		clk_put(pcdev->clk_emma);
+		iounmap(pcdev->base_emma);
+		release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma));
+	}
+exit_free_irq:
+	if (cpu_is_mx25())
+		free_irq(pcdev->irq_csi, pcdev);
+exit_iounmap:
+	iounmap(base_csi);
+exit_release:
+	release_mem_region(res_csi->start, resource_size(res_csi));
+exit_dma_free:
+	clk_put(pcdev->clk_csi);
+exit_kfree:
+	kfree(pcdev);
+exit:
+	return err;
+}
+
+static int __devexit mx2_camera_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct mx2_camera_dev *pcdev = container_of(soc_host,
+			struct mx2_camera_dev, soc_host);
+	struct resource *res;
+
+	clk_put(pcdev->clk_csi);
+	if (cpu_is_mx25())
+		free_irq(pcdev->irq_csi, pcdev);
+	if (cpu_is_mx27())
+		free_irq(pcdev->irq_emma, pcdev);
+
+	soc_camera_host_unregister(&pcdev->soc_host);
+
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+
+	iounmap(pcdev->base_csi);
+
+	if (cpu_is_mx27()) {
+		clk_disable(pcdev->clk_emma);
+		clk_put(pcdev->clk_emma);
+		iounmap(pcdev->base_emma);
+		res = pcdev->res_emma;
+		release_mem_region(res->start, resource_size(res));
+	}
+
+	res = pcdev->res_csi;
+	release_mem_region(res->start, resource_size(res));
+
+	kfree(pcdev);
+
+	dev_info(&pdev->dev, "MX2 Camera driver unloaded\n");
+
+	return 0;
+}
+
+static struct platform_driver mx2_camera_driver = {
+	.driver 	= {
+		.name	= MX2_CAM_DRV_NAME,
+	},
+	.remove		= __devexit_p(mx2_camera_remove),
+};
+
+static int __init mx2_camera_init(void)
+{
+	return platform_driver_probe(&mx2_camera_driver, &mx2_camera_probe);
+}
+
+static void __exit mx2_camera_exit(void)
+{
+	platform_driver_unregister(&mx2_camera_driver);
+}
+
+module_init(mx2_camera_init);
+module_exit(mx2_camera_exit);
+
+MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
+MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MX2_CAM_VERSION);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
new file mode 100644
index 0000000..2810015
--- /dev/null
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -0,0 +1,1037 @@
+/*
+ * Support eMMa-PrP through mem2mem framework.
+ *
+ * eMMa-PrP is a piece of hardware that fetches buffers from one
+ * memory location and performs several operations on them, such as
+ * scaling or format conversion, producing a new processed buffer in
+ * another memory location.
+ *
+ * Based on mem2mem_testdev.c by Pawel Osciak.
+ *
+ * Copyright (c) 2011 Vista Silicon S.L.
+ * Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <asm/sizes.h>
+
+#define EMMAPRP_MODULE_NAME "mem2mem-emmaprp"
+
+MODULE_DESCRIPTION("Mem-to-mem device which supports eMMa-PrP present in mx2 SoCs");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 2040
+#define MAX_H 2046
+
+#define S_ALIGN		1 /* multiple of 2 */
+#define W_ALIGN_YUV420	3 /* multiple of 8 */
+#define W_ALIGN_OTHERS	2 /* multiple of 4 */
+#define H_ALIGN		1 /* multiple of 2 */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE	(1 << 0)
+#define MEM2MEM_OUTPUT	(1 << 1)
+
+#define MEM2MEM_NAME		"m2m-emmaprp"
+
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT	SZ_16M
+
+#define dprintk(dev, fmt, arg...) \
+	v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/* EMMA PrP */
+#define PRP_CNTL                        0x00
+#define PRP_INTR_CNTL                   0x04
+#define PRP_INTRSTATUS                  0x08
+#define PRP_SOURCE_Y_PTR                0x0c
+#define PRP_SOURCE_CB_PTR               0x10
+#define PRP_SOURCE_CR_PTR               0x14
+#define PRP_DEST_RGB1_PTR               0x18
+#define PRP_DEST_RGB2_PTR               0x1c
+#define PRP_DEST_Y_PTR                  0x20
+#define PRP_DEST_CB_PTR                 0x24
+#define PRP_DEST_CR_PTR                 0x28
+#define PRP_SRC_FRAME_SIZE              0x2c
+#define PRP_DEST_CH1_LINE_STRIDE        0x30
+#define PRP_SRC_PIXEL_FORMAT_CNTL       0x34
+#define PRP_CH1_PIXEL_FORMAT_CNTL       0x38
+#define PRP_CH1_OUT_IMAGE_SIZE          0x3c
+#define PRP_CH2_OUT_IMAGE_SIZE          0x40
+#define PRP_SRC_LINE_STRIDE             0x44
+#define PRP_CSC_COEF_012                0x48
+#define PRP_CSC_COEF_345                0x4c
+#define PRP_CSC_COEF_678                0x50
+#define PRP_CH1_RZ_HORI_COEF1           0x54
+#define PRP_CH1_RZ_HORI_COEF2           0x58
+#define PRP_CH1_RZ_HORI_VALID           0x5c
+#define PRP_CH1_RZ_VERT_COEF1           0x60
+#define PRP_CH1_RZ_VERT_COEF2           0x64
+#define PRP_CH1_RZ_VERT_VALID           0x68
+#define PRP_CH2_RZ_HORI_COEF1           0x6c
+#define PRP_CH2_RZ_HORI_COEF2           0x70
+#define PRP_CH2_RZ_HORI_VALID           0x74
+#define PRP_CH2_RZ_VERT_COEF1           0x78
+#define PRP_CH2_RZ_VERT_COEF2           0x7c
+#define PRP_CH2_RZ_VERT_VALID           0x80
+
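+/*
+ * The PrP has two output channels (cf. the CH1/CH2 register and bit names
+ * below): channel 1 writes packed RGB/YUV 4:2:2 frames via the DEST_RGB
+ * pointers, channel 2 writes YUV frames split over the DEST_Y/CB/CR
+ * pointers.  This mem2mem driver only drives channel 2.
+ */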
+#define PRP_CNTL_CH1EN          (1 << 0)
+#define PRP_CNTL_CH2EN          (1 << 1)
+#define PRP_CNTL_CSIEN          (1 << 2)
+#define PRP_CNTL_DATA_IN_YUV420 (0 << 3)
+#define PRP_CNTL_DATA_IN_YUV422 (1 << 3)
+#define PRP_CNTL_DATA_IN_RGB16  (2 << 3)
+#define PRP_CNTL_DATA_IN_RGB32  (3 << 3)
+#define PRP_CNTL_CH1_OUT_RGB8   (0 << 5)
+#define PRP_CNTL_CH1_OUT_RGB16  (1 << 5)
+#define PRP_CNTL_CH1_OUT_RGB32  (2 << 5)
+#define PRP_CNTL_CH1_OUT_YUV422 (3 << 5)
+#define PRP_CNTL_CH2_OUT_YUV420 (0 << 7)
+#define PRP_CNTL_CH2_OUT_YUV422 (1 << 7)
+#define PRP_CNTL_CH2_OUT_YUV444 (2 << 7)
+#define PRP_CNTL_CH1_LEN        (1 << 9)
+#define PRP_CNTL_CH2_LEN        (1 << 10)
+#define PRP_CNTL_SKIP_FRAME     (1 << 11)
+#define PRP_CNTL_SWRST          (1 << 12)
+#define PRP_CNTL_CLKEN          (1 << 13)
+#define PRP_CNTL_WEN            (1 << 14)
+#define PRP_CNTL_CH1BYP         (1 << 15)
+#define PRP_CNTL_IN_TSKIP(x)    ((x) << 16)
+#define PRP_CNTL_CH1_TSKIP(x)   ((x) << 19)
+#define PRP_CNTL_CH2_TSKIP(x)   ((x) << 22)
+#define PRP_CNTL_INPUT_FIFO_LEVEL(x)    ((x) << 25)
+#define PRP_CNTL_RZ_FIFO_LEVEL(x)       ((x) << 27)
+#define PRP_CNTL_CH2B1EN        (1 << 29)
+#define PRP_CNTL_CH2B2EN        (1 << 30)
+#define PRP_CNTL_CH2FEN         (1 << 31)
+
+#define PRP_SIZE_HEIGHT(x)	(x)
+#define PRP_SIZE_WIDTH(x)	((x) << 16)
+
+/* IRQ Enable and status register */
+#define PRP_INTR_RDERR          (1 << 0)
+#define PRP_INTR_CH1WERR        (1 << 1)
+#define PRP_INTR_CH2WERR        (1 << 2)
+#define PRP_INTR_CH1FC          (1 << 3)
+#define PRP_INTR_CH2FC          (1 << 5)
+#define PRP_INTR_LBOVF          (1 << 7)
+#define PRP_INTR_CH2OVF         (1 << 8)
+
+#define PRP_INTR_ST_RDERR	(1 << 0)
+#define PRP_INTR_ST_CH1WERR	(1 << 1)
+#define PRP_INTR_ST_CH2WERR	(1 << 2)
+#define PRP_INTR_ST_CH2B2CI	(1 << 3)
+#define PRP_INTR_ST_CH2B1CI	(1 << 4)
+#define PRP_INTR_ST_CH1B2CI	(1 << 5)
+#define PRP_INTR_ST_CH1B1CI	(1 << 6)
+#define PRP_INTR_ST_LBOVF	(1 << 7)
+#define PRP_INTR_ST_CH2OVF	(1 << 8)
+
+struct emmaprp_fmt {
+	char	*name;
+	u32	fourcc;
+	/* Types the format can be used for */
+	u32	types;
+};
+
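+/*
+ * The only conversion wired up in this driver: packed YUYV on the OUTPUT
+ * queue is converted into planar YUV 4:2:0 on the CAPTURE queue by PrP
+ * channel 2.
+ */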
+static struct emmaprp_fmt formats[] = {
+	{
+		.name	= "YUV 4:2:0 Planar",
+		.fourcc	= V4L2_PIX_FMT_YUV420,
+		.types	= MEM2MEM_CAPTURE,
+	},
+	{
+		.name	= "4:2:2, packed, YUYV",
+		.fourcc	= V4L2_PIX_FMT_YUYV,
+		.types	= MEM2MEM_OUTPUT,
+	},
+};
+
+/* Per-queue, driver-specific private data */
+struct emmaprp_q_data {
+	unsigned int		width;
+	unsigned int		height;
+	unsigned int		sizeimage;
+	struct emmaprp_fmt	*fmt;
+};
+
+enum {
+	V4L2_M2M_SRC = 0,
+	V4L2_M2M_DST = 1,
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct emmaprp_fmt *find_format(struct v4l2_format *f)
+{
+	struct emmaprp_fmt *fmt;
+	unsigned int k;
+
+	for (k = 0; k < NUM_FORMATS; k++) {
+		fmt = &formats[k];
+		if (fmt->fourcc == f->fmt.pix.pixelformat)
+			break;
+	}
+
+	if (k == NUM_FORMATS)
+		return NULL;
+
+	return &formats[k];
+}
+
+struct emmaprp_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vfd;
+
+	struct mutex		dev_mutex;
+	spinlock_t		irqlock;
+
+	int			irq_emma;
+	void __iomem		*base_emma;
+	struct clk		*clk_emma_ahb, *clk_emma_ipg;
+	struct resource		*res_emma;
+
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct vb2_alloc_ctx	*alloc_ctx;
+};
+
+struct emmaprp_ctx {
+	struct emmaprp_dev	*dev;
+	/* Abort requested by m2m */
+	int			aborting;
+	struct emmaprp_q_data	q_data[2];
+	struct v4l2_m2m_ctx	*m2m_ctx;
+};
+
+static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx,
+					 enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &(ctx->q_data[V4L2_M2M_SRC]);
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &(ctx->q_data[V4L2_M2M_DST]);
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+/*
+ * mem2mem callbacks
+ */
+static void emmaprp_job_abort(void *priv)
+{
+	struct emmaprp_ctx *ctx = priv;
+	struct emmaprp_dev *pcdev = ctx->dev;
+
+	ctx->aborting = 1;
+
+	dprintk(pcdev, "Aborting task\n");
+
+	v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void emmaprp_lock(void *priv)
+{
+	struct emmaprp_ctx *ctx = priv;
+	struct emmaprp_dev *pcdev = ctx->dev;
+	mutex_lock(&pcdev->dev_mutex);
+}
+
+static void emmaprp_unlock(void *priv)
+{
+	struct emmaprp_ctx *ctx = priv;
+	struct emmaprp_dev *pcdev = ctx->dev;
+	mutex_unlock(&pcdev->dev_mutex);
+}
+
+static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev)
+{
+	dprintk(pcdev,
+		"eMMa-PrP Registers:\n"
+		"  SOURCE_Y_PTR = 0x%08X\n"
+		"  SRC_FRAME_SIZE = 0x%08X\n"
+		"  DEST_Y_PTR = 0x%08X\n"
+		"  DEST_CR_PTR = 0x%08X\n"
+		"  DEST_CB_PTR = 0x%08X\n"
+		"  CH2_OUT_IMAGE_SIZE = 0x%08X\n"
+		"  CNTL = 0x%08X\n",
+		readl(pcdev->base_emma + PRP_SOURCE_Y_PTR),
+		readl(pcdev->base_emma + PRP_SRC_FRAME_SIZE),
+		readl(pcdev->base_emma + PRP_DEST_Y_PTR),
+		readl(pcdev->base_emma + PRP_DEST_CR_PTR),
+		readl(pcdev->base_emma + PRP_DEST_CB_PTR),
+		readl(pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE),
+		readl(pcdev->base_emma + PRP_CNTL));
+}
+
+static void emmaprp_device_run(void *priv)
+{
+	struct emmaprp_ctx *ctx = priv;
+	struct emmaprp_q_data *s_q_data, *d_q_data;
+	struct vb2_buffer *src_buf, *dst_buf;
+	struct emmaprp_dev *pcdev = ctx->dev;
+	unsigned int s_width, s_height;
+	unsigned int d_width, d_height;
+	unsigned int d_size;
+	dma_addr_t p_in, p_out;
+	u32 tmp;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+	s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	s_width	= s_q_data->width;
+	s_height = s_q_data->height;
+
+	d_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	d_width = d_q_data->width;
+	d_height = d_q_data->height;
+	d_size = d_width * d_height;
+
+	p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+	if (!p_in || !p_out) {
+		v4l2_err(&pcdev->v4l2_dev,
+			 "Acquiring kernel pointers to buffers failed\n");
+		return;
+	}
+
+	/* Input frame parameters */
+	writel(p_in, pcdev->base_emma + PRP_SOURCE_Y_PTR);
+	writel(PRP_SIZE_WIDTH(s_width) | PRP_SIZE_HEIGHT(s_height),
+	       pcdev->base_emma + PRP_SRC_FRAME_SIZE);
+
+	/*
+	 * Output frame parameters: planar YUV 4:2:0, so the Cb plane starts
+	 * d_size (= d_width * d_height) bytes after the Y plane and the Cr
+	 * plane a further d_size / 4 bytes after that.
+	 */
+	writel(p_out, pcdev->base_emma + PRP_DEST_Y_PTR);
+	writel(p_out + d_size, pcdev->base_emma + PRP_DEST_CB_PTR);
+	writel(p_out + d_size + (d_size >> 2),
+	       pcdev->base_emma + PRP_DEST_CR_PTR);
+	writel(PRP_SIZE_WIDTH(d_width) | PRP_SIZE_HEIGHT(d_height),
+	       pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE);
+
+	/* IRQ configuration */
+	tmp = readl(pcdev->base_emma + PRP_INTR_CNTL);
+	writel(tmp | PRP_INTR_RDERR |
+		PRP_INTR_CH2WERR |
+		PRP_INTR_CH2FC,
+		pcdev->base_emma + PRP_INTR_CNTL);
+
+	emmaprp_dump_regs(pcdev);
+
+	/* Enable transfer */
+	tmp = readl(pcdev->base_emma + PRP_CNTL);
+	writel(tmp | PRP_CNTL_CH2_OUT_YUV420 |
+		PRP_CNTL_DATA_IN_YUV422 |
+		PRP_CNTL_CH2EN,
+		pcdev->base_emma + PRP_CNTL);
+}
+
+static irqreturn_t emmaprp_irq(int irq_emma, void *data)
+{
+	struct emmaprp_dev *pcdev = data;
+	struct emmaprp_ctx *curr_ctx;
+	struct vb2_buffer *src_vb, *dst_vb;
+	unsigned long flags;
+	u32 irqst;
+
+	/* Check irq flags and clear irq */
+	irqst = readl(pcdev->base_emma + PRP_INTRSTATUS);
+	writel(irqst, pcdev->base_emma + PRP_INTRSTATUS);
+	dprintk(pcdev, "irqst = 0x%08x\n", irqst);
+
+	curr_ctx = v4l2_m2m_get_curr_priv(pcdev->m2m_dev);
+	if (curr_ctx == NULL) {
+		pr_err("Instance released before the end of transaction\n");
+		return IRQ_HANDLED;
+	}
+
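+	/*
+	 * A read error or a channel-2 write error makes the transfer
+	 * unusable, so reset the PrP and finish the job without marking the
+	 * buffers done.  A channel-2 buffer-1 complete interrupt means the
+	 * conversion finished and both m2m buffers can be returned.
+	 */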
+	if (!curr_ctx->aborting) {
+		if ((irqst & PRP_INTR_ST_RDERR) ||
+		(irqst & PRP_INTR_ST_CH2WERR)) {
+			pr_err("PrP bus error ocurred, this transfer is probably corrupted\n");
+			writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
+		} else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */
+			src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+			dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+			spin_lock_irqsave(&pcdev->irqlock, flags);
+			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+			spin_unlock_irqrestore(&pcdev->irqlock, flags);
+		}
+	}
+
+	v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+	return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+	strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left only for backward compatibility
+	 * and are scheduled for removal.
+	 */
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+			    V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+	int i, num;
+	struct emmaprp_fmt *fmt;
+
+	num = 0;
+
+	for (i = 0; i < NUM_FORMATS; ++i) {
+		if (formats[i].types & type) {
+			/* index-th format of the requested type found? */
+			if (num == f->index)
+				break;
+			/* Correct type but haven't reached our index yet,
+			 * just increment per-type index */
+			++num;
+		}
+	}
+
+	if (i < NUM_FORMATS) {
+		/* Format found */
+		fmt = &formats[i];
+		strlcpy(f->description, fmt->name, sizeof(f->description) - 1);
+		f->pixelformat = fmt->fourcc;
+		return 0;
+	}
+
+	/* Format not found */
+	return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct emmaprp_q_data *q_data;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+
+	f->fmt.pix.width	= q_data->width;
+	f->fmt.pix.height	= q_data->height;
+	f->fmt.pix.field	= V4L2_FIELD_NONE;
+	f->fmt.pix.pixelformat	= q_data->fmt->fourcc;
+	if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
+		f->fmt.pix.bytesperline = q_data->width * 3 / 2;
+	else /* YUYV */
+		f->fmt.pix.bytesperline = q_data->width * 2;
+	f->fmt.pix.sizeimage	= q_data->sizeimage;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f)
+{
+	enum v4l2_field field;
+
+	if (!find_format(f))
+		return -EINVAL;
+
+	field = f->fmt.pix.field;
+	if (field == V4L2_FIELD_ANY)
+		field = V4L2_FIELD_NONE;
+	else if (V4L2_FIELD_NONE != field)
+		return -EINVAL;
+
+	/* V4L2 specification suggests the driver corrects the format struct
+	 * if any of the dimensions is unsupported */
+	f->fmt.pix.field = field;
+
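+	/*
+	 * YUV 4:2:0 needs the stricter 8-pixel width alignment
+	 * (W_ALIGN_YUV420); all other formats only need 4-pixel alignment
+	 * (W_ALIGN_OTHERS).  Height is always rounded to a multiple of 2.
+	 */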
+	if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
+		v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+				      W_ALIGN_YUV420, &f->fmt.pix.height,
+				      MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+		f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2;
+	} else {
+		v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W,
+				      W_ALIGN_OTHERS, &f->fmt.pix.height,
+				      MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+		f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+	}
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct emmaprp_fmt *fmt;
+	struct emmaprp_ctx *ctx = priv;
+
+	fmt = find_format(f);
+	if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	return vidioc_try_fmt(f);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct emmaprp_fmt *fmt;
+	struct emmaprp_ctx *ctx = priv;
+
+	fmt = find_format(f);
+	if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	return vidioc_try_fmt(f);
+}
+
+static int vidioc_s_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
+{
+	struct emmaprp_q_data *q_data;
+	struct vb2_queue *vq;
+	int ret;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = vidioc_try_fmt(f);
+	if (ret)
+		return ret;
+
+	q_data->fmt		= find_format(f);
+	q_data->width		= f->fmt.pix.width;
+	q_data->height		= f->fmt.pix.height;
+	if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420)
+		q_data->sizeimage = q_data->width * q_data->height * 3 / 2;
+	else /* YUYV */
+		q_data->sizeimage = q_data->width * q_data->height * 2;
+
+	dprintk(ctx->dev,
+		"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+		f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+	return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = vidioc_try_fmt_vid_cap(file, priv, f);
+	if (ret)
+		return ret;
+
+	return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = vidioc_try_fmt_vid_out(file, priv, f);
+	if (ret)
+		return ret;
+
+	return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	struct emmaprp_ctx *ctx = priv;
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct emmaprp_ctx *ctx = priv;
+
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct emmaprp_ctx *ctx = priv;
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct emmaprp_ctx *ctx = priv;
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct emmaprp_ctx *ctx = priv;
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct emmaprp_ctx *ctx = priv;
+
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	= vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= vidioc_s_fmt_vid_cap,
+
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out	= vidioc_g_fmt_vid_out,
+	.vidioc_try_fmt_vid_out	= vidioc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out	= vidioc_s_fmt_vid_out,
+
+	.vidioc_reqbufs		= vidioc_reqbufs,
+	.vidioc_querybuf	= vidioc_querybuf,
+
+	.vidioc_qbuf		= vidioc_qbuf,
+	.vidioc_dqbuf		= vidioc_dqbuf,
+
+	.vidioc_streamon	= vidioc_streamon,
+	.vidioc_streamoff	= vidioc_streamoff,
+};
+
+/*
+ * Queue operations
+ */
+static int emmaprp_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct emmaprp_ctx *ctx = vb2_get_drv_priv(vq);
+	struct emmaprp_q_data *q_data;
+	unsigned int size, count = *nbuffers;
+
+	q_data = get_q_data(ctx, vq->type);
+
+	if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420)
+		size = q_data->width * q_data->height * 3 / 2;
+	else
+		size = q_data->width * q_data->height * 2;
+
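+	/* Trim the buffer count so the queue stays within MEM2MEM_VID_MEM_LIMIT */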
+	while (size * count > MEM2MEM_VID_MEM_LIMIT)
+		(count)--;
+
+	*nplanes = 1;
+	*nbuffers = count;
+	sizes[0] = size;
+
+	alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+	dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+
+	return 0;
+}
+
+static int emmaprp_buf_prepare(struct vb2_buffer *vb)
+{
+	struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct emmaprp_q_data *q_data;
+
+	dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+	if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+		dprintk(ctx->dev, "%s data will not fit into plane"
+				  "(%lu < %lu)\n", __func__,
+				  vb2_plane_size(vb, 0),
+				  (long)q_data->sizeimage);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+
+	return 0;
+}
+
+static void emmaprp_buf_queue(struct vb2_buffer *vb)
+{
+	struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static struct vb2_ops emmaprp_qops = {
+	.queue_setup	 = emmaprp_queue_setup,
+	.buf_prepare	 = emmaprp_buf_prepare,
+	.buf_queue	 = emmaprp_buf_queue,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct emmaprp_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &emmaprp_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &emmaprp_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int emmaprp_open(struct file *file)
+{
+	struct emmaprp_dev *pcdev = video_drvdata(file);
+	struct emmaprp_ctx *ctx;
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	file->private_data = ctx;
+	ctx->dev = pcdev;
+
+	if (mutex_lock_interruptible(&pcdev->dev_mutex)) {
+		kfree(ctx);
+		return -ERESTARTSYS;
+	}
+
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+
+	if (IS_ERR(ctx->m2m_ctx)) {
+		int ret = PTR_ERR(ctx->m2m_ctx);
+
+		mutex_unlock(&pcdev->dev_mutex);
+		kfree(ctx);
+		return ret;
+	}
+
+	clk_prepare_enable(pcdev->clk_emma_ipg);
+	clk_prepare_enable(pcdev->clk_emma_ahb);
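+	/* Default formats: YUYV on the source queue, YUV 4:2:0 on the destination */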
+	ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1];
+	ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+	mutex_unlock(&pcdev->dev_mutex);
+
+	dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+	return 0;
+}
+
+static int emmaprp_release(struct file *file)
+{
+	struct emmaprp_dev *pcdev = video_drvdata(file);
+	struct emmaprp_ctx *ctx = file->private_data;
+
+	dprintk(pcdev, "Releasing instance %p\n", ctx);
+
+	mutex_lock(&pcdev->dev_mutex);
+	clk_disable_unprepare(pcdev->clk_emma_ahb);
+	clk_disable_unprepare(pcdev->clk_emma_ipg);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	mutex_unlock(&pcdev->dev_mutex);
+	kfree(ctx);
+
+	return 0;
+}
+
+static unsigned int emmaprp_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct emmaprp_dev *pcdev = video_drvdata(file);
+	struct emmaprp_ctx *ctx = file->private_data;
+	unsigned int res;
+
+	mutex_lock(&pcdev->dev_mutex);
+	res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	mutex_unlock(&pcdev->dev_mutex);
+	return res;
+}
+
+static int emmaprp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct emmaprp_dev *pcdev = video_drvdata(file);
+	struct emmaprp_ctx *ctx = file->private_data;
+	int ret;
+
+	if (mutex_lock_interruptible(&pcdev->dev_mutex))
+		return -ERESTARTSYS;
+	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+	mutex_unlock(&pcdev->dev_mutex);
+	return ret;
+}
+
+static const struct v4l2_file_operations emmaprp_fops = {
+	.owner		= THIS_MODULE,
+	.open		= emmaprp_open,
+	.release	= emmaprp_release,
+	.poll		= emmaprp_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= emmaprp_mmap,
+};
+
+static struct video_device emmaprp_videodev = {
+	.name		= MEM2MEM_NAME,
+	.fops		= &emmaprp_fops,
+	.ioctl_ops	= &emmaprp_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= emmaprp_device_run,
+	.job_abort	= emmaprp_job_abort,
+	.lock		= emmaprp_lock,
+	.unlock		= emmaprp_unlock,
+};
+
+static int emmaprp_probe(struct platform_device *pdev)
+{
+	struct emmaprp_dev *pcdev;
+	struct video_device *vfd;
+	struct resource *res_emma;
+	int irq_emma;
+	int ret;
+
+	pcdev = kzalloc(sizeof *pcdev, GFP_KERNEL);
+	if (!pcdev)
+		return -ENOMEM;
+
+	spin_lock_init(&pcdev->irqlock);
+
+	pcdev->clk_emma_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(pcdev->clk_emma_ipg)) {
+		ret = PTR_ERR(pcdev->clk_emma_ipg);
+		goto free_dev;
+	}
+
+	pcdev->clk_emma_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(pcdev->clk_emma_ahb)) {
+		ret = PTR_ERR(pcdev->clk_emma_ahb);
+		goto free_dev;
+	}
+
+	irq_emma = platform_get_irq(pdev, 0);
+	res_emma = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (irq_emma < 0 || res_emma == NULL) {
+		dev_err(&pdev->dev, "Missing platform resources data\n");
+		ret = -ENODEV;
+		goto free_dev;
+	}
+
+	ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+	if (ret)
+		goto free_dev;
+
+	mutex_init(&pcdev->dev_mutex);
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto unreg_dev;
+	}
+
+	*vfd = emmaprp_videodev;
+	vfd->lock = &pcdev->dev_mutex;
+
+	video_set_drvdata(vfd, pcdev);
+	snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
+	pcdev->vfd = vfd;
+	v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
+			" Device registered as /dev/video%d\n", vfd->num);
+
+	platform_set_drvdata(pdev, pcdev);
+
+	if (devm_request_mem_region(&pdev->dev, res_emma->start,
+	    resource_size(res_emma), MEM2MEM_NAME) == NULL) {
+		ret = -EBUSY;
+		goto rel_vdev;
+	}
+
+	pcdev->base_emma = devm_ioremap(&pdev->dev, res_emma->start,
+					resource_size(res_emma));
+	if (!pcdev->base_emma) {
+		ret = -ENOMEM;
+		goto rel_vdev;
+	}
+
+	pcdev->irq_emma = irq_emma;
+	pcdev->res_emma = res_emma;
+
+	ret = devm_request_irq(&pdev->dev, pcdev->irq_emma, emmaprp_irq,
+			       0, MEM2MEM_NAME, pcdev);
+	if (ret)
+		goto rel_vdev;
+
+	pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(pcdev->alloc_ctx)) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to alloc vb2 context\n");
+		ret = PTR_ERR(pcdev->alloc_ctx);
+		goto rel_vdev;
+	}
+
+	pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(pcdev->m2m_dev)) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(pcdev->m2m_dev);
+		goto rel_ctx;
+	}
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
+		goto rel_m2m;
+	}
+
+	return 0;
+
+rel_m2m:
+	v4l2_m2m_release(pcdev->m2m_dev);
+rel_ctx:
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+rel_vdev:
+	video_device_release(vfd);
+unreg_dev:
+	v4l2_device_unregister(&pcdev->v4l2_dev);
+free_dev:
+	kfree(pcdev);
+
+	return ret;
+}
+
+static int emmaprp_remove(struct platform_device *pdev)
+{
+	struct emmaprp_dev *pcdev = platform_get_drvdata(pdev);
+
+	v4l2_info(&pcdev->v4l2_dev, "Removing " EMMAPRP_MODULE_NAME);
+
+	video_unregister_device(pcdev->vfd);
+	v4l2_m2m_release(pcdev->m2m_dev);
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+	v4l2_device_unregister(&pcdev->v4l2_dev);
+	kfree(pcdev);
+
+	return 0;
+}
+
+static struct platform_driver emmaprp_pdrv = {
+	.probe		= emmaprp_probe,
+	.remove		= emmaprp_remove,
+	.driver		= {
+		.name	= MEM2MEM_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static void __exit emmaprp_exit(void)
+{
+	platform_driver_unregister(&emmaprp_pdrv);
+}
+
+static int __init emmaprp_init(void)
+{
+	return platform_driver_register(&emmaprp_pdrv);
+}
+
+module_init(emmaprp_init);
+module_exit(emmaprp_exit);
diff --git a/drivers/media/platform/mx3_camera.c b/drivers/media/platform/mx3_camera.c
new file mode 100644
index 0000000..f13643d
--- /dev/null
+++ b/drivers/media/platform/mx3_camera.c
@@ -0,0 +1,1300 @@
+/*
+ * V4L2 Driver for i.MX3x camera host
+ *
+ * Copyright (C) 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+
+#include <mach/ipu.h>
+#include <mach/mx3_camera.h>
+#include <mach/dma.h>
+
+#define MX3_CAM_DRV_NAME "mx3-camera"
+
+/* CMOS Sensor Interface Registers */
+#define CSI_REG_START		0x60
+
+#define CSI_SENS_CONF		(0x60 - CSI_REG_START)
+#define CSI_SENS_FRM_SIZE	(0x64 - CSI_REG_START)
+#define CSI_ACT_FRM_SIZE	(0x68 - CSI_REG_START)
+#define CSI_OUT_FRM_CTRL	(0x6C - CSI_REG_START)
+#define CSI_TST_CTRL		(0x70 - CSI_REG_START)
+#define CSI_CCIR_CODE_1		(0x74 - CSI_REG_START)
+#define CSI_CCIR_CODE_2		(0x78 - CSI_REG_START)
+#define CSI_CCIR_CODE_3		(0x7C - CSI_REG_START)
+#define CSI_FLASH_STROBE_1	(0x80 - CSI_REG_START)
+#define CSI_FLASH_STROBE_2	(0x84 - CSI_REG_START)
+
+#define CSI_SENS_CONF_VSYNC_POL_SHIFT		0
+#define CSI_SENS_CONF_HSYNC_POL_SHIFT		1
+#define CSI_SENS_CONF_DATA_POL_SHIFT		2
+#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT		3
+#define CSI_SENS_CONF_SENS_PRTCL_SHIFT		4
+#define CSI_SENS_CONF_SENS_CLKSRC_SHIFT		7
+#define CSI_SENS_CONF_DATA_FMT_SHIFT		8
+#define CSI_SENS_CONF_DATA_WIDTH_SHIFT		10
+#define CSI_SENS_CONF_EXT_VSYNC_SHIFT		15
+#define CSI_SENS_CONF_DIVRATIO_SHIFT		16
+
+#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444	(0UL << CSI_SENS_CONF_DATA_FMT_SHIFT)
+#define CSI_SENS_CONF_DATA_FMT_YUV422		(2UL << CSI_SENS_CONF_DATA_FMT_SHIFT)
+#define CSI_SENS_CONF_DATA_FMT_BAYER		(3UL << CSI_SENS_CONF_DATA_FMT_SHIFT)
+
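+/* Upper bound on the memory allocated for capture buffers, in megabytes */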
+#define MAX_VIDEO_MEM 16
+
+enum csi_buffer_state {
+	CSI_BUF_NEEDS_INIT,
+	CSI_BUF_PREPARED,
+};
+
+struct mx3_camera_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_buffer			vb;
+	enum csi_buffer_state			state;
+	struct list_head			queue;
+
+	/* One descriptor per scatterlist (per frame) */
+	struct dma_async_tx_descriptor		*txd;
+
+	/* We have to "build" a scatterlist ourselves - one element per frame */
+	struct scatterlist			sg;
+};
+
+/**
+ * struct mx3_camera_dev - i.MX3x camera (CSI) object
+ * @icd:		currently attached camera sensor
+ * @clk:		pointer to clock
+ * @base:		remapped register base address
+ * @pdata:		platform data
+ * @platform_flags:	platform flags
+ * @mclk:		master clock frequency in Hz
+ * @width_flags:	bitmask of supported bus widths
+ * @capture:		list of capture videobuffers
+ * @lock:		protects video buffer lists
+ * @active:		active video buffer
+ * @buf_total:		total memory currently allocated for video buffers
+ * @alloc_ctx:		videobuf2 dma-contig allocation context
+ * @field:		field format of the captured frames
+ * @sequence:		frame sequence counter
+ * @idmac_channel:	array of pointers to IPU DMAC DMA channels
+ * @soc_host:		embedded soc_host object
+ */
+struct mx3_camera_dev {
+	/*
+	 * i.MX3x is only supposed to handle one camera on its Camera Sensor
+	 * Interface. If anyone ever builds hardware to enable more than one
+	 * camera _simultaneously_, they will have to modify this driver too
+	 */
+	struct soc_camera_device *icd;
+	struct clk		*clk;
+
+	void __iomem		*base;
+
+	struct mx3_camera_pdata	*pdata;
+
+	unsigned long		platform_flags;
+	unsigned long		mclk;
+	u16			width_flags;	/* max 15 bits */
+
+	struct list_head	capture;
+	spinlock_t		lock;		/* Protects video buffer lists */
+	struct mx3_camera_buffer *active;
+	size_t			buf_total;
+	struct vb2_alloc_ctx	*alloc_ctx;
+	enum v4l2_field		field;
+	int			sequence;
+
+	/* IDMAC / dmaengine interface */
+	struct idmac_channel	*idmac_channel[1];	/* We need one channel */
+
+	struct soc_camera_host	soc_host;
+};
+
+struct dma_chan_request {
+	struct mx3_camera_dev	*mx3_cam;
+	enum ipu_channel	id;
+};
+
+static u32 csi_reg_read(struct mx3_camera_dev *mx3, off_t reg)
+{
+	return __raw_readl(mx3->base + reg);
+}
+
+static void csi_reg_write(struct mx3_camera_dev *mx3, u32 value, off_t reg)
+{
+	__raw_writel(value, mx3->base + reg);
+}
+
+static struct mx3_camera_buffer *to_mx3_vb(struct vb2_buffer *vb)
+{
+	return container_of(vb, struct mx3_camera_buffer, vb);
+}
+
+/* Called from the IPU IDMAC ISR */
+static void mx3_cam_dma_done(void *arg)
+{
+	struct idmac_tx_desc *desc = to_tx_desc(arg);
+	struct dma_chan *chan = desc->txd.chan;
+	struct idmac_channel *ichannel = to_idmac_chan(chan);
+	struct mx3_camera_dev *mx3_cam = ichannel->client;
+
+	dev_dbg(chan->device->dev, "callback cookie %d, active DMA 0x%08x\n",
+		desc->txd.cookie, mx3_cam->active ? sg_dma_address(&mx3_cam->active->sg) : 0);
+
+	spin_lock(&mx3_cam->lock);
+	if (mx3_cam->active) {
+		struct vb2_buffer *vb = &mx3_cam->active->vb;
+		struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+
+		list_del_init(&buf->queue);
+		do_gettimeofday(&vb->v4l2_buf.timestamp);
+		vb->v4l2_buf.field = mx3_cam->field;
+		vb->v4l2_buf.sequence = mx3_cam->sequence++;
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	}
+
+	if (list_empty(&mx3_cam->capture)) {
+		mx3_cam->active = NULL;
+		spin_unlock(&mx3_cam->lock);
+
+		/*
+		 * stop capture - without further buffers IPU_CHA_BUF0_RDY will
+		 * not get updated
+		 */
+		return;
+	}
+
+	mx3_cam->active = list_entry(mx3_cam->capture.next,
+				     struct mx3_camera_buffer, queue);
+	spin_unlock(&mx3_cam->lock);
+}
+
+/*
+ * Videobuf operations
+ */
+
+/*
+ * Calculate the __buffer__ (not data) size and number of buffers.
+ */
+static int mx3_videobuf_setup(struct vb2_queue *vq,
+			const struct v4l2_format *fmt,
+			unsigned int *count, unsigned int *num_planes,
+			unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+
+	if (!mx3_cam->idmac_channel[0])
+		return -EINVAL;
+
+	if (fmt) {
+		const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
+								fmt->fmt.pix.pixelformat);
+		unsigned int bytes_per_line;
+		int ret;
+
+		if (!xlate)
+			return -EINVAL;
+
+		ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+					      xlate->host_fmt);
+		if (ret < 0)
+			return ret;
+
+		bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+		ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+					  fmt->fmt.pix.height);
+		if (ret < 0)
+			return ret;
+
+		sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
+	} else {
+		/* Called from VIDIOC_REQBUFS or in compatibility mode */
+		sizes[0] = icd->sizeimage;
+	}
+
+	alloc_ctxs[0] = mx3_cam->alloc_ctx;
+
+	if (!vq->num_buffers)
+		mx3_cam->sequence = 0;
+
+	if (!*count)
+		*count = 2;
+
+	/* If *num_planes != 0, we have already verified *count. */
+	if (!*num_planes &&
+	    sizes[0] * *count + mx3_cam->buf_total > MAX_VIDEO_MEM * 1024 * 1024)
+		*count = (MAX_VIDEO_MEM * 1024 * 1024 - mx3_cam->buf_total) /
+			sizes[0];
+
+	*num_planes = 1;
+
+	return 0;
+}
+
+static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
+{
+	/* Add more formats as need arises and test possibilities appear... */
+	switch (fourcc) {
+	case V4L2_PIX_FMT_RGB24:
+		return IPU_PIX_FMT_RGB24;
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_RGB565:
+	default:
+		return IPU_PIX_FMT_GENERIC;
+	}
+}
+
+static void mx3_videobuf_queue(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+	struct scatterlist *sg = &buf->sg;
+	struct dma_async_tx_descriptor *txd;
+	struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
+	struct idmac_video_param *video = &ichan->params.video;
+	const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
+	unsigned long flags;
+	dma_cookie_t cookie;
+	size_t new_size;
+
+	new_size = icd->sizeimage;
+
+	if (vb2_plane_size(vb, 0) < new_size) {
+		dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
+			vb->v4l2_buf.index, vb2_plane_size(vb, 0), new_size);
+		goto error;
+	}
+
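+	/*
+	 * The one-element scatterlist and its DMA descriptor are prepared the
+	 * first time a buffer is queued and reused on every re-queue until
+	 * the buffer is cleaned up (back to CSI_BUF_NEEDS_INIT).
+	 */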
+	if (buf->state == CSI_BUF_NEEDS_INIT) {
+		sg_dma_address(sg)	= vb2_dma_contig_plane_dma_addr(vb, 0);
+		sg_dma_len(sg)		= new_size;
+
+		txd = dmaengine_prep_slave_sg(
+			&ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT);
+		if (!txd)
+			goto error;
+
+		txd->callback_param	= txd;
+		txd->callback		= mx3_cam_dma_done;
+
+		buf->state		= CSI_BUF_PREPARED;
+		buf->txd		= txd;
+	} else {
+		txd = buf->txd;
+	}
+
+	vb2_set_plane_payload(vb, 0, new_size);
+
+	/* This is the configuration of one sg-element */
+	video->out_pixel_fmt = fourcc_to_ipu_pix(host_fmt->fourcc);
+
+	if (video->out_pixel_fmt == IPU_PIX_FMT_GENERIC) {
+		/*
+		 * If the IPU DMA channel is configured to transfer generic
+		 * 8-bit data, we have to set up the geometry parameters
+		 * correctly, according to the current pixel format. The DMA
+		 * horizontal parameters in this case are expressed in bytes,
+		 * not in pixels.
+		 */
+		video->out_width	= icd->bytesperline;
+		video->out_height	= icd->user_height;
+		video->out_stride	= icd->bytesperline;
+	} else {
+		/*
+		 * For IPU known formats the pixel unit will be managed
+		 * successfully by the IPU code
+		 */
+		video->out_width	= icd->user_width;
+		video->out_height	= icd->user_height;
+		video->out_stride	= icd->user_width;
+	}
+
+#ifdef DEBUG
+	/* helps to see what DMA actually has written */
+	if (vb2_plane_vaddr(vb, 0))
+		memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+	spin_lock_irqsave(&mx3_cam->lock, flags);
+	list_add_tail(&buf->queue, &mx3_cam->capture);
+
+	if (!mx3_cam->active)
+		mx3_cam->active = buf;
+
+	spin_unlock_irq(&mx3_cam->lock);
+
+	cookie = txd->tx_submit(txd);
+	dev_dbg(icd->parent, "Submitted cookie %d DMA 0x%08x\n",
+		cookie, sg_dma_address(&buf->sg));
+
+	if (cookie >= 0)
+		return;
+
+	spin_lock_irq(&mx3_cam->lock);
+
+	/* Submit error */
+	list_del_init(&buf->queue);
+
+	if (mx3_cam->active == buf)
+		mx3_cam->active = NULL;
+
+	spin_unlock_irqrestore(&mx3_cam->lock, flags);
+error:
+	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void mx3_videobuf_release(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+	struct dma_async_tx_descriptor *txd = buf->txd;
+	unsigned long flags;
+
+	dev_dbg(icd->parent,
+		"Release%s DMA 0x%08x, queue %sempty\n",
+		mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
+		list_empty(&buf->queue) ? "" : "not ");
+
+	spin_lock_irqsave(&mx3_cam->lock, flags);
+
+	if (mx3_cam->active == buf)
+		mx3_cam->active = NULL;
+
+	/* This does no harm even if the list is already empty */
+	list_del_init(&buf->queue);
+	buf->state = CSI_BUF_NEEDS_INIT;
+
+	if (txd) {
+		buf->txd = NULL;
+		if (mx3_cam->idmac_channel[0])
+			async_tx_ack(txd);
+	}
+
+	spin_unlock_irqrestore(&mx3_cam->lock, flags);
+
+	mx3_cam->buf_total -= vb2_plane_size(vb, 0);
+}
+
+static int mx3_videobuf_init(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+
+	/* This is for locking debugging only */
+	INIT_LIST_HEAD(&buf->queue);
+	sg_init_table(&buf->sg, 1);
+
+	buf->state = CSI_BUF_NEEDS_INIT;
+
+	mx3_cam->buf_total += vb2_plane_size(vb, 0);
+
+	return 0;
+}
+
+static int mx3_stop_streaming(struct vb2_queue *q)
+{
+	struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
+	struct mx3_camera_buffer *buf, *tmp;
+	unsigned long flags;
+
+	if (ichan) {
+		struct dma_chan *chan = &ichan->dma_chan;
+		chan->device->device_control(chan, DMA_PAUSE, 0);
+	}
+
+	spin_lock_irqsave(&mx3_cam->lock, flags);
+
+	mx3_cam->active = NULL;
+
+	list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) {
+		list_del_init(&buf->queue);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	spin_unlock_irqrestore(&mx3_cam->lock, flags);
+
+	return 0;
+}
+
+static struct vb2_ops mx3_videobuf_ops = {
+	.queue_setup	= mx3_videobuf_setup,
+	.buf_queue	= mx3_videobuf_queue,
+	.buf_cleanup	= mx3_videobuf_release,
+	.buf_init	= mx3_videobuf_init,
+	.wait_prepare	= soc_camera_unlock,
+	.wait_finish	= soc_camera_lock,
+	.stop_streaming	= mx3_stop_streaming,
+};
+
+static int mx3_camera_init_videobuf(struct vb2_queue *q,
+				     struct soc_camera_device *icd)
+{
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->drv_priv = icd;
+	q->ops = &mx3_videobuf_ops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct mx3_camera_buffer);
+
+	return vb2_queue_init(q);
+}
+
+/* First part of ipu_csi_init_interface() */
+static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
+				struct soc_camera_device *icd)
+{
+	u32 conf;
+	long rate;
+
+	/* Set default size: ipu_csi_set_window_size() */
+	csi_reg_write(mx3_cam, (640 - 1) | ((480 - 1) << 16), CSI_ACT_FRM_SIZE);
+	/* ...and position to 0:0: ipu_csi_set_window_pos() */
+	conf = csi_reg_read(mx3_cam, CSI_OUT_FRM_CTRL) & 0xffff0000;
+	csi_reg_write(mx3_cam, conf, CSI_OUT_FRM_CTRL);
+
+	/* We use only gated clock synchronisation mode so far */
+	conf = 0 << CSI_SENS_CONF_SENS_PRTCL_SHIFT;
+
+	/* Set generic data, platform-biggest bus-width */
+	conf |= CSI_SENS_CONF_DATA_FMT_BAYER;
+
+	if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
+		conf |= 3 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+	else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
+		conf |= 2 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+	else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
+		conf |= 1 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+	else /* if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4) */
+		conf |= 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+
+	if (mx3_cam->platform_flags & MX3_CAMERA_CLK_SRC)
+		conf |= 1 << CSI_SENS_CONF_SENS_CLKSRC_SHIFT;
+	if (mx3_cam->platform_flags & MX3_CAMERA_EXT_VSYNC)
+		conf |= 1 << CSI_SENS_CONF_EXT_VSYNC_SHIFT;
+	if (mx3_cam->platform_flags & MX3_CAMERA_DP)
+		conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
+	if (mx3_cam->platform_flags & MX3_CAMERA_PCP)
+		conf |= 1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT;
+	if (mx3_cam->platform_flags & MX3_CAMERA_HSP)
+		conf |= 1 << CSI_SENS_CONF_HSYNC_POL_SHIFT;
+	if (mx3_cam->platform_flags & MX3_CAMERA_VSP)
+		conf |= 1 << CSI_SENS_CONF_VSYNC_POL_SHIFT;
+
+	/* ipu_csi_init_interface() */
+	csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
+
+	clk_prepare_enable(mx3_cam->clk);
+	rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
+	dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
+	if (rate)
+		clk_set_rate(mx3_cam->clk, rate);
+}
+
+/* Called with .video_lock held */
+static int mx3_camera_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+
+	if (mx3_cam->icd)
+		return -EBUSY;
+
+	mx3_camera_activate(mx3_cam, icd);
+
+	mx3_cam->buf_total = 0;
+	mx3_cam->icd = icd;
+
+	dev_info(icd->parent, "MX3 Camera driver attached to camera %d\n",
+		 icd->devnum);
+
+	return 0;
+}
+
+/* Called with .video_lock held */
+static void mx3_camera_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
+
+	BUG_ON(icd != mx3_cam->icd);
+
+	if (*ichan) {
+		dma_release_channel(&(*ichan)->dma_chan);
+		*ichan = NULL;
+	}
+
+	clk_disable_unprepare(mx3_cam->clk);
+
+	mx3_cam->icd = NULL;
+
+	dev_info(icd->parent, "MX3 Camera driver detached from camera %d\n",
+		 icd->devnum);
+}
+
+static int test_platform_param(struct mx3_camera_dev *mx3_cam,
+			       unsigned char buswidth, unsigned long *flags)
+{
+	/*
+	 * If requested data width is supported by the platform, use it or any
+	 * possible lower value - i.MX31 is smart enough to shift bits
+	 */
+	if (buswidth > fls(mx3_cam->width_flags))
+		return -EINVAL;
+
+	/*
+	 * Platform specified synchronization and pixel clock polarities are
+	 * only a recommendation and are only used during probing. MX3x
+	 * camera interface only works in master mode, i.e., uses HSYNC and
+	 * VSYNC signals from the sensor
+	 */
+	*flags = V4L2_MBUS_MASTER |
+		V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+		V4L2_MBUS_HSYNC_ACTIVE_LOW |
+		V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+		V4L2_MBUS_VSYNC_ACTIVE_LOW |
+		V4L2_MBUS_PCLK_SAMPLE_RISING |
+		V4L2_MBUS_PCLK_SAMPLE_FALLING |
+		V4L2_MBUS_DATA_ACTIVE_HIGH |
+		V4L2_MBUS_DATA_ACTIVE_LOW;
+
+	return 0;
+}
+
+static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
+				    const unsigned int depth)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long bus_flags, common_flags;
+	int ret = test_platform_param(mx3_cam, depth, &bus_flags);
+
+	dev_dbg(icd->parent, "request bus width %d bit: %d\n", depth, ret);
+
+	if (ret < 0)
+		return ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  bus_flags);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
+				 cfg.flags, bus_flags);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	}
+
+	return 0;
+}
+
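+/*
+ * dmaengine filter: only accept the IPU IDMAC channel whose id matches the
+ * request and whose DMA device is the one named in this camera's platform
+ * data.
+ */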
+static bool chan_filter(struct dma_chan *chan, void *arg)
+{
+	struct dma_chan_request *rq = arg;
+	struct mx3_camera_pdata *pdata;
+
+	if (!imx_dma_is_ipu(chan))
+		return false;
+
+	if (!rq)
+		return false;
+
+	pdata = rq->mx3_cam->soc_host.v4l2_dev.dev->platform_data;
+
+	return rq->id == chan->chan_id &&
+		pdata->dma_dev == chan->device->dev;
+}
+
+static const struct soc_mbus_pixelfmt mx3_camera_formats[] = {
+	{
+		.fourcc			= V4L2_PIX_FMT_SBGGR8,
+		.name			= "Bayer BGGR (sRGB) 8 bit",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_NONE,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	}, {
+		.fourcc			= V4L2_PIX_FMT_GREY,
+		.name			= "Monochrome 8 bit",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_NONE,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+};
+
+/* This will be corrected as we get more formats */
+static bool mx3_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+	return	fmt->packing == SOC_MBUS_PACKING_NONE ||
+		(fmt->bits_per_sample == 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+		(fmt->bits_per_sample > 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int idx,
+				  struct soc_camera_format_xlate *xlate)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	int formats = 0, ret;
+	enum v4l2_mbus_pixelcode code;
+	const struct soc_mbus_pixelfmt *fmt;
+
+	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+	if (ret < 0)
+		/* No more formats */
+		return 0;
+
+	fmt = soc_mbus_get_fmtdesc(code);
+	if (!fmt) {
+		dev_warn(icd->parent,
+			 "Unsupported format code #%u: %d\n", idx, code);
+		return 0;
+	}
+
+	/* This also checks support for the requested bits-per-sample */
+	ret = mx3_camera_try_bus_param(icd, fmt->bits_per_sample);
+	if (ret < 0)
+		return 0;
+
+	switch (code) {
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+		formats++;
+		if (xlate) {
+			xlate->host_fmt	= &mx3_camera_formats[0];
+			xlate->code	= code;
+			xlate++;
+			dev_dbg(dev, "Providing format %s using code %d\n",
+				mx3_camera_formats[0].name, code);
+		}
+		break;
+	case V4L2_MBUS_FMT_Y10_1X10:
+		formats++;
+		if (xlate) {
+			xlate->host_fmt	= &mx3_camera_formats[1];
+			xlate->code	= code;
+			xlate++;
+			dev_dbg(dev, "Providing format %s using code %d\n",
+				mx3_camera_formats[1].name, code);
+		}
+		break;
+	default:
+		if (!mx3_camera_packing_supported(fmt))
+			return 0;
+	}
+
+	/* Generic pass-through */
+	formats++;
+	if (xlate) {
+		xlate->host_fmt	= fmt;
+		xlate->code	= code;
+		dev_dbg(dev, "Providing format %c%c%c%c in pass-through mode\n",
+			(fmt->fourcc >> (0*8)) & 0xFF,
+			(fmt->fourcc >> (1*8)) & 0xFF,
+			(fmt->fourcc >> (2*8)) & 0xFF,
+			(fmt->fourcc >> (3*8)) & 0xFF);
+		xlate++;
+	}
+
+	return formats;
+}
+
+static void configure_geometry(struct mx3_camera_dev *mx3_cam,
+			       unsigned int width, unsigned int height,
+			       const struct soc_mbus_pixelfmt *fmt)
+{
+	u32 ctrl, width_field, height_field;
+
+	if (fourcc_to_ipu_pix(fmt->fourcc) == IPU_PIX_FMT_GENERIC) {
+		/*
+		 * As the CSI will be configured to output BAYER, here
+		 * the width parameter counts the number of samples to
+		 * capture to complete the whole image width.
+		 */
+		unsigned int num, den;
+		int ret = soc_mbus_samples_per_pixel(fmt, &num, &den);
+		BUG_ON(ret < 0);
+		width = width * num / den;
+	}
+
+	/* Setup frame size - this cannot be changed on-the-fly... */
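+	/* The frame-size registers are programmed as (dimension - 1) */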
+	width_field = width - 1;
+	height_field = height - 1;
+	csi_reg_write(mx3_cam, width_field | (height_field << 16), CSI_SENS_FRM_SIZE);
+
+	csi_reg_write(mx3_cam, width_field << 16, CSI_FLASH_STROBE_1);
+	csi_reg_write(mx3_cam, (height_field << 16) | 0x22, CSI_FLASH_STROBE_2);
+
+	csi_reg_write(mx3_cam, width_field | (height_field << 16), CSI_ACT_FRM_SIZE);
+
+	/* ...and position */
+	ctrl = csi_reg_read(mx3_cam, CSI_OUT_FRM_CTRL) & 0xffff0000;
+	/* Sensor does the cropping */
+	csi_reg_write(mx3_cam, ctrl | 0 | (0 << 8), CSI_OUT_FRM_CTRL);
+}
+
+static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam)
+{
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
+	/* We have to use IDMAC_IC_7 for Bayer / generic data */
+	struct dma_chan_request rq = {.mx3_cam = mx3_cam,
+				      .id = IDMAC_IC_7};
+
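+	/* Request a private slave channel; chan_filter() picks the right one */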
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_PRIVATE, mask);
+	chan = dma_request_channel(mask, chan_filter, &rq);
+	if (!chan)
+		return -EBUSY;
+
+	*ichan = to_idmac_chan(chan);
+	(*ichan)->client = mx3_cam;
+
+	return 0;
+}
+
+/*
+ * FIXME: learn to use stride != width, then we can keep stride properly aligned
+ * and support arbitrary (even) widths.
+ */
+static inline void stride_align(__u32 *width)
+{
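+	/* Round the width up to a multiple of 8 unless that would reach
+	 * 4096; in that case round it down instead. */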
+	if (ALIGN(*width, 8) < 4096)
+		*width = ALIGN(*width, 8);
+	else
+		*width = *width &  ~7;
+}
+
+/*
+ * As long as we don't implement host-side cropping and scaling, we can use
+ * default g_crop and cropcap from soc_camera.c
+ */
+static int mx3_camera_set_crop(struct soc_camera_device *icd,
+			       struct v4l2_crop *a)
+{
+	struct v4l2_rect *rect = &a->c;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096);
+	soc_camera_limit_side(&rect->top, &rect->height, 0, 2, 4096);
+
+	ret = v4l2_subdev_call(sd, video, s_crop, a);
+	if (ret < 0)
+		return ret;
+
+	/* The capture device might have changed its output sizes */
+	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	if (mf.code != icd->current_fmt->code)
+		return -EINVAL;
+
+	if (mf.width & 7) {
+		/* Ouch! We can only handle 8-byte aligned width... */
+		stride_align(&mf.width);
+		ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (mf.width != icd->user_width || mf.height != icd->user_height)
+		configure_geometry(mx3_cam, mf.width, mf.height,
+				   icd->current_fmt->host_fmt);
+
+	dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
+		mf.width, mf.height);
+
+	icd->user_width		= mf.width;
+	icd->user_height	= mf.height;
+
+	return ret;
+}
+
+static int mx3_camera_set_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %x not found\n",
+			 pix->pixelformat);
+		return -EINVAL;
+	}
+
+	stride_align(&pix->width);
+	dev_dbg(icd->parent, "Set format %dx%d\n", pix->width, pix->height);
+
+	/*
+	 * Might have to perform a complete interface initialisation like in
+	 * ipu_csi_init_interface() in mxc_v4l2_s_param(). Also consider
+	 * mxc_v4l2_s_fmt()
+	 */
+
+	configure_geometry(mx3_cam, pix->width, pix->height, xlate->host_fmt);
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
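+	/* Lazily acquire the IDMAC channel the first time a format is set */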
+	if (!mx3_cam->idmac_channel[0]) {
+		ret = acquire_dma_channel(mx3_cam);
+		if (ret < 0)
+			return ret;
+	}
+
+	pix->width		= mf.width;
+	pix->height		= mf.height;
+	pix->field		= mf.field;
+	mx3_cam->field		= mf.field;
+	pix->colorspace		= mf.colorspace;
+	icd->current_fmt	= xlate;
+
+	dev_dbg(icd->parent, "Sensor set %dx%d\n", pix->width, pix->height);
+
+	return ret;
+}
+
+static int mx3_camera_try_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	__u32 pixfmt = pix->pixelformat;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (pixfmt && !xlate) {
+		dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	/* limit to MX3 hardware capabilities */
+	if (pix->height > 4096)
+		pix->height = 4096;
+	if (pix->width > 4096)
+		pix->width = 4096;
+
+	/* limit to sensor capabilities */
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->colorspace	= mf.colorspace;
+
+	switch (mf.field) {
+	case V4L2_FIELD_ANY:
+		pix->field = V4L2_FIELD_NONE;
+		break;
+	case V4L2_FIELD_NONE:
+		break;
+	default:
+		dev_err(icd->parent, "Field type %d unsupported.\n",
+			mf.field);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int mx3_camera_reqbufs(struct soc_camera_device *icd,
+			      struct v4l2_requestbuffers *p)
+{
+	return 0;
+}
+
+static unsigned int mx3_camera_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+
+	return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int mx3_camera_querycap(struct soc_camera_host *ici,
+			       struct v4l2_capability *cap)
+{
+	/* cap->name is set by the friendly caller:-> */
+	strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static int mx3_camera_set_bus_param(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct mx3_camera_dev *mx3_cam = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+	unsigned long bus_flags, common_flags;
+	u32 dw, sens_conf;
+	const struct soc_mbus_pixelfmt *fmt;
+	int buswidth;
+	int ret;
+	const struct soc_camera_format_xlate *xlate;
+	struct device *dev = icd->parent;
+
+	fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
+	if (!fmt)
+		return -EINVAL;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (!xlate) {
+		dev_warn(dev, "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	buswidth = fmt->bits_per_sample;
+	ret = test_platform_param(mx3_cam, buswidth, &bus_flags);
+
+	dev_dbg(dev, "requested bus width %d bit: %d\n", buswidth, ret);
+
+	if (ret < 0)
+		return ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  bus_flags);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
+				 cfg.flags, bus_flags);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = bus_flags;
+	}
+
+	dev_dbg(dev, "Flags cam: 0x%x host: 0x%lx common: 0x%lx\n",
+		cfg.flags, bus_flags, common_flags);
+
+	/* Make choices, based on platform preferences */
+	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+		if (mx3_cam->platform_flags & MX3_CAMERA_HSP)
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+		if (mx3_cam->platform_flags & MX3_CAMERA_VSP)
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_DATA_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)) {
+		if (mx3_cam->platform_flags & MX3_CAMERA_DP)
+			common_flags &= ~V4L2_MBUS_DATA_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_DATA_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+	    (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+		if (mx3_cam->platform_flags & MX3_CAMERA_PCP)
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+		else
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	/*
+	 * So far only gated clock mode is supported. Add a line
+	 *	(3 << CSI_SENS_CONF_SENS_PRTCL_SHIFT) |
+	 * below and select the required mode when supporting other
+	 * synchronisation protocols.
+	 */
+	sens_conf = csi_reg_read(mx3_cam, CSI_SENS_CONF) &
+		~((1 << CSI_SENS_CONF_VSYNC_POL_SHIFT) |
+		  (1 << CSI_SENS_CONF_HSYNC_POL_SHIFT) |
+		  (1 << CSI_SENS_CONF_DATA_POL_SHIFT) |
+		  (1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT) |
+		  (3 << CSI_SENS_CONF_DATA_FMT_SHIFT) |
+		  (3 << CSI_SENS_CONF_DATA_WIDTH_SHIFT));
+
+	/* TODO: Support RGB and YUV formats */
+
+	/* This has been set in mx3_camera_activate(), but we clear it above */
+	sens_conf |= CSI_SENS_CONF_DATA_FMT_BAYER;
+
+	if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+		sens_conf |= 1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT;
+	if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+		sens_conf |= 1 << CSI_SENS_CONF_HSYNC_POL_SHIFT;
+	if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+		sens_conf |= 1 << CSI_SENS_CONF_VSYNC_POL_SHIFT;
+	if (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)
+		sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
+
+	/* Just do what we're asked to do */
+	switch (xlate->host_fmt->bits_per_sample) {
+	case 4:
+		dw = 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+		break;
+	case 8:
+		dw = 1 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+		break;
+	case 10:
+		dw = 2 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+		break;
+	default:
+		/*
+		 * Actually it can only be 15 now, default is just to silence
+		 * compiler warnings
+		 */
+	case 15:
+		dw = 3 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+	}
+
+	csi_reg_write(mx3_cam, sens_conf | dw, CSI_SENS_CONF);
+
+	dev_dbg(dev, "Set SENS_CONF to %x\n", sens_conf | dw);
+
+	return 0;
+}
+
+static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= mx3_camera_add_device,
+	.remove		= mx3_camera_remove_device,
+	.set_crop	= mx3_camera_set_crop,
+	.set_fmt	= mx3_camera_set_fmt,
+	.try_fmt	= mx3_camera_try_fmt,
+	.get_formats	= mx3_camera_get_formats,
+	.init_videobuf2	= mx3_camera_init_videobuf,
+	.reqbufs	= mx3_camera_reqbufs,
+	.poll		= mx3_camera_poll,
+	.querycap	= mx3_camera_querycap,
+	.set_bus_param	= mx3_camera_set_bus_param,
+};
+
+static int __devinit mx3_camera_probe(struct platform_device *pdev)
+{
+	struct mx3_camera_dev *mx3_cam;
+	struct resource *res;
+	void __iomem *base;
+	int err = 0;
+	struct soc_camera_host *soc_host;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -ENODEV;
+		goto egetres;
+	}
+
+	mx3_cam = vzalloc(sizeof(*mx3_cam));
+	if (!mx3_cam) {
+		dev_err(&pdev->dev, "Could not allocate mx3 camera object\n");
+		err = -ENOMEM;
+		goto ealloc;
+	}
+
+	mx3_cam->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mx3_cam->clk)) {
+		err = PTR_ERR(mx3_cam->clk);
+		goto eclkget;
+	}
+
+	mx3_cam->pdata = pdev->dev.platform_data;
+	mx3_cam->platform_flags = mx3_cam->pdata->flags;
+	if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 |
+			MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10 |
+			MX3_CAMERA_DATAWIDTH_15))) {
+		/*
+		 * Platform hasn't set available data widths. This is bad.
+		 * Warn and use a default.
+		 */
+		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
+			 "data widths, using default 8 bit\n");
+		mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8;
+	}
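+	/* Bit (n - 1) in width_flags means an n-bit bus width is supported */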
+	if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)
+		mx3_cam->width_flags = 1 << 3;
+	if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
+		mx3_cam->width_flags |= 1 << 7;
+	if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
+		mx3_cam->width_flags |= 1 << 9;
+	if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
+		mx3_cam->width_flags |= 1 << 14;
+
+	mx3_cam->mclk = mx3_cam->pdata->mclk_10khz * 10000;
+	if (!mx3_cam->mclk) {
+		dev_warn(&pdev->dev,
+			 "mclk_10khz == 0! Please, fix your platform data. "
+			 "Using default 20MHz\n");
+		mx3_cam->mclk = 20000000;
+	}
+
+	/* list of video-buffers */
+	INIT_LIST_HEAD(&mx3_cam->capture);
+	spin_lock_init(&mx3_cam->lock);
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		pr_err("Couldn't map %x@%x\n", resource_size(res), res->start);
+		err = -ENOMEM;
+		goto eioremap;
+	}
+
+	mx3_cam->base	= base;
+
+	soc_host		= &mx3_cam->soc_host;
+	soc_host->drv_name	= MX3_CAM_DRV_NAME;
+	soc_host->ops		= &mx3_soc_camera_host_ops;
+	soc_host->priv		= mx3_cam;
+	soc_host->v4l2_dev.dev	= &pdev->dev;
+	soc_host->nr		= pdev->id;
+
+	mx3_cam->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(mx3_cam->alloc_ctx)) {
+		err = PTR_ERR(mx3_cam->alloc_ctx);
+		goto eallocctx;
+	}
+
+	err = soc_camera_host_register(soc_host);
+	if (err)
+		goto ecamhostreg;
+
+	/* IDMAC interface */
+	dmaengine_get();
+
+	return 0;
+
+ecamhostreg:
+	vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
+eallocctx:
+	iounmap(base);
+eioremap:
+	clk_put(mx3_cam->clk);
+eclkget:
+	vfree(mx3_cam);
+ealloc:
+egetres:
+	return err;
+}
+
+static int __devexit mx3_camera_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct mx3_camera_dev *mx3_cam = container_of(soc_host,
+					struct mx3_camera_dev, soc_host);
+
+	clk_put(mx3_cam->clk);
+
+	soc_camera_host_unregister(soc_host);
+
+	iounmap(mx3_cam->base);
+
+	/*
+	 * The channel has either not been allocated,
+	 * or should have been released
+	 */
+	if (WARN_ON(mx3_cam->idmac_channel[0]))
+		dma_release_channel(&mx3_cam->idmac_channel[0]->dma_chan);
+
+	vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
+
+	vfree(mx3_cam);
+
+	dmaengine_put();
+
+	return 0;
+}
+
+static struct platform_driver mx3_camera_driver = {
+	.driver 	= {
+		.name	= MX3_CAM_DRV_NAME,
+	},
+	.probe		= mx3_camera_probe,
+	.remove		= __devexit_p(mx3_camera_remove),
+};
+
+module_platform_driver(mx3_camera_driver);
+
+MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2.3");
+MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
new file mode 100644
index 0000000..390ab09
--- /dev/null
+++ b/drivers/media/platform/omap/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_OMAP2_VOUT_VRFB
+	bool
+
+config VIDEO_OMAP2_VOUT
+	tristate "OMAP2/OMAP3 V4L2-Display driver"
+	depends on ARCH_OMAP2 || ARCH_OMAP3
+	select VIDEOBUF_GEN
+	select VIDEOBUF_DMA_CONTIG
+	select OMAP2_DSS
+	select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
+	select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
+	default n
+	---help---
+	  V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/Makefile b/drivers/media/platform/omap/Makefile
new file mode 100644
index 0000000..fc410b4
--- /dev/null
+++ b/drivers/media/platform/omap/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the omap video device drivers.
+#
+
+# OMAP2/3 Display driver
+omap-vout-y := omap_vout.o omap_voutlib.o
+omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o
+obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
new file mode 100644
index 0000000..88cf9d9
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -0,0 +1,2289 @@
+/*
+ * omap_vout.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim		Modified VRFB based Rotation,
+ *				The image data is always read from 0 degree
+ *				view and written
+ *				to the virtual space of desired rotation angle
+ * 4-DEC-2006  Jian		Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik		Changed driver to use video_ioctl2
+ *
+ * 23-Feb-2010 Vaibhav H	Modified to use new DSS2 interface
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/videodev2.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include <plat/dma.h>
+#include <plat/vrfb.h>
+#include <video/omapdss.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+#include "omap_vout_vrfb.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+/* Driver Configuration macros */
+#define VOUT_NAME		"omap_vout"
+
+enum omap_vout_channels {
+	OMAP_VIDEO1,
+	OMAP_VIDEO2,
+};
+
+static struct videobuf_queue_ops video_vbq_ops;
+/* Variables configurable through module params */
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static bool vid1_static_vrfb_alloc;
+static bool vid2_static_vrfb_alloc;
+static bool debug;
+
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+	"Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+	"Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+	"Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+	"Size of the buffer to be allocated for video2 device");
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+	"Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+	"Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* list of image formats supported by OMAP2 video pipelines */
+static const struct v4l2_fmtdesc omap_formats[] = {
+	{
+		/* Note:  V4L2 defines RGB565 as:
+		 *
+		 *      Byte 0                    Byte 1
+		 *      g2 g1 g0 r4 r3 r2 r1 r0   b4 b3 b2 b1 b0 g5 g4 g3
+		 *
+		 * We interpret RGB565 as:
+		 *
+		 *      Byte 0                    Byte 1
+		 *      g2 g1 g0 b4 b3 b2 b1 b0   r4 r3 r2 r1 r0 g5 g4 g3
+		 */
+		.description = "RGB565, le",
+		.pixelformat = V4L2_PIX_FMT_RGB565,
+	},
+	{
+		/* Note:  V4L2 defines RGB32 as: RGB-8-8-8-8; we use
+		 * this for RGB24 unpack mode, the last 8 bits are ignored
+		 */
+		.description = "RGB32, le",
+		.pixelformat = V4L2_PIX_FMT_RGB32,
+	},
+	{
+		/* Note:  V4L2 defines RGB24 as: RGB-8-8-8; we use
+		 * this for RGB24 packed mode
+		 */
+		.description = "RGB24, le",
+		.pixelformat = V4L2_PIX_FMT_RGB24,
+	},
+	{
+		.description = "YUYV (YUV 4:2:2), packed",
+		.pixelformat = V4L2_PIX_FMT_YUYV,
+	},
+	{
+		.description = "UYVY, packed",
+		.pixelformat = V4L2_PIX_FMT_UYVY,
+	},
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
+/*
+ * Try format
+ */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+	int ifmt, bpp = 0;
+
+	pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+						(u32)VID_MAX_HEIGHT);
+	pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+	for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+		if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+			break;
+	}
+
+	if (ifmt == NUM_OUTPUT_FORMATS)
+		ifmt = 0;
+
+	pix->pixelformat = omap_formats[ifmt].pixelformat;
+	pix->field = V4L2_FIELD_ANY;
+	pix->priv = 0;
+
+	switch (pix->pixelformat) {
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_UYVY:
+	default:
+		pix->colorspace = V4L2_COLORSPACE_JPEG;
+		bpp = YUYV_BPP;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_RGB565X:
+		pix->colorspace = V4L2_COLORSPACE_SRGB;
+		bpp = RGB565_BPP;
+		break;
+	case V4L2_PIX_FMT_RGB24:
+		pix->colorspace = V4L2_COLORSPACE_SRGB;
+		bpp = RGB24_BPP;
+		break;
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_BGR32:
+		pix->colorspace = V4L2_COLORSPACE_SRGB;
+		bpp = RGB32_BPP;
+		break;
+	}
+	pix->bytesperline = pix->width * bpp;
+	pix->sizeimage = pix->bytesperline * pix->height;
+
+	return bpp;
+}
+
+/*
+ * omap_vout_uservirt_to_phys: This function is used to convert a user
+ * space virtual address to a physical address.
+ */
+static u32 omap_vout_uservirt_to_phys(u32 virtp)
+{
+	unsigned long physp = 0;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+
+	vma = find_vma(mm, virtp);
+	/* For kernel direct-mapped memory, take the easy way */
+	if (virtp >= PAGE_OFFSET) {
+		physp = virt_to_phys((void *) virtp);
+	} else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+		/* this will catch kernel-allocated, mmapped-to-usermode
+		   addresses */
+		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+	} else {
+		/* otherwise, use get_user_pages() for general userland pages */
+		int res, nr_pages = 1;
+		struct page *pages;
+		down_read(&current->mm->mmap_sem);
+
+		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+				0, &pages, NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (res == nr_pages) {
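+			/* Physical address of the first pinned page plus the
+			 * offset within that page */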
+			physp =  __pa(page_address(&pages[0]) +
+					(virtp & ~PAGE_MASK));
+		} else {
+			printk(KERN_WARNING VOUT_NAME
+					"get_user_pages failed\n");
+			return 0;
+		}
+	}
+
+	return physp;
+}
+
+/*
+ * Free the V4L2 buffers
+ */
+void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+	int i, numbuffers;
+
+	/* Compute the number and size of the buffers to be freed */
+	numbuffers = (vout->vid) ?  video2_numbuffers : video1_numbuffers;
+	vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
+
+	for (i = 0; i < numbuffers; i++) {
+		omap_vout_free_buffer(vout->buf_virt_addr[i],
+				vout->buffer_size);
+		vout->buf_phy_addr[i] = 0;
+		vout->buf_virt_addr[i] = 0;
+	}
+}
+
+/*
+ * Convert V4L2 rotation to DSS rotation
+ *	V4L2 understands 0, 90, 180, 270.
+ *	Convert to 0, 1, 2 and 3 respectively for DSS
+ */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation,
+			enum dss_rotation *rotation, bool mirror)
+{
+	int ret = 0;
+
+	switch (v4l2_rotation) {
+	case 90:
+		*rotation = dss_rotation_90_degree;
+		break;
+	case 180:
+		*rotation = dss_rotation_180_degree;
+		break;
+	case 270:
+		*rotation = dss_rotation_270_degree;
+		break;
+	case 0:
+		*rotation = dss_rotation_0_degree;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int omap_vout_calculate_offset(struct omap_vout_device *vout)
+{
+	struct omapvideo_info *ovid;
+	struct v4l2_rect *crop = &vout->crop;
+	struct v4l2_pix_format *pix = &vout->pix;
+	int *cropped_offset = &vout->cropped_offset;
+	int ps = 2, line_length = 0;
+
+	ovid = &vout->vid_info;
+
+	if (ovid->rotation_type == VOUT_ROT_VRFB) {
+		omap_vout_calculate_vrfb_offset(vout);
+	} else {
+		vout->line_length = line_length = pix->width;
+
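+		/* ps is the number of bytes per pixel for the chosen format */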
+		if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+			V4L2_PIX_FMT_UYVY == pix->pixelformat)
+			ps = 2;
+		else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
+			ps = 4;
+		else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
+			ps = 3;
+
+		vout->ps = ps;
+
+		*cropped_offset = (line_length * ps) *
+			crop->top + crop->left * ps;
+	}
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
+			__func__, vout->cropped_offset);
+
+	return 0;
+}
+
+/*
+ * Convert V4L2 pixel format to DSS pixel format
+ */
+static int video_mode_to_dss_mode(struct omap_vout_device *vout)
+{
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct v4l2_pix_format *pix = &vout->pix;
+	enum omap_color_mode mode;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	switch (pix->pixelformat) {
+	case 0:
+		break;
+	case V4L2_PIX_FMT_YUYV:
+		mode = OMAP_DSS_COLOR_YUV2;
+		break;
+	case V4L2_PIX_FMT_UYVY:
+		mode = OMAP_DSS_COLOR_UYVY;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		mode = OMAP_DSS_COLOR_RGB16;
+		break;
+	case V4L2_PIX_FMT_RGB24:
+		mode = OMAP_DSS_COLOR_RGB24P;
+		break;
+	case V4L2_PIX_FMT_RGB32:
+		mode = (ovl->id == OMAP_DSS_VIDEO1) ?
+			OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+		break;
+	case V4L2_PIX_FMT_BGR32:
+		mode = OMAP_DSS_COLOR_RGBX32;
+		break;
+	default:
+		mode = -EINVAL;
+	}
+	return mode;
+}
+
+/*
+ * Setup the overlay
+ */
+static int omapvid_setup_overlay(struct omap_vout_device *vout,
+		struct omap_overlay *ovl, int posx, int posy, int outw,
+		int outh, u32 addr)
+{
+	int ret = 0;
+	struct omap_overlay_info info;
+	int cropheight, cropwidth, pixheight, pixwidth;
+
+	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+			(outw != vout->pix.width || outh != vout->pix.height)) {
+		ret = -EINVAL;
+		goto setup_ovl_err;
+	}
+
+	vout->dss_mode = video_mode_to_dss_mode(vout);
+	if (vout->dss_mode == -EINVAL) {
+		ret = -EINVAL;
+		goto setup_ovl_err;
+	}
+
+	/* Setup the input plane parameters according to
+	 * rotation value selected.
+	 */
+	if (is_rotation_90_or_270(vout)) {
+		cropheight = vout->crop.width;
+		cropwidth = vout->crop.height;
+		pixheight = vout->pix.width;
+		pixwidth = vout->pix.height;
+	} else {
+		cropheight = vout->crop.height;
+		cropwidth = vout->crop.width;
+		pixheight = vout->pix.height;
+		pixwidth = vout->pix.width;
+	}
+
+	ovl->get_overlay_info(ovl, &info);
+	info.paddr = addr;
+	info.width = cropwidth;
+	info.height = cropheight;
+	info.color_mode = vout->dss_mode;
+	info.mirror = vout->mirror;
+	info.pos_x = posx;
+	info.pos_y = posy;
+	info.out_width = outw;
+	info.out_height = outh;
+	info.global_alpha = vout->win.global_alpha;
+	if (!is_rotation_enabled(vout)) {
+		info.rotation = 0;
+		info.rotation_type = OMAP_DSS_ROT_DMA;
+		info.screen_width = pixwidth;
+	} else {
+		info.rotation = vout->rotation;
+		info.rotation_type = OMAP_DSS_ROT_VRFB;
+		info.screen_width = 2048;
+	}
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+		"%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
+		"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
+		"out_height=%d rotation_type=%d screen_width=%d\n",
+		__func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height,
+		info.color_mode, info.rotation, info.mirror, info.pos_x,
+		info.pos_y, info.out_width, info.out_height, info.rotation_type,
+		info.screen_width);
+
+	ret = ovl->set_overlay_info(ovl, &info);
+	if (ret)
+		goto setup_ovl_err;
+
+	return 0;
+
+setup_ovl_err:
+	v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
+	return ret;
+}
+
+/*
+ * Initialize the overlay structure
+ */
+static int omapvid_init(struct omap_vout_device *vout, u32 addr)
+{
+	int ret = 0, i;
+	struct v4l2_window *win;
+	struct omap_overlay *ovl;
+	int posx, posy, outw, outh, temp;
+	struct omap_video_timings *timing;
+	struct omapvideo_info *ovid = &vout->vid_info;
+
+	win = &vout->win;
+	for (i = 0; i < ovid->num_overlays; i++) {
+		ovl = ovid->overlays[i];
+		if (!ovl->manager || !ovl->manager->device)
+			return -EINVAL;
+
+		timing = &ovl->manager->device->panel.timings;
+
+		outw = win->w.width;
+		outh = win->w.height;
+		switch (vout->rotation) {
+		case dss_rotation_90_degree:
+			/* Swap the width and height for 90
+			 * and 270 degree rotation
+			 */
+			temp = outw;
+			outw = outh;
+			outh = temp;
+			posy = (timing->y_res - win->w.width) - win->w.left;
+			posx = win->w.top;
+			break;
+
+		case dss_rotation_180_degree:
+			posx = (timing->x_res - win->w.width) - win->w.left;
+			posy = (timing->y_res - win->w.height) - win->w.top;
+			break;
+
+		case dss_rotation_270_degree:
+			temp = outw;
+			outw = outh;
+			outh = temp;
+			posy = win->w.left;
+			posx = (timing->x_res - win->w.height) - win->w.top;
+			break;
+
+		default:
+			posx = win->w.left;
+			posy = win->w.top;
+			break;
+		}
+
+		ret = omapvid_setup_overlay(vout, ovl, posx, posy,
+				outw, outh, addr);
+		if (ret)
+			goto omapvid_init_err;
+	}
+	return 0;
+
+omapvid_init_err:
+	v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
+	return ret;
+}
+
+/*
+ * Apply the changes and set the GO bit of the DSS
+ */
+static int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+	int i;
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid = &vout->vid_info;
+
+	for (i = 0; i < ovid->num_overlays; i++) {
+		ovl = ovid->overlays[i];
+		if (!ovl->manager || !ovl->manager->device)
+			return -EINVAL;
+		ovl->manager->apply(ovl->manager);
+	}
+
+	return 0;
+}
+
+static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
+		unsigned int irqstatus, struct timeval timevalue)
+{
+	u32 fid;
+
+	if (vout->first_int) {
+		vout->first_int = 0;
+		goto err;
+	}
+
+	if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+		fid = 1;
+	else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+		fid = 0;
+	else
+		goto err;
+
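+	/*
+	 * Toggle the expected field and resynchronise on a mismatch; a
+	 * matching even field marks the current frame done and advances to
+	 * the next one.
+	 */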
+	vout->field_id ^= 1;
+	if (fid != vout->field_id) {
+		if (fid == 0)
+			vout->field_id = fid;
+	} else if (0 == fid) {
+		if (vout->cur_frm == vout->next_frm)
+			goto err;
+
+		vout->cur_frm->ts = timevalue;
+		vout->cur_frm->state = VIDEOBUF_DONE;
+		wake_up_interruptible(&vout->cur_frm->done);
+		vout->cur_frm = vout->next_frm;
+	} else {
+		if (list_empty(&vout->dma_queue) ||
+				(vout->cur_frm != vout->next_frm))
+			goto err;
+	}
+
+	return vout->field_id;
+err:
+	return 0;
+}
+
+static void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+	int ret, fid, mgr_id;
+	u32 addr, irq;
+	struct omap_overlay *ovl;
+	struct timeval timevalue;
+	struct omapvideo_info *ovid;
+	struct omap_dss_device *cur_display;
+	struct omap_vout_device *vout = (struct omap_vout_device *)arg;
+
+	if (!vout->streaming)
+		return;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+	/* get the display device attached to the overlay */
+	if (!ovl->manager || !ovl->manager->device)
+		return;
+
+	mgr_id = ovl->manager->id;
+	cur_display = ovl->manager->device;
+
+	spin_lock(&vout->vbq_lock);
+	do_gettimeofday(&timevalue);
+
+	switch (cur_display->type) {
+	case OMAP_DISPLAY_TYPE_DSI:
+	case OMAP_DISPLAY_TYPE_DPI:
+		if (mgr_id == OMAP_DSS_CHANNEL_LCD)
+			irq = DISPC_IRQ_VSYNC;
+		else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
+			irq = DISPC_IRQ_VSYNC2;
+		else
+			goto vout_isr_err;
+
+		if (!(irqstatus & irq))
+			goto vout_isr_err;
+		break;
+	case OMAP_DISPLAY_TYPE_VENC:
+		fid = omapvid_handle_interlace_display(vout, irqstatus,
+				timevalue);
+		if (!fid)
+			goto vout_isr_err;
+		break;
+	case OMAP_DISPLAY_TYPE_HDMI:
+		if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
+			goto vout_isr_err;
+		break;
+	default:
+		goto vout_isr_err;
+	}
+
+	if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+		vout->cur_frm->ts = timevalue;
+		vout->cur_frm->state = VIDEOBUF_DONE;
+		wake_up_interruptible(&vout->cur_frm->done);
+		vout->cur_frm = vout->next_frm;
+	}
+
+	vout->first_int = 0;
+	if (list_empty(&vout->dma_queue))
+		goto vout_isr_err;
+
+	vout->next_frm = list_entry(vout->dma_queue.next,
+			struct videobuf_buffer, queue);
+	list_del(&vout->next_frm->queue);
+
+	vout->next_frm->state = VIDEOBUF_ACTIVE;
+
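+	/* Physical address of the next buffer, adjusted by the crop offset */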
+	addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+		+ vout->cropped_offset;
+
+	/* First save the configuration in the overlay structure */
+	ret = omapvid_init(vout, addr);
+	if (ret)
+		printk(KERN_ERR VOUT_NAME
+			"failed to set overlay info\n");
+	/* Enable the pipeline and set the Go bit */
+	ret = omapvid_apply_changes(vout);
+	if (ret)
+		printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+
+vout_isr_err:
+	spin_unlock(&vout->vbq_lock);
+}
+
+/* Video buffer call backs */
+
+/*
+ * Buffer setup function, called by the videobuf layer when the REQBUF ioctl
+ * is issued. It is used to set up buffers and return the size and count of
+ * the buffers allocated. After this call, the videobuf layer sets up the
+ * buffer queue depending on the returned size and count of buffers.
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+			  unsigned int *size)
+{
+	int startindex = 0, i, j;
+	u32 phy_addr = 0, virt_addr = 0;
+	struct omap_vout_device *vout = q->priv_data;
+	struct omapvideo_info *ovid = &vout->vid_info;
+	int vid_max_buf_size;
+
+	if (!vout)
+		return -EINVAL;
+
+	vid_max_buf_size = vout->vid == OMAP_VIDEO1 ? video1_bufsize :
+		video2_bufsize;
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+		return -EINVAL;
+
+	startindex = (vout->vid == OMAP_VIDEO1) ?
+		video1_numbuffers : video2_numbuffers;
+	if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+		*count = startindex;
+
+	if (ovid->rotation_type == VOUT_ROT_VRFB) {
+		if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+			return -ENOMEM;
+	}
+
+	if (V4L2_MEMORY_MMAP != vout->memory)
+		return 0;
+
+	/* Now allocate the V4L2 buffers */
+	*size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
+	startindex = (vout->vid == OMAP_VIDEO1) ?
+		video1_numbuffers : video2_numbuffers;
+
+	/* Check the size of the buffer */
+	if (*size > vid_max_buf_size) {
+		v4l2_err(&vout->vid_dev->v4l2_dev,
+				"buffer allocation mismatch [%u] [%u]\n",
+				*size, vout->buffer_size);
+		return -ENOMEM;
+	}
+
+	for (i = startindex; i < *count; i++) {
+		vout->buffer_size = *size;
+
+		virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+				&phy_addr);
+		if (!virt_addr) {
+			if (ovid->rotation_type == VOUT_ROT_NONE) {
+				break;
+			} else {
+				if (!is_rotation_enabled(vout))
+					break;
+				/* Free the VRFB buffers if no space for V4L2 buffers */
+				for (j = i; j < *count; j++) {
+					omap_vout_free_buffer(
+							vout->smsshado_virt_addr[j],
+							vout->smsshado_size);
+					vout->smsshado_virt_addr[j] = 0;
+					vout->smsshado_phy_addr[j] = 0;
+				}
+			}
+		}
+		vout->buf_virt_addr[i] = virt_addr;
+		vout->buf_phy_addr[i] = phy_addr;
+	}
+	*count = vout->buffer_allocated = i;
+
+	return 0;
+}
+
+/*
+ * Free the V4L2 buffers allocated in addition to the default
+ * number of buffers
+ */
+static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
+{
+	int num_buffers = 0, i;
+
+	num_buffers = (vout->vid == OMAP_VIDEO1) ?
+		video1_numbuffers : video2_numbuffers;
+
+	for (i = num_buffers; i < vout->buffer_allocated; i++) {
+		if (vout->buf_virt_addr[i])
+			omap_vout_free_buffer(vout->buf_virt_addr[i],
+					vout->buffer_size);
+
+		vout->buf_virt_addr[i] = 0;
+		vout->buf_phy_addr[i] = 0;
+	}
+	vout->buffer_allocated = num_buffers;
+}
+
+/*
+ * This function will be called when VIDIOC_QBUF ioctl is called.
+ * It prepares the buffers before they are given out for display. This
+ * function converts the user space virtual address into a physical address
+ * if the userptr memory exchange mechanism is used. If rotation is enabled,
+ * it copies the entire buffer into VRFB memory space before handing it to
+ * the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+			struct videobuf_buffer *vb,
+			enum v4l2_field field)
+{
+	struct omap_vout_device *vout = q->priv_data;
+	struct omapvideo_info *ovid = &vout->vid_info;
+
+	if (VIDEOBUF_NEEDS_INIT == vb->state) {
+		vb->width = vout->pix.width;
+		vb->height = vout->pix.height;
+		vb->size = vb->width * vb->height * vout->bpp;
+		vb->field = field;
+	}
+	vb->state = VIDEOBUF_PREPARED;
+	/* if user pointer memory mechanism is used, get the physical
+	 * address of the buffer
+	 */
+	if (V4L2_MEMORY_USERPTR == vb->memory) {
+		if (0 == vb->baddr)
+			return -EINVAL;
+		/* Physical address */
+		vout->queued_buf_addr[vb->i] = (u8 *)
+			omap_vout_uservirt_to_phys(vb->baddr);
+	} else {
+		u32 addr, dma_addr;
+		unsigned long size;
+
+		addr = (unsigned long) vout->buf_virt_addr[vb->i];
+		size = (unsigned long) vb->size;
+
+		dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
+				size, DMA_TO_DEVICE);
+		if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
+			v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
+
+		vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
+	}
+
+	if (ovid->rotation_type == VOUT_ROT_VRFB)
+		return omap_vout_prepare_vrfb(vout, vb);
+	else
+		return 0;
+}
+
+/*
+ * Buffer queue function will be called from the videobuf layer when _QBUF
+ * ioctl is called. It is used to enqueue a buffer that is ready to be
+ * displayed.
+ */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+			  struct videobuf_buffer *vb)
+{
+	struct omap_vout_device *vout = q->priv_data;
+
+	/* The driver also maintains a queue, so enqueue the buffer in the
+	 * driver queue */
+	list_add_tail(&vb->queue, &vout->dma_queue);
+
+	vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * Buffer release function is called from the videobuf layer to release
+ * buffers which are already allocated
+ */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+			    struct videobuf_buffer *vb)
+{
+	struct omap_vout_device *vout = q->priv_data;
+
+	vb->state = VIDEOBUF_NEEDS_INIT;
+
+	if (V4L2_MEMORY_MMAP != vout->memory)
+		return;
+}
+
+/*
+ *  File operations
+ */
+static unsigned int omap_vout_poll(struct file *file,
+				   struct poll_table_struct *wait)
+{
+	struct omap_vout_device *vout = file->private_data;
+	struct videobuf_queue *q = &vout->vbq;
+
+	return videobuf_poll_stream(file, q, wait);
+}
+
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+	struct omap_vout_device *vout = vma->vm_private_data;
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+		"vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+	vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+	struct omap_vout_device *vout = vma->vm_private_data;
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+		"vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+	vout->mmap_count--;
+}
+
+static struct vm_operations_struct omap_vout_vm_ops = {
+	.open	= omap_vout_vm_open,
+	.close	= omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int i;
+	void *pos;
+	unsigned long start = vma->vm_start;
+	unsigned long size = (vma->vm_end - vma->vm_start);
+	struct omap_vout_device *vout = file->private_data;
+	struct videobuf_queue *q = &vout->vbq;
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+			" %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+			vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+	/* look for the buffer to map */
+	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+		if (NULL == q->bufs[i])
+			continue;
+		if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+			continue;
+		if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+			break;
+	}
+
+	if (VIDEO_MAX_FRAME == i) {
+		v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+				"offset invalid [offset=0x%lx]\n",
+				(vma->vm_pgoff << PAGE_SHIFT));
+		return -EINVAL;
+	}
+	/* Check the size of the buffer */
+	if (size > vout->buffer_size) {
+		v4l2_err(&vout->vid_dev->v4l2_dev,
+				"insufficient memory [%lu] [%u]\n",
+				size, vout->buffer_size);
+		return -ENOMEM;
+	}
+
+	q->bufs[i]->baddr = vma->vm_start;
+
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &omap_vout_vm_ops;
+	vma->vm_private_data = (void *) vout;
+	pos = (void *)vout->buf_virt_addr[i];
+	vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
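+	/* Remap the buffer into the user VMA one page at a time */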
+	while (size > 0) {
+		unsigned long pfn;
+		pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
+		if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
+			return -EAGAIN;
+		start += PAGE_SIZE;
+		pos += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+	vout->mmap_count++;
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+
+	return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+	unsigned int ret, i;
+	struct videobuf_queue *q;
+	struct omapvideo_info *ovid;
+	struct omap_vout_device *vout = file->private_data;
+
+	if (!vout)
+		return 0;
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+	ovid = &vout->vid_info;
+
+	q = &vout->vbq;
+	/* Disable all the overlays connected with this interface */
+	for (i = 0; i < ovid->num_overlays; i++) {
+		struct omap_overlay *ovl = ovid->overlays[i];
+		if (ovl->manager && ovl->manager->device)
+			ovl->disable(ovl);
+	}
+	/* Turn off the pipeline */
+	ret = omapvid_apply_changes(vout);
+	if (ret)
+		v4l2_warn(&vout->vid_dev->v4l2_dev,
+				"Unable to apply changes\n");
+
+	/* Free all buffers */
+	omap_vout_free_extra_buffers(vout);
+
+	/* Free the VRFB buffers only if they were allocated during reqbufs;
+	 * don't free buffers that were allocated at init time.
+	 */
+	if (ovid->rotation_type == VOUT_ROT_VRFB) {
+		if (!vout->vrfb_static_allocation)
+			omap_vout_free_vrfb_buffers(vout);
+	}
+	videobuf_mmap_free(q);
+
+	/* Even if apply changes fails we should continue
+	   freeing allocated memory */
+	if (vout->streaming) {
+		u32 mask = 0;
+
+		mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+			DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
+		omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+		vout->streaming = 0;
+
+		videobuf_streamoff(q);
+		videobuf_queue_cancel(q);
+	}
+
+	if (vout->mmap_count != 0)
+		vout->mmap_count = 0;
+
+	vout->opened -= 1;
+	file->private_data = NULL;
+
+	if (vout->buffer_allocated)
+		videobuf_mmap_free(q);
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+	return ret;
+}
+
+static int omap_vout_open(struct file *file)
+{
+	struct videobuf_queue *q;
+	struct omap_vout_device *vout = NULL;
+
+	vout = video_drvdata(file);
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+	if (vout == NULL)
+		return -ENODEV;
+
+	/* for now, we only support single open */
+	if (vout->opened)
+		return -EBUSY;
+
+	vout->opened += 1;
+
+	file->private_data = vout;
+	vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+	q = &vout->vbq;
+	video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+	video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+	video_vbq_ops.buf_release = omap_vout_buffer_release;
+	video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+	spin_lock_init(&vout->vbq_lock);
+
+	videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+			&vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
+			sizeof(struct videobuf_buffer), vout, NULL);
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+	return 0;
+}
+
+/*
+ * V4L2 ioctls
+ */
+static int vidioc_querycap(struct file *file, void *fh,
+		struct v4l2_capability *cap)
+{
+	struct omap_vout_device *vout = fh;
+
+	strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+	cap->bus_info[0] = '\0';
+	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+		V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+			struct v4l2_fmtdesc *fmt)
+{
+	int index = fmt->index;
+
+	if (index >= NUM_OUTPUT_FORMATS)
+		return -EINVAL;
+
+	fmt->flags = omap_formats[index].flags;
+	strlcpy(fmt->description, omap_formats[index].description,
+			sizeof(fmt->description));
+	fmt->pixelformat = omap_formats[index].pixelformat;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+			struct v4l2_format *f)
+{
+	struct omap_vout_device *vout = fh;
+
+	f->fmt.pix = vout->pix;
+	return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+			struct v4l2_format *f)
+{
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct omap_video_timings *timing;
+	struct omap_vout_device *vout = fh;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	if (!ovl->manager || !ovl->manager->device)
+		return -EINVAL;
+	/* get the display device attached to the overlay */
+	timing = &ovl->manager->device->panel.timings;
+
+	vout->fbuf.fmt.height = timing->y_res;
+	vout->fbuf.fmt.width = timing->x_res;
+
+	omap_vout_try_format(&f->fmt.pix);
+	return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+			struct v4l2_format *f)
+{
+	int ret, bpp;
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct omap_video_timings *timing;
+	struct omap_vout_device *vout = fh;
+
+	if (vout->streaming)
+		return -EBUSY;
+
+	mutex_lock(&vout->lock);
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	/* get the display device attached to the overlay */
+	if (!ovl->manager || !ovl->manager->device) {
+		ret = -EINVAL;
+		goto s_fmt_vid_out_exit;
+	}
+	timing = &ovl->manager->device->panel.timings;
+
+	/* We don't support RGB24-packed mode if VRFB rotation
+	 * is enabled */
+	if ((is_rotation_enabled(vout)) &&
+			f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+		ret = -EINVAL;
+		goto s_fmt_vid_out_exit;
+	}
+
+	/* get the framebuffer parameters */
+
+	if (is_rotation_90_or_270(vout)) {
+		vout->fbuf.fmt.height = timing->x_res;
+		vout->fbuf.fmt.width = timing->y_res;
+	} else {
+		vout->fbuf.fmt.height = timing->y_res;
+		vout->fbuf.fmt.width = timing->x_res;
+	}
+
+	/* changing to a smaller size is OK */
+
+	bpp = omap_vout_try_format(&f->fmt.pix);
+	f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
+
+	/* try & set the new output format */
+	vout->bpp = bpp;
+	vout->pix = f->fmt.pix;
+	vout->vrfb_bpp = 1;
+
+	/* If YUYV then VRFB bpp is 2, for others it is 1 */
+	if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+			V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+		vout->vrfb_bpp = 2;
+
+	/* set default crop and win */
+	omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+	/* Save the changes in the overlay structure */
+	ret = omapvid_init(vout, 0);
+	if (ret) {
+		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+		goto s_fmt_vid_out_exit;
+	}
+
+	ret = 0;
+
+s_fmt_vid_out_exit:
+	mutex_unlock(&vout->lock);
+	return ret;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+			struct v4l2_format *f)
+{
+	int ret = 0;
+	struct omap_vout_device *vout = fh;
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct v4l2_window *win = &f->fmt.win;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	ret = omap_vout_try_window(&vout->fbuf, win);
+
+	if (!ret) {
+		if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
+			win->global_alpha = 255;
+		else
+			win->global_alpha = f->fmt.win.global_alpha;
+	}
+
+	return ret;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+			struct v4l2_format *f)
+{
+	int ret = 0;
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct omap_vout_device *vout = fh;
+	struct v4l2_window *win = &f->fmt.win;
+
+	mutex_lock(&vout->lock);
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+	if (!ret) {
+		/* Video1 plane does not support global alpha on OMAP3 */
+		if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
+			vout->win.global_alpha = 255;
+		else
+			vout->win.global_alpha = f->fmt.win.global_alpha;
+
+		vout->win.chromakey = f->fmt.win.chromakey;
+	}
+	mutex_unlock(&vout->lock);
+	return ret;
+}
+
+static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
+			struct v4l2_fmtdesc *fmt)
+{
+	int index = fmt->index;
+
+	if (index >= NUM_OUTPUT_FORMATS)
+		return -EINVAL;
+
+	fmt->flags = omap_formats[index].flags;
+	strlcpy(fmt->description, omap_formats[index].description,
+			sizeof(fmt->description));
+	fmt->pixelformat = omap_formats[index].pixelformat;
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+			struct v4l2_format *f)
+{
+	u32 key_value =  0;
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct omap_vout_device *vout = fh;
+	struct omap_overlay_manager_info info;
+	struct v4l2_window *win = &f->fmt.win;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	win->w = vout->win.w;
+	win->field = vout->win.field;
+	win->global_alpha = vout->win.global_alpha;
+
+	if (ovl->manager && ovl->manager->get_manager_info) {
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		key_value = info.trans_key;
+	}
+	win->chromakey = key_value;
+	return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+		struct v4l2_cropcap *cropcap)
+{
+	struct omap_vout_device *vout = fh;
+	struct v4l2_pix_format *pix = &vout->pix;
+
+	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	/* Width and height are always even */
+	cropcap->bounds.width = pix->width & ~1;
+	cropcap->bounds.height = pix->height & ~1;
+
+	omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
+	cropcap->pixelaspect.numerator = 1;
+	cropcap->pixelaspect.denominator = 1;
+	return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+	struct omap_vout_device *vout = fh;
+
+	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+	crop->c = vout->crop;
+	return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+	int ret = -EINVAL;
+	struct omap_vout_device *vout = fh;
+	struct omapvideo_info *ovid;
+	struct omap_overlay *ovl;
+	struct omap_video_timings *timing;
+
+	if (vout->streaming)
+		return -EBUSY;
+
+	mutex_lock(&vout->lock);
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	if (!ovl->manager || !ovl->manager->device) {
+		ret = -EINVAL;
+		goto s_crop_err;
+	}
+	/* get the display device attached to the overlay */
+	timing = &ovl->manager->device->panel.timings;
+
+	if (is_rotation_90_or_270(vout)) {
+		vout->fbuf.fmt.height = timing->x_res;
+		vout->fbuf.fmt.width = timing->y_res;
+	} else {
+		vout->fbuf.fmt.height = timing->y_res;
+		vout->fbuf.fmt.width = timing->x_res;
+	}
+
+	if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+				&vout->fbuf, &crop->c);
+
+s_crop_err:
+	mutex_unlock(&vout->lock);
+	return ret;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+		struct v4l2_queryctrl *ctrl)
+{
+	int ret = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_ROTATE:
+		ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
+		break;
+	case V4L2_CID_BG_COLOR:
+		ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
+		break;
+	case V4L2_CID_VFLIP:
+		ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
+		break;
+	default:
+		ctrl->name[0] = '\0';
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
+{
+	int ret = 0;
+	struct omap_vout_device *vout = fh;
+
+	switch (ctrl->id) {
+	case V4L2_CID_ROTATE:
+		ctrl->value = vout->control[0].value;
+		break;
+	case V4L2_CID_BG_COLOR:
+	{
+		struct omap_overlay_manager_info info;
+		struct omap_overlay *ovl;
+
+		ovl = vout->vid_info.overlays[0];
+		if (!ovl->manager || !ovl->manager->get_manager_info) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		ctrl->value = info.default_color;
+		break;
+	}
+	case V4L2_CID_VFLIP:
+		ctrl->value = vout->control[2].value;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+	int ret = 0;
+	struct omap_vout_device *vout = fh;
+
+	switch (a->id) {
+	case V4L2_CID_ROTATE:
+	{
+		struct omapvideo_info *ovid;
+		int rotation = a->value;
+
+		ovid = &vout->vid_info;
+
+		mutex_lock(&vout->lock);
+		if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
+			mutex_unlock(&vout->lock);
+			ret = -ERANGE;
+			break;
+		}
+
+		if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+			mutex_unlock(&vout->lock);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+							vout->mirror)) {
+			mutex_unlock(&vout->lock);
+			ret = -EINVAL;
+			break;
+		}
+
+		vout->control[0].value = rotation;
+		mutex_unlock(&vout->lock);
+		break;
+	}
+	case V4L2_CID_BG_COLOR:
+	{
+		struct omap_overlay *ovl;
+		unsigned int  color = a->value;
+		struct omap_overlay_manager_info info;
+
+		ovl = vout->vid_info.overlays[0];
+
+		mutex_lock(&vout->lock);
+		if (!ovl->manager || !ovl->manager->get_manager_info) {
+			mutex_unlock(&vout->lock);
+			ret = -EINVAL;
+			break;
+		}
+
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		info.default_color = color;
+		if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+			mutex_unlock(&vout->lock);
+			ret = -EINVAL;
+			break;
+		}
+
+		vout->control[1].value = color;
+		mutex_unlock(&vout->lock);
+		break;
+	}
+	case V4L2_CID_VFLIP:
+	{
+		struct omap_overlay *ovl;
+		struct omapvideo_info *ovid;
+		unsigned int  mirror = a->value;
+
+		ovid = &vout->vid_info;
+		ovl = ovid->overlays[0];
+
+		mutex_lock(&vout->lock);
+		if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
+			mutex_unlock(&vout->lock);
+			ret = -ERANGE;
+			break;
+		}
+
+		if (mirror  && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+			mutex_unlock(&vout->lock);
+			ret = -EINVAL;
+			break;
+		}
+		vout->mirror = mirror;
+		vout->control[2].value = mirror;
+		mutex_unlock(&vout->lock);
+		break;
+	}
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+			struct v4l2_requestbuffers *req)
+{
+	int ret = 0;
+	unsigned int i, num_buffers = 0;
+	struct omap_vout_device *vout = fh;
+	struct videobuf_queue *q = &vout->vbq;
+
+	if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+		return -EINVAL;
+	/* if memory is not mmap or userptr, return an error */
+	if ((V4L2_MEMORY_MMAP != req->memory) &&
+			(V4L2_MEMORY_USERPTR != req->memory))
+		return -EINVAL;
+
+	mutex_lock(&vout->lock);
+	/* Cannot be requested when streaming is on */
+	if (vout->streaming) {
+		ret = -EBUSY;
+		goto reqbuf_err;
+	}
+
+	/* If buffers are already allocated free them */
+	if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+		if (vout->mmap_count) {
+			ret = -EBUSY;
+			goto reqbuf_err;
+		}
+		num_buffers = (vout->vid == OMAP_VIDEO1) ?
+			video1_numbuffers : video2_numbuffers;
+		for (i = num_buffers; i < vout->buffer_allocated; i++) {
+			omap_vout_free_buffer(vout->buf_virt_addr[i],
+					vout->buffer_size);
+			vout->buf_virt_addr[i] = 0;
+			vout->buf_phy_addr[i] = 0;
+		}
+		vout->buffer_allocated = num_buffers;
+		videobuf_mmap_free(q);
+	} else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+		if (vout->buffer_allocated) {
+			videobuf_mmap_free(q);
+			for (i = 0; i < vout->buffer_allocated; i++) {
+				kfree(q->bufs[i]);
+				q->bufs[i] = NULL;
+			}
+			vout->buffer_allocated = 0;
+		}
+	}
+
+	/* store the memory type in the data structure */
+	vout->memory = req->memory;
+
+	INIT_LIST_HEAD(&vout->dma_queue);
+
+	/* call videobuf_reqbufs api */
+	ret = videobuf_reqbufs(q, req);
+	if (ret < 0)
+		goto reqbuf_err;
+
+	vout->buffer_allocated = req->count;
+
+reqbuf_err:
+	mutex_unlock(&vout->lock);
+	return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+			struct v4l2_buffer *b)
+{
+	struct omap_vout_device *vout = fh;
+
+	return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+			struct v4l2_buffer *buffer)
+{
+	struct omap_vout_device *vout = fh;
+	struct videobuf_queue *q = &vout->vbq;
+
+	if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+			(buffer->index >= vout->buffer_allocated) ||
+			(q->bufs[buffer->index]->memory != buffer->memory)) {
+		return -EINVAL;
+	}
+	if (V4L2_MEMORY_USERPTR == buffer->memory) {
+		if ((buffer->length < vout->pix.sizeimage) ||
+				(0 == buffer->m.userptr)) {
+			return -EINVAL;
+		}
+	}
+
+	if ((is_rotation_enabled(vout)) &&
+			vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+		v4l2_warn(&vout->vid_dev->v4l2_dev,
+				"DMA Channel not allocated for Rotation\n");
+		return -EINVAL;
+	}
+
+	return videobuf_qbuf(q, buffer);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct omap_vout_device *vout = fh;
+	struct videobuf_queue *q = &vout->vbq;
+
+	int ret;
+	u32 addr;
+	unsigned long size;
+	struct videobuf_buffer *vb;
+
+	vb = q->bufs[b->index];
+
+	if (!vout->streaming)
+		return -EINVAL;
+
+	if (file->f_flags & O_NONBLOCK)
+		/* Call videobuf_dqbuf for non blocking mode */
+		ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+	else
+		/* Call videobuf_dqbuf for  blocking mode */
+		ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+
+	addr = (unsigned long) vout->buf_phy_addr[vb->i];
+	size = (unsigned long) vb->size;
+	dma_unmap_single(vout->vid_dev->v4l2_dev.dev,  addr,
+				size, DMA_TO_DEVICE);
+	return ret;
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+	int ret = 0, j;
+	u32 addr = 0, mask = 0;
+	struct omap_vout_device *vout = fh;
+	struct videobuf_queue *q = &vout->vbq;
+	struct omapvideo_info *ovid = &vout->vid_info;
+
+	mutex_lock(&vout->lock);
+
+	if (vout->streaming) {
+		ret = -EBUSY;
+		goto streamon_err;
+	}
+
+	ret = videobuf_streamon(q);
+	if (ret)
+		goto streamon_err;
+
+	if (list_empty(&vout->dma_queue)) {
+		ret = -EIO;
+		goto streamon_err1;
+	}
+
+	/* Get the next frame from the buffer queue */
+	vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+			struct videobuf_buffer, queue);
+	/* Remove buffer from the buffer queue */
+	list_del(&vout->cur_frm->queue);
+	/* Mark state of the current frame to active */
+	vout->cur_frm->state = VIDEOBUF_ACTIVE;
+	/* Initialize the field_id member */
+	vout->field_id = 0;
+
+	/* set flag here. Next QBUF will start DMA */
+	vout->streaming = 1;
+
+	vout->first_int = 1;
+
+	if (omap_vout_calculate_offset(vout)) {
+		ret = -EINVAL;
+		goto streamon_err1;
+	}
+	addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+		+ vout->cropped_offset;
+
+	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+		| DISPC_IRQ_VSYNC2;
+
+	omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+	for (j = 0; j < ovid->num_overlays; j++) {
+		struct omap_overlay *ovl = ovid->overlays[j];
+
+		if (ovl->manager && ovl->manager->device) {
+			struct omap_overlay_info info;
+			ovl->get_overlay_info(ovl, &info);
+			info.paddr = addr;
+			if (ovl->set_overlay_info(ovl, &info)) {
+				ret = -EINVAL;
+				goto streamon_err1;
+			}
+		}
+	}
+
+	/* First save the configuration in the overlay structure */
+	ret = omapvid_init(vout, addr);
+	if (ret)
+		v4l2_err(&vout->vid_dev->v4l2_dev,
+				"failed to set overlay info\n");
+	/* Enable the pipeline and set the Go bit */
+	ret = omapvid_apply_changes(vout);
+	if (ret)
+		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+
+	for (j = 0; j < ovid->num_overlays; j++) {
+		struct omap_overlay *ovl = ovid->overlays[j];
+
+		if (ovl->manager && ovl->manager->device) {
+			ret = ovl->enable(ovl);
+			if (ret)
+				goto streamon_err1;
+		}
+	}
+
+	ret = 0;
+
+streamon_err1:
+	if (ret)
+		ret = videobuf_streamoff(q);
+streamon_err:
+	mutex_unlock(&vout->lock);
+	return ret;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+	u32 mask = 0;
+	int ret = 0, j;
+	struct omap_vout_device *vout = fh;
+	struct omapvideo_info *ovid = &vout->vid_info;
+
+	if (!vout->streaming)
+		return -EINVAL;
+
+	vout->streaming = 0;
+	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+		| DISPC_IRQ_VSYNC2;
+
+	omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+	for (j = 0; j < ovid->num_overlays; j++) {
+		struct omap_overlay *ovl = ovid->overlays[j];
+
+		if (ovl->manager && ovl->manager->device)
+			ovl->disable(ovl);
+	}
+
+	/* Turn off the pipeline */
+	ret = omapvid_apply_changes(vout);
+	if (ret)
+		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
+				" streamoff\n");
+
+	INIT_LIST_HEAD(&vout->dma_queue);
+	ret = videobuf_streamoff(&vout->vbq);
+
+	return ret;
+}
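
Taken together, the reqbufs/qbuf/streamon/dqbuf/streamoff handlers above implement the standard V4L2 output streaming cycle; note that STREAMON above returns -EIO if the dma_queue is empty, so buffers must be queued first. A rough user-space sketch of the sequence, assuming MMAP buffers and an already-open fd (both assumptions, most error handling omitted):

	#include <stddef.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/videodev2.h>

	static int stream_frames(int fd, int nframes)
	{
		struct v4l2_requestbuffers req = {
			.count = 4, .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
			.memory = V4L2_MEMORY_MMAP,
		};
		enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		void *map[4];
		unsigned int i;

		if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
			return -1;
		if (req.count > 4)	/* the driver may adjust the count */
			req.count = 4;

		for (i = 0; i < req.count; i++) {
			struct v4l2_buffer buf = {
				.index = i, .type = type,
				.memory = V4L2_MEMORY_MMAP,
			};

			if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
				return -1;
			map[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
				      MAP_SHARED, fd, buf.m.offset);
			if (map[i] == MAP_FAILED)
				return -1;
			/* ... fill map[i] with image data here ... */
			if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
				return -1;
		}

		if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
			return -1;

		while (nframes-- > 0) {
			struct v4l2_buffer buf = {
				.type = type, .memory = V4L2_MEMORY_MMAP,
			};

			if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)	/* blocks unless O_NONBLOCK */
				break;
			/* ... refill the dequeued buffer ... */
			if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
				break;
		}

		return ioctl(fd, VIDIOC_STREAMOFF, &type);
	}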
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+				struct v4l2_framebuffer *a)
+{
+	int enable = 0;
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct omap_vout_device *vout = fh;
+	struct omap_overlay_manager_info info;
+	enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	/* OMAP DSS doesn't support Source and Destination color
+	   key together */
+	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+			(a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+		return -EINVAL;
+	/* OMAP DSS Doesn't support the Destination color key
+	   and alpha blending together */
+	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+			(a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+		return -EINVAL;
+
+	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+		vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+		key_type =  OMAP_DSS_COLOR_KEY_VID_SRC;
+	} else
+		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+		vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+		key_type =  OMAP_DSS_COLOR_KEY_GFX_DST;
+	} else
+		vout->fbuf.flags &=  ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+	if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+				V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+		enable = 1;
+	else
+		enable = 0;
+	if (ovl->manager && ovl->manager->get_manager_info &&
+			ovl->manager->set_manager_info) {
+
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		info.trans_enabled = enable;
+		info.trans_key_type = key_type;
+		info.trans_key = vout->win.chromakey;
+
+		if (ovl->manager->set_manager_info(ovl->manager, &info))
+			return -EINVAL;
+	}
+	if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+		vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+		enable = 1;
+	} else {
+		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+		enable = 0;
+	}
+	if (ovl->manager && ovl->manager->get_manager_info &&
+			ovl->manager->set_manager_info) {
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		/* enable this only if there is no zorder cap */
+		if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
+			info.partial_alpha_enabled = enable;
+		if (ovl->manager->set_manager_info(ovl->manager, &info))
+			return -EINVAL;
+	}
+
+	return 0;
+}
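
The flag handling above is driven from user space with VIDIOC_S_FBUF, while the key colour itself is taken from the overlay window (vout->win.chromakey) set via VIDIOC_S_FMT. A hedged sketch of enabling destination colour keying, assuming an already-open fd and an arbitrary key value:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int enable_dst_colour_key(int fd, unsigned int key)
	{
		struct v4l2_format fmt;
		struct v4l2_framebuffer fbuf;

		/* The key comes from the overlay window's chromakey field */
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
		if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0)
			return -1;
		fmt.fmt.win.chromakey = key;
		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
			return -1;

		/* Destination and source keying are mutually exclusive here */
		if (ioctl(fd, VIDIOC_G_FBUF, &fbuf) < 0)
			return -1;
		fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
		fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		return ioctl(fd, VIDIOC_S_FBUF, &fbuf);
	}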
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+		struct v4l2_framebuffer *a)
+{
+	struct omap_overlay *ovl;
+	struct omapvideo_info *ovid;
+	struct omap_vout_device *vout = fh;
+	struct omap_overlay_manager_info info;
+
+	ovid = &vout->vid_info;
+	ovl = ovid->overlays[0];
+
+	/* The video overlay must stay within the framebuffer and can't be
+	   positioned independently. */
+	a->flags = V4L2_FBUF_FLAG_OVERLAY;
+	a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+		| V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+	if (ovl->manager && ovl->manager->get_manager_info) {
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+			a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+			a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+	}
+	if (ovl->manager && ovl->manager->get_manager_info) {
+		ovl->manager->get_manager_info(ovl->manager, &info);
+		if (info.partial_alpha_enabled)
+			a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+	.vidioc_querycap      			= vidioc_querycap,
+	.vidioc_enum_fmt_vid_out 		= vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out			= vidioc_g_fmt_vid_out,
+	.vidioc_try_fmt_vid_out			= vidioc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out			= vidioc_s_fmt_vid_out,
+	.vidioc_queryctrl    			= vidioc_queryctrl,
+	.vidioc_g_ctrl       			= vidioc_g_ctrl,
+	.vidioc_s_fbuf				= vidioc_s_fbuf,
+	.vidioc_g_fbuf				= vidioc_g_fbuf,
+	.vidioc_s_ctrl       			= vidioc_s_ctrl,
+	.vidioc_try_fmt_vid_overlay 		= vidioc_try_fmt_vid_overlay,
+	.vidioc_s_fmt_vid_overlay		= vidioc_s_fmt_vid_overlay,
+	.vidioc_enum_fmt_vid_overlay		= vidioc_enum_fmt_vid_overlay,
+	.vidioc_g_fmt_vid_overlay		= vidioc_g_fmt_vid_overlay,
+	.vidioc_cropcap				= vidioc_cropcap,
+	.vidioc_g_crop				= vidioc_g_crop,
+	.vidioc_s_crop				= vidioc_s_crop,
+	.vidioc_reqbufs				= vidioc_reqbufs,
+	.vidioc_querybuf			= vidioc_querybuf,
+	.vidioc_qbuf				= vidioc_qbuf,
+	.vidioc_dqbuf				= vidioc_dqbuf,
+	.vidioc_streamon			= vidioc_streamon,
+	.vidioc_streamoff			= vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+	.owner 		= THIS_MODULE,
+	.poll		= omap_vout_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap 		= omap_vout_mmap,
+	.open 		= omap_vout_open,
+	.release 	= omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+	struct video_device *vfd;
+	struct v4l2_pix_format *pix;
+	struct v4l2_control *control;
+	struct omap_dss_device *display =
+		vout->vid_info.overlays[0]->manager->device;
+
+	/* set the default pix */
+	pix = &vout->pix;
+
+	/* Set the default picture size to QQVGA */
+	pix->width = QQVGA_WIDTH;
+	pix->height = QQVGA_HEIGHT;
+
+	/* Default pixel format is RGB 5-6-5 */
+	pix->pixelformat = V4L2_PIX_FMT_RGB565;
+	pix->field = V4L2_FIELD_ANY;
+	pix->bytesperline = pix->width * 2;
+	pix->sizeimage = pix->bytesperline * pix->height;
+	pix->priv = 0;
+	pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+	vout->bpp = RGB565_BPP;
+	vout->fbuf.fmt.width  =  display->panel.timings.x_res;
+	vout->fbuf.fmt.height =  display->panel.timings.y_res;
+
+	/* Set the data structures for the overlay parameters */
+	vout->win.global_alpha = 255;
+	vout->fbuf.flags = 0;
+	vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+		V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+	vout->win.chromakey = 0;
+
+	omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+	/* Initialize the control variables for
+	   rotation, flipping and background color. */
+	control = vout->control;
+	control[0].id = V4L2_CID_ROTATE;
+	control[0].value = 0;
+	vout->rotation = 0;
+	vout->mirror = 0;
+	vout->control[2].id = V4L2_CID_HFLIP;
+	vout->control[2].value = 0;
+	if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+		vout->vrfb_bpp = 2;
+
+	control[1].id = V4L2_CID_BG_COLOR;
+	control[1].value = 0;
+
+	/* initialize the video_device struct */
+	vfd = vout->vfd = video_device_alloc();
+
+	if (!vfd) {
+		printk(KERN_ERR VOUT_NAME ": could not allocate"
+				" video device struct\n");
+		return -ENOMEM;
+	}
+	vfd->release = video_device_release;
+	vfd->ioctl_ops = &vout_ioctl_ops;
+
+	strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+
+	vfd->fops = &omap_vout_fops;
+	vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
+	mutex_init(&vout->lock);
+
+	vfd->minor = -1;
+	return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+		int vid_num)
+{
+	u32 numbuffers;
+	int ret = 0, i;
+	struct omapvideo_info *ovid;
+	struct omap_vout_device *vout;
+	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+	struct omap2video_device *vid_dev =
+		container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+	vout = vid_dev->vouts[vid_num];
+	ovid = &vout->vid_info;
+
+	numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
+	vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
+	dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
+
+	for (i = 0; i < numbuffers; i++) {
+		vout->buf_virt_addr[i] =
+			omap_vout_alloc_buffer(vout->buffer_size,
+					(u32 *) &vout->buf_phy_addr[i]);
+		if (!vout->buf_virt_addr[i]) {
+			numbuffers = i;
+			ret = -ENOMEM;
+			goto free_buffers;
+		}
+	}
+
+	vout->cropped_offset = 0;
+
+	if (ovid->rotation_type == VOUT_ROT_VRFB) {
+		int static_vrfb_allocation = (vid_num == 0) ?
+			vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+		ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
+				static_vrfb_allocation);
+	}
+
+	return ret;
+
+free_buffers:
+	for (i = 0; i < numbuffers; i++) {
+		omap_vout_free_buffer(vout->buf_virt_addr[i],
+						vout->buffer_size);
+		vout->buf_virt_addr[i] = 0;
+		vout->buf_phy_addr[i] = 0;
+	}
+	return ret;
+
+}
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+	int ret = 0, k;
+	struct omap_vout_device *vout;
+	struct video_device *vfd = NULL;
+	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+	struct omap2video_device *vid_dev = container_of(v4l2_dev,
+			struct omap2video_device, v4l2_dev);
+
+	for (k = 0; k < pdev->num_resources; k++) {
+
+		vout = kzalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+		if (!vout) {
+			dev_err(&pdev->dev, ": could not allocate memory\n");
+			return -ENOMEM;
+		}
+
+		vout->vid = k;
+		vid_dev->vouts[k] = vout;
+		vout->vid_dev = vid_dev;
+		/* Select video2 if only 1 overlay is controlled by V4L2 */
+		if (pdev->num_resources == 1)
+			vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+		else
+			/* Else select video1 and video2 one by one. */
+			vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+		vout->vid_info.num_overlays = 1;
+		vout->vid_info.id = k + 1;
+
+		/* Set VRFB as rotation_type for omap2 and omap3 */
+		if (cpu_is_omap24xx() || cpu_is_omap34xx())
+			vout->vid_info.rotation_type = VOUT_ROT_VRFB;
+
+		/* Set up the default configuration for the video devices */
+		if (omap_vout_setup_video_data(vout) != 0) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		/* Allocate default number of buffers for the video streaming
+		 * and reserve the VRFB space for rotation
+		 */
+		if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+			ret = -ENOMEM;
+			goto error1;
+		}
+
+		/* Register the Video device with V4L2
+		 */
+		vfd = vout->vfd;
+		if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
+			dev_err(&pdev->dev, ": Could not register "
+					"Video for Linux device\n");
+			vfd->minor = -1;
+			ret = -ENODEV;
+			goto error2;
+		}
+		video_set_drvdata(vfd, vout);
+
+		/* Configure the overlay structure */
+		ret = omapvid_init(vid_dev->vouts[k], 0);
+		if (!ret)
+			goto success;
+
+error2:
+		if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+			omap_vout_release_vrfb(vout);
+		omap_vout_free_buffers(vout);
+error1:
+		video_device_release(vfd);
+error:
+		kfree(vout);
+		return ret;
+
+success:
+		dev_info(&pdev->dev, ": registered and initialized"
+				" video device %d\n", vfd->minor);
+		if (k == (pdev->num_resources - 1))
+			return 0;
+	}
+
+	return -ENODEV;
+}
+/* Driver functions */
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+	struct video_device *vfd;
+	struct omapvideo_info *ovid;
+
+	if (!vout)
+		return;
+
+	vfd = vout->vfd;
+	ovid = &vout->vid_info;
+	if (vfd) {
+		if (!video_is_registered(vfd)) {
+			/*
+			 * The device was never registered, so release the
+			 * video_device struct directly.
+			 */
+			video_device_release(vfd);
+		} else {
+			/*
+			 * The unregister function will release the video_device
+			 * struct as well as unregistering it.
+			 */
+			video_unregister_device(vfd);
+		}
+	}
+	if (ovid->rotation_type == VOUT_ROT_VRFB) {
+		omap_vout_release_vrfb(vout);
+		/* Free the VRFB buffers if they were allocated
+		 * at init time
+		 */
+		if (vout->vrfb_static_allocation)
+			omap_vout_free_vrfb_buffers(vout);
+	}
+	omap_vout_free_buffers(vout);
+
+	kfree(vout);
+}
+
+static int omap_vout_remove(struct platform_device *pdev)
+{
+	int k;
+	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+	struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+			omap2video_device, v4l2_dev);
+
+	v4l2_device_unregister(v4l2_dev);
+	for (k = 0; k < pdev->num_resources; k++)
+		omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+	for (k = 0; k < vid_dev->num_displays; k++) {
+		if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+			vid_dev->displays[k]->driver->disable(vid_dev->displays[k]);
+
+		omap_dss_put_device(vid_dev->displays[k]);
+	}
+	kfree(vid_dev);
+	return 0;
+}
+
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+	int ret = 0, i;
+	struct omap_overlay *ovl;
+	struct omap_dss_device *dssdev = NULL;
+	struct omap_dss_device *def_display;
+	struct omap2video_device *vid_dev = NULL;
+
+	if (pdev->num_resources == 0) {
+		dev_err(&pdev->dev, "probed for an unknown device\n");
+		return -ENODEV;
+	}
+
+	vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+	if (vid_dev == NULL)
+		return -ENOMEM;
+
+	vid_dev->num_displays = 0;
+	for_each_dss_dev(dssdev) {
+		omap_dss_get_device(dssdev);
+
+		if (!dssdev->driver) {
+			dev_warn(&pdev->dev, "no driver for display: %s\n",
+					dssdev->name);
+			omap_dss_put_device(dssdev);
+			continue;
+		}
+
+		vid_dev->displays[vid_dev->num_displays++] = dssdev;
+	}
+
+	if (vid_dev->num_displays == 0) {
+		dev_err(&pdev->dev, "no displays\n");
+		ret = -EINVAL;
+		goto probe_err0;
+	}
+
+	vid_dev->num_overlays = omap_dss_get_num_overlays();
+	for (i = 0; i < vid_dev->num_overlays; i++)
+		vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+	vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+	for (i = 0; i < vid_dev->num_managers; i++)
+		vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+	/* Get the video1 and video2 overlays.
+	 * Set up the display attached to those overlays.
+	 */
+	for (i = 1; i < vid_dev->num_overlays; i++) {
+		ovl = omap_dss_get_overlay(i);
+		if (ovl->manager && ovl->manager->device) {
+			def_display = ovl->manager->device;
+		} else {
+			dev_warn(&pdev->dev, "cannot find display\n");
+			def_display = NULL;
+		}
+		if (def_display) {
+			struct omap_dss_driver *dssdrv = def_display->driver;
+
+			ret = dssdrv->enable(def_display);
+			if (ret) {
+				/* This is not treated as an error, as the
+				 * display may already have been enabled by
+				 * the frame buffer driver.
+				 */
+				dev_warn(&pdev->dev,
+					"'%s' Display already enabled\n",
+					def_display->name);
+			}
+		}
+	}
+
+	if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+		dev_err(&pdev->dev, "v4l2_device_register failed\n");
+		ret = -ENODEV;
+		goto probe_err1;
+	}
+
+	ret = omap_vout_create_video_devices(pdev);
+	if (ret)
+		goto probe_err2;
+
+	for (i = 0; i < vid_dev->num_displays; i++) {
+		struct omap_dss_device *display = vid_dev->displays[i];
+
+		if (display->driver->update)
+			display->driver->update(display, 0, 0,
+					display->panel.timings.x_res,
+					display->panel.timings.y_res);
+	}
+	return 0;
+
+probe_err2:
+	v4l2_device_unregister(&vid_dev->v4l2_dev);
+probe_err1:
+	for (i = 1; i < vid_dev->num_overlays; i++) {
+		def_display = NULL;
+		ovl = omap_dss_get_overlay(i);
+		if (ovl->manager && ovl->manager->device)
+			def_display = ovl->manager->device;
+
+		if (def_display && def_display->driver)
+			def_display->driver->disable(def_display);
+	}
+probe_err0:
+	kfree(vid_dev);
+	return ret;
+}
+
+static struct platform_driver omap_vout_driver = {
+	.driver = {
+		.name = VOUT_NAME,
+	},
+	.remove = omap_vout_remove,
+};
+
+static int __init omap_vout_init(void)
+{
+	if (platform_driver_probe(&omap_vout_driver, omap_vout_probe) != 0) {
+		printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+	platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
new file mode 100644
index 0000000..4be26abf6c
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -0,0 +1,390 @@
+/*
+ * omap_vout_vrfb.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include <plat/dma.h>
+#include <plat/vrfb.h>
+
+#include "omap_voutdef.h"
+#include "omap_voutlib.h"
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+		unsigned int *count, int startindex)
+{
+	int i, j;
+
+	for (i = 0; i < *count; i++) {
+		if (!vout->smsshado_virt_addr[i]) {
+			vout->smsshado_virt_addr[i] =
+				omap_vout_alloc_buffer(vout->smsshado_size,
+						&vout->smsshado_phy_addr[i]);
+		}
+		if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+			if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+				break;
+		}
+		if (!vout->smsshado_virt_addr[i]) {
+			for (j = 0; j < i; j++) {
+				omap_vout_free_buffer(
+						vout->smsshado_virt_addr[j],
+						vout->smsshado_size);
+				vout->smsshado_virt_addr[j] = 0;
+				vout->smsshado_phy_addr[j] = 0;
+			}
+			*count = 0;
+			return -ENOMEM;
+		}
+		memset((void *) vout->smsshado_virt_addr[i], 0,
+				vout->smsshado_size);
+	}
+	return 0;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+	struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+	t->tx_status = 1;
+	wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Free VRFB buffers
+ */
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+	int j;
+
+	for (j = 0; j < VRFB_NUM_BUFS; j++) {
+		omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+				vout->smsshado_size);
+		vout->smsshado_virt_addr[j] = 0;
+		vout->smsshado_phy_addr[j] = 0;
+	}
+}
+
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+			      bool static_vrfb_allocation)
+{
+	int ret = 0, i, j;
+	struct omap_vout_device *vout;
+	struct video_device *vfd;
+	int image_width, image_height;
+	int vrfb_num_bufs = VRFB_NUM_BUFS;
+	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+	struct omap2video_device *vid_dev =
+		container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+	vout = vid_dev->vouts[vid_num];
+	vfd = vout->vfd;
+
+	for (i = 0; i < VRFB_NUM_BUFS; i++) {
+		if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+			dev_info(&pdev->dev, ": VRFB allocation failed\n");
+			for (j = 0; j < i; j++)
+				omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+			ret = -ENOMEM;
+			goto free_buffers;
+		}
+	}
+
+	/* Calculate VRFB memory size; allocate for the worst-case size */
+	image_width = VID_MAX_WIDTH / TILE_SIZE;
+	if (VID_MAX_WIDTH % TILE_SIZE)
+		image_width++;
+
+	image_width = image_width * TILE_SIZE;
+	image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+	if (VID_MAX_HEIGHT % TILE_SIZE)
+		image_height++;
+
+	image_height = image_height * TILE_SIZE;
+	vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+	/*
+	 * Request and Initialize DMA, for DMA based VRFB transfer
+	 */
+	vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+	vout->vrfb_dma_tx.dma_ch = -1;
+	vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+	ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+			omap_vout_vrfb_dma_tx_callback,
+			(void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+	if (ret < 0) {
+		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+		dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+				" video%d\n", vfd->minor);
+	}
+	init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+	/* Static allocation of the VRFB buffers is requested through
+	   command line arguments */
+	if (static_vrfb_allocation) {
+		if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+			ret =  -ENOMEM;
+			goto release_vrfb_ctx;
+		}
+		vout->vrfb_static_allocation = 1;
+	}
+	return 0;
+
+release_vrfb_ctx:
+	for (j = 0; j < VRFB_NUM_BUFS; j++)
+		omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+free_buffers:
+	omap_vout_free_buffers(vout);
+
+	return ret;
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+	int i;
+
+	for (i = 0; i < VRFB_NUM_BUFS; i++)
+		omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+	if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+		omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+	}
+}
+
+/*
+ * Allocate the buffers for the VRFB space.  Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+			  unsigned int *count, unsigned int startindex)
+{
+	int i;
+	bool yuv_mode;
+
+	if (!is_rotation_enabled(vout))
+		return 0;
+
+	/* If rotation is enabled, allocate memory for VRFB space also */
+	*count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;
+
+	/* Allocate the VRFB buffers only if they were not
+	 * already allocated at init time.
+	 */
+	if (!vout->vrfb_static_allocation)
+		if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+			return -ENOMEM;
+
+	if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+			vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+		yuv_mode = true;
+	else
+		yuv_mode = false;
+
+	for (i = 0; i < *count; i++)
+		omap_vrfb_setup(&vout->vrfb_context[i],
+				vout->smsshado_phy_addr[i], vout->pix.width,
+				vout->pix.height, vout->bpp, yuv_mode);
+
+	return 0;
+}
+
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+				struct videobuf_buffer *vb)
+{
+	dma_addr_t dmabuf;
+	struct vid_vrfb_dma *tx;
+	enum dss_rotation rotation;
+	u32 dest_frame_index = 0, src_element_index = 0;
+	u32 dest_element_index = 0, src_frame_index = 0;
+	u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+	if (!is_rotation_enabled(vout))
+		return 0;
+
+	dmabuf = vout->buf_phy_addr[vb->i];
+	/* If rotation is enabled, copy the input buffer into the VRFB
+	 * memory space using DMA. The buffer is copied into the VRFB
+	 * region for the desired angle, and the DSS then reads the
+	 * image back from VRFB memory at the 0 degree angle.
+	 */
+	pixsize = vout->bpp * vout->vrfb_bpp;
+	/*
+	 * DMA transfer in double index mode
+	 */
+
+	/* Frame index */
+	dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+			(vout->pix.width * vout->bpp)) + 1;
+
+	/* Source and destination parameters */
+	src_element_index = 0;
+	src_frame_index = 0;
+	dest_element_index = 1;
+	/* Number of elements per frame */
+	elem_count = vout->pix.width * vout->bpp;
+	frame_count = vout->pix.height;
+	tx = &vout->vrfb_dma_tx;
+	tx->tx_status = 0;
+	omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+			(elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+			tx->dev_id, 0x0);
+	/* src_port required only for OMAP1 */
+	omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+			dmabuf, src_element_index, src_frame_index);
+	/* Set DMA source burst mode for VRFB */
+	omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+	rotation = calc_rotation(vout);
+
+	/* dest_port required only for OMAP1 */
+	omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+			vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+			dest_frame_index);
+	/* Set DMA destination burst mode for VRFB */
+	omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+	omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+	omap_start_dma(tx->dma_ch);
+	interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
+	if (tx->tx_status == 0) {
+		omap_stop_dma(tx->dma_ch);
+		return -EINVAL;
+	}
+	/* Store the buffer's physical address into an array. Addresses
+	 * from this array will be used to configure the DSS. */
+	vout->queued_buf_addr[vb->i] = (u8 *)
+		vout->vrfb_context[vb->i].paddr[rotation];
+	return 0;
+}
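
The double-indexed destination above is what gives each copied source line a full VRFB line of its own: with dest_element_index = 1 the S32 elements of one line land back to back, and the computed dest_frame_index then skips to the start of the next 2048-pixel VRFB line, so the effective destination stride is MAX_PIXELS_PER_LINE * pixsize bytes. A small illustrative helper, not driver code, assuming the usual OMAP DMA convention that an index value of 1 means contiguous:

	#define MAX_PIXELS_PER_LINE	2048	/* from omap_voutdef.h */

	/* Illustrative only: byte address in VRFB space where source pixel
	 * (x, y) ends up, given the transfer parameters programmed above. */
	static unsigned long vrfb_dest_addr(unsigned long paddr, unsigned int x,
					    unsigned int y, unsigned int bpp,
					    unsigned int vrfb_bpp)
	{
		unsigned int pixsize = bpp * vrfb_bpp;	/* bytes per VRFB pixel */

		return paddr + (unsigned long)y * MAX_PIXELS_PER_LINE * pixsize
			     + (unsigned long)x * bpp;
	}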
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32-pixel alignment used with rotation.
+ */
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
+{
+	enum dss_rotation rotation;
+	bool mirroring = vout->mirror;
+	struct v4l2_rect *crop = &vout->crop;
+	struct v4l2_pix_format *pix = &vout->pix;
+	int *cropped_offset = &vout->cropped_offset;
+	int vr_ps = 1, ps = 2, temp_ps = 2;
+	int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+	rotation = calc_rotation(vout);
+
+	if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+			V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+		if (is_rotation_enabled(vout)) {
+			/*
+			 * ps    - Actual pixel size for YUYV/UYVY with
+			 *         VRFB/mirroring is 4 bytes
+			 * vr_ps - Virtual pixel size for YUYV/UYVY is
+			 *         2 bytes
+			 */
+			ps = 4;
+			vr_ps = 2;
+		} else {
+			ps = 2;	/* otherwise the pixel size is 2 bytes */
+		}
+	} else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+		ps = 4;
+	} else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+		ps = 3;
+	}
+	vout->ps = ps;
+	vout->vr_ps = vr_ps;
+
+	if (is_rotation_enabled(vout)) {
+		line_length = MAX_PIXELS_PER_LINE;
+		ctop = (pix->height - crop->height) - crop->top;
+		cleft = (pix->width - crop->width) - crop->left;
+	} else {
+		line_length = pix->width;
+	}
+	vout->line_length = line_length;
+	switch (rotation) {
+	case dss_rotation_90_degree:
+		offset = vout->vrfb_context[0].yoffset *
+			vout->vrfb_context[0].bytespp;
+		temp_ps = ps / vr_ps;
+		if (mirroring == 0) {
+			*cropped_offset = offset + line_length *
+				temp_ps * cleft + crop->top * temp_ps;
+		} else {
+			*cropped_offset = offset + line_length * temp_ps *
+				cleft + crop->top * temp_ps + (line_length *
+				((crop->width / (vr_ps)) - 1) * ps);
+		}
+		break;
+	case dss_rotation_180_degree:
+		offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+			vout->vrfb_context[0].bytespp) +
+			(vout->vrfb_context[0].xoffset *
+			vout->vrfb_context[0].bytespp));
+		if (mirroring == 0) {
+			*cropped_offset = offset + (line_length * ps * ctop) +
+				(cleft / vr_ps) * ps;
+
+		} else {
+			*cropped_offset = offset + (line_length * ps * ctop) +
+				(cleft / vr_ps) * ps + (line_length *
+				(crop->height - 1) * ps);
+		}
+		break;
+	case dss_rotation_270_degree:
+		offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+			vout->vrfb_context[0].bytespp;
+		temp_ps = ps / vr_ps;
+		if (mirroring == 0) {
+			*cropped_offset = offset + line_length *
+			    temp_ps * crop->left + ctop * ps;
+		} else {
+			*cropped_offset = offset + line_length *
+				temp_ps * crop->left + ctop * ps +
+				(line_length * ((crop->width / vr_ps) - 1) *
+				 ps);
+		}
+		break;
+	case dss_rotation_0_degree:
+		if (mirroring == 0) {
+			*cropped_offset = (line_length * ps) *
+				crop->top + (crop->left / vr_ps) * ps;
+		} else {
+			*cropped_offset = (line_length * ps) *
+				crop->top + (crop->left / vr_ps) * ps +
+				(line_length * (crop->height - 1) * ps);
+		}
+		break;
+	default:
+		*cropped_offset = (line_length * ps * crop->top) /
+			vr_ps + (crop->left * ps) / vr_ps +
+			((crop->width / vr_ps) - 1) * ps;
+		break;
+	}
+}
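
As a numeric check of the arithmetic above, the 90-degree, non-mirrored YUYV branch reduces to the sketch below (illustrative only; yoffset and bytespp stand in for vrfb_context[0].yoffset and .bytespp). For a 640x480 source cropped to 320x240 at (0, 0) it yields yoffset * bytespp + 2048 * 2 * 320 = yoffset * bytespp + 1310720 bytes.

	#define MAX_PIXELS_PER_LINE	2048	/* from omap_voutdef.h */

	/* Illustrative only: the dss_rotation_90_degree, mirror == 0 branch
	 * for YUYV, where ps = 4, vr_ps = 2 and line_length is
	 * MAX_PIXELS_PER_LINE because rotation is enabled. */
	static int vrfb_offset_90_yuyv(int pix_w, int crop_w, int crop_l,
				       int crop_t, int yoffset, int bytespp)
	{
		int temp_ps = 4 / 2;			/* ps / vr_ps */
		int cleft = (pix_w - crop_w) - crop_l;

		return yoffset * bytespp
			+ MAX_PIXELS_PER_LINE * temp_ps * cleft
			+ crop_t * temp_ps;
	}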
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/omap/omap_vout_vrfb.h
new file mode 100644
index 0000000..ffde741
--- /dev/null
+++ b/drivers/media/platform/omap/omap_vout_vrfb.h
@@ -0,0 +1,40 @@
+/*
+ * omap_vout_vrfb.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUT_VRFB_H
+#define OMAP_VOUT_VRFB_H
+
+#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+			bool static_vrfb_allocation);
+void omap_vout_release_vrfb(struct omap_vout_device *vout);
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+			unsigned int *count, unsigned int startindex);
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+			struct videobuf_buffer *vb);
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
+#else
+static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { }
+static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev,
+			int vid_num, bool static_vrfb_allocation)
+		{ return 0; }
+static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { }
+static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+			unsigned int *count, unsigned int startindex)
+		{ return 0; }
+static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+			struct videobuf_buffer *vb)
+		{ return 0; }
+static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { }
+#endif
+
+#endif
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
new file mode 100644
index 0000000..27a95d2
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -0,0 +1,225 @@
+/*
+ * omap_voutdef.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <video/omapdss.h>
+#include <plat/vrfb.h>
+
+#define YUYV_BPP        2
+#define RGB565_BPP      2
+#define RGB24_BPP       3
+#define RGB32_BPP       4
+#define TILE_SIZE       32
+#define YUYV_VRFB_BPP   2
+#define RGB_VRFB_BPP    1
+#define MAX_CID		3
+#define MAC_VRFB_CTXS	4
+#define MAX_VOUT_DEV	2
+#define MAX_OVLS	3
+#define MAX_DISPLAYS	10
+#define MAX_MANAGERS	3
+
+#define QQVGA_WIDTH		160
+#define QQVGA_HEIGHT		120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH		1280	/* Largest width */
+#define VID_MAX_HEIGHT		720	/* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH		2
+#define VID_MIN_HEIGHT		2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE     2048
+
+#define VRFB_TX_TIMEOUT         1000
+#define VRFB_NUM_BUFS		4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+enum dma_channel_state {
+	DMA_CHAN_NOT_ALLOTED,
+	DMA_CHAN_ALLOTED,
+};
+
+/* Enum for rotation.
+ * DSS represents rotation as 0, 1, 2, 3,
+ * while the V4L2 driver represents it as 0, 90, 180, 270 degrees.
+ */
+enum dss_rotation {
+	dss_rotation_0_degree	= 0,
+	dss_rotation_90_degree	= 1,
+	dss_rotation_180_degree	= 2,
+	dss_rotation_270_degree = 3,
+};
+
+/* Enum for choosing the rotation type for vout.
+ * DSS2 has no notion of "no rotation", while the
+ * V4L2 driver cannot support rotation at all when
+ * VRFB is not built into the kernel.
+ */
+enum vout_rotaion_type {
+	VOUT_ROT_NONE	= 0,
+	VOUT_ROT_VRFB	= 1,
+};
+
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+	int dev_id;
+	int dma_ch;
+	int req_status;
+	int tx_status;
+	wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+	int id;
+	int num_overlays;
+	struct omap_overlay *overlays[MAX_OVLS];
+	enum vout_rotaion_type rotation_type;
+};
+
+struct omap2video_device {
+	struct mutex  mtx;
+
+	int state;
+
+	struct v4l2_device v4l2_dev;
+	struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+	int num_displays;
+	struct omap_dss_device *displays[MAX_DISPLAYS];
+	int num_overlays;
+	struct omap_overlay *overlays[MAX_OVLS];
+	int num_managers;
+	struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+	struct omapvideo_info vid_info;
+	struct video_device *vfd;
+	struct omap2video_device *vid_dev;
+	int vid;
+	int opened;
+
+	/* we don't allow changing the image fmt/size once buffers
+	 * have been allocated
+	 */
+	int buffer_allocated;
+	/* allow reuse of a previously allocated buffer if it is big enough */
+	int buffer_size;
+	/* keep buffer info across opens */
+	unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+	unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+	enum omap_color_mode dss_mode;
+
+	/* we don't allow requesting new buffers while old buffers are
+	 * still mmapped
+	 */
+	int mmap_count;
+
+	spinlock_t vbq_lock;		/* spinlock for videobuf queues */
+	unsigned long field_count;	/* field counter for videobuf_buffer */
+
+	/* true means streaming is in progress. */
+	bool streaming;
+
+	struct v4l2_pix_format pix;
+	struct v4l2_rect crop;
+	struct v4l2_window win;
+	struct v4l2_framebuffer fbuf;
+
+	/* Lock to protect the shared data structures in ioctl */
+	struct mutex lock;
+
+	/* V4L2 control structure for different control id */
+	struct v4l2_control control[MAX_CID];
+	enum dss_rotation rotation;
+	bool mirror;
+	int flicker_filter;
+
+	int bpp; /* bytes per pixel */
+	int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+
+	struct vid_vrfb_dma vrfb_dma_tx;
+	unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+	unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+	struct vrfb vrfb_context[MAC_VRFB_CTXS];
+	bool vrfb_static_allocation;
+	unsigned int smsshado_size;
+	unsigned char pos;
+
+	int ps, vr_ps, line_length, first_int, field_id;
+	enum v4l2_memory memory;
+	struct videobuf_buffer *cur_frm, *next_frm;
+	struct list_head dma_queue;
+	u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+	u32 cropped_offset;
+	s32 tv_field1_offset;
+	void *isr_handle;
+
+	/* Buffer queue variables */
+	struct omap_vout_device *vout;
+	enum v4l2_buf_type type;
+	struct videobuf_queue vbq;
+	int io_allowed;
+
+};
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int is_rotation_90_or_270(const struct omap_vout_device *vout)
+{
+	return (vout->rotation == dss_rotation_90_degree ||
+			vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int is_rotation_enabled(const struct omap_vout_device *vout)
+{
+	return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+	if (!vout->mirror)
+		return vout->rotation;
+
+	switch (vout->rotation) {
+	case dss_rotation_90_degree:
+		return dss_rotation_270_degree;
+	case dss_rotation_270_degree:
+		return dss_rotation_90_degree;
+	case dss_rotation_180_degree:
+		return dss_rotation_0_degree;
+	default:
+		return dss_rotation_180_degree;
+	}
+}
+
+void omap_vout_free_buffers(struct omap_vout_device *vout);
+#endif	/* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
new file mode 100644
index 0000000..115408b
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -0,0 +1,339 @@
+/*
+ * omap_voutlib.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <linux/dma-mapping.h>
+
+#include <plat/cpu.h>
+
+#include "omap_voutlib.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf.  The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display.  The default cropping rectangle is centered in
+ * the image.  All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+		  struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+	crop->width = (pix->width < fbuf->fmt.width) ?
+		pix->width : fbuf->fmt.width;
+	crop->height = (pix->height < fbuf->fmt.height) ?
+		pix->height : fbuf->fmt.height;
+	crop->width &= ~1;
+	crop->height &= ~1;
+	crop->left = ((pix->width - crop->width) >> 1) & ~1;
+	crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
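
For example (an assumed 720x576 source shown on an 800x480 panel), the helper picks the largest even rectangle that fits and centres it in the image. A usage fragment with illustrative values only:

	struct v4l2_pix_format pix = { .width = 720, .height = 576 };
	struct v4l2_framebuffer fbuf = { .fmt = { .width = 800, .height = 480 } };
	struct v4l2_rect crop;

	omap_vout_default_crop(&pix, &fbuf, &crop);
	/* crop.width == 720, crop.height == 480, crop.left == 0, crop.top == 48 */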
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration.  The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+			struct v4l2_window *new_win)
+{
+	struct v4l2_rect try_win;
+
+	/* make a working copy of the new_win rectangle */
+	try_win = new_win->w;
+
+	/* adjust the preview window so it fits on the display by clipping any
+	 * offscreen areas
+	 */
+	if (try_win.left < 0) {
+		try_win.width += try_win.left;
+		try_win.left = 0;
+	}
+	if (try_win.top < 0) {
+		try_win.height += try_win.top;
+		try_win.top = 0;
+	}
+	try_win.width = (try_win.width < fbuf->fmt.width) ?
+		try_win.width : fbuf->fmt.width;
+	try_win.height = (try_win.height < fbuf->fmt.height) ?
+		try_win.height : fbuf->fmt.height;
+	if (try_win.left + try_win.width > fbuf->fmt.width)
+		try_win.width = fbuf->fmt.width - try_win.left;
+	if (try_win.top + try_win.height > fbuf->fmt.height)
+		try_win.height = fbuf->fmt.height - try_win.top;
+	try_win.width &= ~1;
+	try_win.height &= ~1;
+
+	if (try_win.width <= 0 || try_win.height <= 0)
+		return -EINVAL;
+
+	/* We now have a valid preview window, so go with it */
+	new_win->w = try_win;
+	new_win->field = V4L2_FIELD_ANY;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration.  The image cropping window in crop
+ * will also be adjusted if necessary.  Preference is given to keeping
+ * the window as close to the requested configuration as possible.  If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+		struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+		struct v4l2_window *new_win)
+{
+	int err;
+
+	err = omap_vout_try_window(fbuf, new_win);
+	if (err)
+		return err;
+
+	/* update our preview window */
+	win->w = new_win->w;
+	win->field = new_win->field;
+	win->chromakey = new_win->chromakey;
+
+	/* Adjust the cropping window to allow for resizing limitation */
+	if (cpu_is_omap24xx()) {
+		/* For 24xx limit is 8x to 1/2x scaling. */
+		if ((crop->height/win->w.height) >= 2)
+			crop->height = win->w.height * 2;
+
+		if ((crop->width/win->w.width) >= 2)
+			crop->width = win->w.width * 2;
+
+		if (crop->width > 768) {
+			/* The OMAP2420 vertical resizing line buffer is 768
+			 * pixels wide. If the cropped image is wider than
+			 * 768 pixels then it cannot be vertically resized.
+			 */
+			if (crop->height != win->w.height)
+				crop->width = 768;
+		}
+	} else if (cpu_is_omap34xx()) {
+		/* For 34xx limit is 8x to 1/4x scaling. */
+		if ((crop->height/win->w.height) >= 4)
+			crop->height = win->w.height * 4;
+
+		if ((crop->width/win->w.width) >= 4)
+			crop->width = win->w.width * 4;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration.  The image render window in win will
+ * also be adjusted if necessary.  The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant.  If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios.  If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+	      struct v4l2_rect *crop, struct v4l2_window *win,
+	      struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+	struct v4l2_rect try_crop;
+	unsigned long vresize, hresize;
+
+	/* make a working copy of the new_crop rectangle */
+	try_crop = *new_crop;
+
+	/* adjust the cropping rectangle so it fits in the image */
+	if (try_crop.left < 0) {
+		try_crop.width += try_crop.left;
+		try_crop.left = 0;
+	}
+	if (try_crop.top < 0) {
+		try_crop.height += try_crop.top;
+		try_crop.top = 0;
+	}
+	try_crop.width = (try_crop.width < pix->width) ?
+		try_crop.width : pix->width;
+	try_crop.height = (try_crop.height < pix->height) ?
+		try_crop.height : pix->height;
+	if (try_crop.left + try_crop.width > pix->width)
+		try_crop.width = pix->width - try_crop.left;
+	if (try_crop.top + try_crop.height > pix->height)
+		try_crop.height = pix->height - try_crop.top;
+
+	try_crop.width &= ~1;
+	try_crop.height &= ~1;
+
+	if (try_crop.width <= 0 || try_crop.height <= 0)
+		return -EINVAL;
+
+	if (cpu_is_omap24xx()) {
+		if (try_crop.height != win->w.height) {
+			/* If we're resizing vertically, we can't support a
+			 * crop width wider than 768 pixels.
+			 */
+			if (try_crop.width > 768)
+				try_crop.width = 768;
+		}
+	}
+	/* vertical resizing */
+	vresize = (1024 * try_crop.height) / win->w.height;
+	if (cpu_is_omap24xx() && (vresize > 2048))
+		vresize = 2048;
+	else if (cpu_is_omap34xx() && (vresize > 4096))
+		vresize = 4096;
+
+	win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+	if (win->w.height == 0)
+		win->w.height = 2;
+	if (win->w.height + win->w.top > fbuf->fmt.height) {
+		/* We made the preview window extend below the bottom of the
+		 * display, so clip it to the display boundary and resize the
+		 * cropping height to maintain the vertical resizing ratio.
+		 */
+		win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+		if (try_crop.height == 0)
+			try_crop.height = 2;
+	}
+	/* horizontal resizing */
+	hresize = (1024 * try_crop.width) / win->w.width;
+	if (cpu_is_omap24xx() && (hresize > 2048))
+		hresize = 2048;
+	else if (cpu_is_omap34xx() && (hresize > 4096))
+		hresize = 4096;
+
+	win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+	if (win->w.width == 0)
+		win->w.width = 2;
+	if (win->w.width + win->w.left > fbuf->fmt.width) {
+		/* We made the preview window extend past the right side of the
+		 * display, so clip it to the display boundary and resize the
+		 * cropping width to maintain the horizontal resizing ratio.
+		 */
+		win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+		if (try_crop.width == 0)
+			try_crop.width = 2;
+	}
+	if (cpu_is_omap24xx()) {
+		if ((try_crop.height/win->w.height) >= 2)
+			try_crop.height = win->w.height * 2;
+
+		if ((try_crop.width/win->w.width) >= 2)
+			try_crop.width = win->w.width * 2;
+
+		if (try_crop.width > 768) {
+			/* The OMAP2420 vertical resizing line buffer is
+			 * 768 pixels wide.  If the cropped image is wider
+			 * than 768 pixels then it cannot be vertically resized.
+			 */
+			if (try_crop.height != win->w.height)
+				try_crop.width = 768;
+		}
+	} else if (cpu_is_omap34xx()) {
+		if ((try_crop.height/win->w.height) >= 4)
+			try_crop.height = win->w.height * 4;
+
+		if ((try_crop.width/win->w.width) >= 4)
+			try_crop.width = win->w.width * 4;
+	}
+	/* update our cropping rectangle and we're done */
+	*crop = try_crop;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
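
The resize ratios above are 10-bit fixed point (1024 means 1:1), clamped at 2048 (1/2x) on OMAP24xx and 4096 (1/4x) on OMAP34xx; when the clamp hits, the window is grown rather than violating the limit. A small illustrative helper for the 24xx vertical case, not driver code:

	/* Illustrative only: clamp_win_height_24xx(640, 240) returns 320,
	 * because a 640-line crop cannot be scaled below half its height. */
	static int clamp_win_height_24xx(int crop_height, int win_height)
	{
		unsigned long vresize = (1024UL * crop_height) / win_height;

		if (vresize > 2048)
			vresize = 2048;
		return (int)(((1024UL * crop_height) / vresize) & ~1UL);
	}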
+
+/* Given a new format in pix and fbuf,  crop and win
+ * structures are initialized to default values. crop
+ * is initialized to the largest window size that will fit on the display.  The
+ * crop window is centered in the image. win is initialized to
+ * the same size as crop and is centered on the display.
+ * All sizes and offsets are constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+		struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+		struct v4l2_window *win)
+{
+	/* crop defines the preview source window in the image capture
+	 * buffer
+	 */
+	omap_vout_default_crop(pix, fbuf, crop);
+
+	/* win defines the preview target window on the display */
+	win->w.width = crop->width;
+	win->w.height = crop->height;
+	win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+	win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
+/*
+ * Allocate buffers
+ */
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+	u32 order, size;
+	unsigned long virt_addr, addr;
+
+	size = PAGE_ALIGN(buf_size);
+	order = get_order(size);
+	virt_addr = __get_free_pages(GFP_KERNEL, order);
+	addr = virt_addr;
+
+	if (virt_addr) {
+		while (size > 0) {
+			SetPageReserved(virt_to_page(addr));
+			addr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+	*phys_addr = (u32) virt_to_phys((void *) virt_addr);
+	return virt_addr;
+}
+
+/*
+ * Free buffers
+ */
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+	u32 order, size;
+	unsigned long addr = virtaddr;
+
+	size = PAGE_ALIGN(buf_size);
+	order = get_order(size);
+
+	while (size > 0) {
+		ClearPageReserved(virt_to_page(addr));
+		addr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+	free_pages((unsigned long) virtaddr, order);
+}
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/omap/omap_voutlib.h
new file mode 100644
index 0000000..e51750a
--- /dev/null
+++ b/drivers/media/platform/omap/omap_voutlib.h
@@ -0,0 +1,36 @@
+/*
+ * omap_voutlib.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+		struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+		struct v4l2_rect *crop, struct v4l2_window *win,
+		struct v4l2_framebuffer *fbuf,
+		const struct v4l2_rect *new_crop);
+
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+		struct v4l2_window *new_win);
+
+int omap_vout_new_window(struct v4l2_rect *crop,
+		struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+		struct v4l2_window *new_win);
+
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+		struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+		struct v4l2_window *win);
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
+#endif	/* #ifndef OMAP_VOUTLIB_H */
+
diff --git a/drivers/media/platform/omap1_camera.c b/drivers/media/platform/omap1_camera.c
new file mode 100644
index 0000000..fa08c76
--- /dev/null
+++ b/drivers/media/platform/omap1_camera.c
@@ -0,0 +1,1723 @@
+/*
+ * V4L2 SoC Camera driver for OMAP1 Camera Interface
+ *
+ * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Based on V4L2 Driver for i.MXL/i.MXL camera (CSI) host
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * Based on PXA SoC camera driver
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Hardware specific bits initially based on former work by Matt Callow
+ * drivers/media/platform/omap/omap1510cam.c
+ * Copyright (C) 2006 Matt Callow
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/omap1_camera.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
+
+#include <plat/dma.h>
+
+
+#define DRIVER_NAME		"omap1-camera"
+#define DRIVER_VERSION		"0.0.2"
+
+
+/*
+ * ---------------------------------------------------------------------------
+ *  OMAP1 Camera Interface registers
+ * ---------------------------------------------------------------------------
+ */
+
+#define REG_CTRLCLOCK		0x00
+#define REG_IT_STATUS		0x04
+#define REG_MODE		0x08
+#define REG_STATUS		0x0C
+#define REG_CAMDATA		0x10
+#define REG_GPIO		0x14
+#define REG_PEAK_COUNTER	0x18
+
+/* CTRLCLOCK bit shifts */
+#define LCLK_EN			BIT(7)
+#define DPLL_EN			BIT(6)
+#define MCLK_EN			BIT(5)
+#define CAMEXCLK_EN		BIT(4)
+#define POLCLK			BIT(3)
+#define FOSCMOD_SHIFT		0
+#define FOSCMOD_MASK		(0x7 << FOSCMOD_SHIFT)
+#define FOSCMOD_12MHz		0x0
+#define FOSCMOD_6MHz		0x2
+#define FOSCMOD_9_6MHz		0x4
+#define FOSCMOD_24MHz		0x5
+#define FOSCMOD_8MHz		0x6
+
+/* IT_STATUS bit shifts */
+#define DATA_TRANSFER		BIT(5)
+#define FIFO_FULL		BIT(4)
+#define H_DOWN			BIT(3)
+#define H_UP			BIT(2)
+#define V_DOWN			BIT(1)
+#define V_UP			BIT(0)
+
+/* MODE bit shifts */
+#define RAZ_FIFO		BIT(18)
+#define EN_FIFO_FULL		BIT(17)
+#define EN_NIRQ			BIT(16)
+#define THRESHOLD_SHIFT		9
+#define THRESHOLD_MASK		(0x7f << THRESHOLD_SHIFT)
+#define DMA			BIT(8)
+#define EN_H_DOWN		BIT(7)
+#define EN_H_UP			BIT(6)
+#define EN_V_DOWN		BIT(5)
+#define EN_V_UP			BIT(4)
+#define ORDERCAMD		BIT(3)
+
+#define IRQ_MASK		(EN_V_UP | EN_V_DOWN | EN_H_UP | EN_H_DOWN | \
+				 EN_NIRQ | EN_FIFO_FULL)
+
+/* STATUS bit shifts */
+#define HSTATUS			BIT(1)
+#define VSTATUS			BIT(0)
+
+/* GPIO bit shifts */
+#define CAM_RST			BIT(0)
+
+/* end of OMAP1 Camera Interface registers */
+
+
+#define SOCAM_BUS_FLAGS	(V4L2_MBUS_MASTER | \
+			V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+			V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+			V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+
+#define FIFO_SIZE		((THRESHOLD_MASK >> THRESHOLD_SHIFT) + 1)
+#define FIFO_SHIFT		__fls(FIFO_SIZE)
+
+#define DMA_BURST_SHIFT		(1 + OMAP_DMA_DATA_BURST_4)
+#define DMA_BURST_SIZE		(1 << DMA_BURST_SHIFT)
+
+#define DMA_ELEMENT_SHIFT	OMAP_DMA_DATA_TYPE_S32
+#define DMA_ELEMENT_SIZE	(1 << DMA_ELEMENT_SHIFT)
+
+#define DMA_FRAME_SHIFT_CONTIG	(FIFO_SHIFT - 1)
+#define DMA_FRAME_SHIFT_SG	DMA_BURST_SHIFT
+
+#define DMA_FRAME_SHIFT(x)	((x) == OMAP1_CAM_DMA_CONTIG ? \
+						DMA_FRAME_SHIFT_CONTIG : \
+						DMA_FRAME_SHIFT_SG)
+#define DMA_FRAME_SIZE(x)	(1 << DMA_FRAME_SHIFT(x))
+#define DMA_SYNC		OMAP_DMA_SYNC_FRAME
+#define THRESHOLD_LEVEL		DMA_FRAME_SIZE
+
+
+#define MAX_VIDEO_MEM		4	/* arbitrary video memory limit in MB */
+
+
+/*
+ * Structures
+ */
+
+/* buffer for one video frame */
+struct omap1_cam_buf {
+	struct videobuf_buffer		vb;
+	enum v4l2_mbus_pixelcode	code;
+	int				inwork;
+	struct scatterlist		*sgbuf;
+	int				sgcount;
+	int				bytes_left;
+	enum videobuf_state		result;
+};
+
+struct omap1_cam_dev {
+	struct soc_camera_host		soc_host;
+	struct soc_camera_device	*icd;
+	struct clk			*clk;
+
+	unsigned int			irq;
+	void __iomem			*base;
+
+	int				dma_ch;
+
+	struct omap1_cam_platform_data	*pdata;
+	struct resource			*res;
+	unsigned long			pflags;
+	unsigned long			camexclk;
+
+	struct list_head		capture;
+
+	/* lock used to protect videobuf */
+	spinlock_t			lock;
+
+	/* Pointers to DMA buffers */
+	struct omap1_cam_buf		*active;
+	struct omap1_cam_buf		*ready;
+
+	enum omap1_cam_vb_mode		vb_mode;
+	int				(*mmap_mapper)(struct videobuf_queue *q,
+						struct videobuf_buffer *buf,
+						struct vm_area_struct *vma);
+
+	u32				reg_cache[0];
+};
+
+
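+/*
+ * Register access helpers: writes are mirrored in pcdev->reg_cache[] so that
+ * frequently used registers can be read back with CAM_READ_CACHE() without
+ * touching the hardware.
+ */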
+static void cam_write(struct omap1_cam_dev *pcdev, u16 reg, u32 val)
+{
+	pcdev->reg_cache[reg / sizeof(u32)] = val;
+	__raw_writel(val, pcdev->base + reg);
+}
+
+static u32 cam_read(struct omap1_cam_dev *pcdev, u16 reg, bool from_cache)
+{
+	return !from_cache ? __raw_readl(pcdev->base + reg) :
+			pcdev->reg_cache[reg / sizeof(u32)];
+}
+
+#define CAM_READ(pcdev, reg) \
+		cam_read(pcdev, REG_##reg, false)
+#define CAM_WRITE(pcdev, reg, val) \
+		cam_write(pcdev, REG_##reg, val)
+#define CAM_READ_CACHE(pcdev, reg) \
+		cam_read(pcdev, REG_##reg, true)
+
+/*
+ *  Videobuf operations
+ */
+static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+		unsigned int *size)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+
+	*size = icd->sizeimage;
+
+	if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
+		*count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);
+
+	if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+		*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
+
+	dev_dbg(icd->parent,
+			"%s: count=%d, size=%d\n", __func__, *count, *size);
+
+	return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
+		enum omap1_cam_vb_mode vb_mode)
+{
+	struct videobuf_buffer *vb = &buf->vb;
+
+	BUG_ON(in_interrupt());
+
+	videobuf_waiton(vq, vb, 0, 0);
+
+	if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+		videobuf_dma_contig_free(vq, vb);
+	} else {
+		struct soc_camera_device *icd = vq->priv_data;
+		struct device *dev = icd->parent;
+		struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+		videobuf_dma_unmap(dev, dma);
+		videobuf_dma_free(dma);
+	}
+
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int omap1_videobuf_prepare(struct videobuf_queue *vq,
+		struct videobuf_buffer *vb, enum v4l2_field field)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	int ret;
+
+	WARN_ON(!list_empty(&vb->queue));
+
+	BUG_ON(NULL == icd->current_fmt);
+
+	buf->inwork = 1;
+
+	if (buf->code != icd->current_fmt->code || vb->field != field ||
+			vb->width  != icd->user_width ||
+			vb->height != icd->user_height) {
+		buf->code  = icd->current_fmt->code;
+		vb->width  = icd->user_width;
+		vb->height = icd->user_height;
+		vb->field  = field;
+		vb->state  = VIDEOBUF_NEEDS_INIT;
+	}
+
+	vb->size = icd->sizeimage;
+
+	if (vb->baddr && vb->bsize < vb->size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		ret = videobuf_iolock(vq, vb, NULL);
+		if (ret)
+			goto fail;
+
+		vb->state = VIDEOBUF_PREPARED;
+	}
+	buf->inwork = 0;
+
+	return 0;
+fail:
+	free_buffer(vq, buf, pcdev->vb_mode);
+out:
+	buf->inwork = 0;
+	return ret;
+}
+
+static void set_dma_dest_params(int dma_ch, struct omap1_cam_buf *buf,
+		enum omap1_cam_vb_mode vb_mode)
+{
+	dma_addr_t dma_addr;
+	unsigned int block_size;
+
+	if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+		dma_addr = videobuf_to_dma_contig(&buf->vb);
+		block_size = buf->vb.size;
+	} else {
+		if (WARN_ON(!buf->sgbuf)) {
+			buf->result = VIDEOBUF_ERROR;
+			return;
+		}
+		dma_addr = sg_dma_address(buf->sgbuf);
+		if (WARN_ON(!dma_addr)) {
+			buf->sgbuf = NULL;
+			buf->result = VIDEOBUF_ERROR;
+			return;
+		}
+		block_size = sg_dma_len(buf->sgbuf);
+		if (WARN_ON(!block_size)) {
+			buf->sgbuf = NULL;
+			buf->result = VIDEOBUF_ERROR;
+			return;
+		}
+		if (unlikely(buf->bytes_left < block_size))
+			block_size = buf->bytes_left;
+		if (WARN_ON(dma_addr & (DMA_FRAME_SIZE(vb_mode) *
+				DMA_ELEMENT_SIZE - 1))) {
+			dma_addr = ALIGN(dma_addr, DMA_FRAME_SIZE(vb_mode) *
+					DMA_ELEMENT_SIZE);
+			block_size &= ~(DMA_FRAME_SIZE(vb_mode) *
+					DMA_ELEMENT_SIZE - 1);
+		}
+		buf->bytes_left -= block_size;
+		buf->sgcount++;
+	}
+
+	omap_set_dma_dest_params(dma_ch,
+		OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0);
+	omap_set_dma_transfer_params(dma_ch,
+		OMAP_DMA_DATA_TYPE_S32, DMA_FRAME_SIZE(vb_mode),
+		block_size >> (DMA_FRAME_SHIFT(vb_mode) + DMA_ELEMENT_SHIFT),
+		DMA_SYNC, 0, 0);
+}
+
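+/*
+ * Pick the buffer to be captured next and, in CONTIG mode, enter its
+ * parameters into the DMA programming register set.  Returns NULL if no
+ * buffer is queued.
+ */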
+static struct omap1_cam_buf *prepare_next_vb(struct omap1_cam_dev *pcdev)
+{
+	struct omap1_cam_buf *buf;
+
+	/*
+	 * If there is already a buffer pointed to by pcdev->ready,
+	 * (re)use it; otherwise try to fetch and configure a new one.
+	 */
+	buf = pcdev->ready;
+	if (!buf) {
+		if (list_empty(&pcdev->capture))
+			return buf;
+		buf = list_entry(pcdev->capture.next,
+				struct omap1_cam_buf, vb.queue);
+		buf->vb.state = VIDEOBUF_ACTIVE;
+		pcdev->ready = buf;
+		list_del_init(&buf->vb.queue);
+	}
+
+	if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+		/*
+		 * In CONTIG mode, we can safely enter the next buffer's
+		 * parameters into the DMA programming register set even after
+		 * the DMA has already been activated on the previous buffer.
+		 */
+		set_dma_dest_params(pcdev->dma_ch, buf, pcdev->vb_mode);
+	} else {
+		/*
+		 * In SG mode, the above is not safe since there are probably
+		 * a bunch of sgbufs from the previous sglist still pending.
+		 * Instead, mark the sglist fresh for the upcoming
+		 * try_next_sgbuf().
+		 */
+		buf->sgbuf = NULL;
+	}
+
+	return buf;
+}
+
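+/*
+ * Advance to the next sgbuf of the active buffer's scatterlist, initializing
+ * the list first if it is still fresh, and, if one is available, program its
+ * parameters into the DMA register set.  Returns NULL when the scatterlist
+ * is exhausted or on error.
+ */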
+static struct scatterlist *try_next_sgbuf(int dma_ch, struct omap1_cam_buf *buf)
+{
+	struct scatterlist *sgbuf;
+
+	if (likely(buf->sgbuf)) {
+		/* current sglist is active */
+		if (unlikely(!buf->bytes_left)) {
+			/* indicate sglist complete */
+			sgbuf = NULL;
+		} else {
+			/* process next sgbuf */
+			sgbuf = sg_next(buf->sgbuf);
+			if (WARN_ON(!sgbuf)) {
+				buf->result = VIDEOBUF_ERROR;
+			} else if (WARN_ON(!sg_dma_len(sgbuf))) {
+				sgbuf = NULL;
+				buf->result = VIDEOBUF_ERROR;
+			}
+		}
+		buf->sgbuf = sgbuf;
+	} else {
+		/* sglist is fresh, initialize it before using */
+		struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+
+		sgbuf = dma->sglist;
+		if (!(WARN_ON(!sgbuf))) {
+			buf->sgbuf = sgbuf;
+			buf->sgcount = 0;
+			buf->bytes_left = buf->vb.size;
+			buf->result = VIDEOBUF_DONE;
+		}
+	}
+	if (sgbuf)
+		/*
+		 * Put our next sgbuf parameters (address, size)
+		 * into the DMA programming register set.
+		 */
+		set_dma_dest_params(dma_ch, buf, OMAP1_CAM_DMA_SG);
+
+	return sgbuf;
+}
+
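+/*
+ * (Re)start frame capture: arm the frame start interrupt used as an end of
+ * frame watchdog trigger, reset the FIFO with the pixel clock stopped, start
+ * the DMA channel, then reenable the pixel clock and release the FIFO reset.
+ */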
+static void start_capture(struct omap1_cam_dev *pcdev)
+{
+	struct omap1_cam_buf *buf = pcdev->active;
+	u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+	u32 mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN;
+
+	if (WARN_ON(!buf))
+		return;
+
+	/*
+	 * Enable start of frame interrupt, which we will use for activating
+	 * our end of frame watchdog when capture actually starts.
+	 */
+	mode |= EN_V_UP;
+
+	if (unlikely(ctrlclock & LCLK_EN))
+		/* stop pixel clock before FIFO reset */
+		CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+	/* reset FIFO */
+	CAM_WRITE(pcdev, MODE, mode | RAZ_FIFO);
+
+	omap_start_dma(pcdev->dma_ch);
+
+	if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+		/*
+		 * In SG mode, it's a good moment to fetch the next sgbuf
+		 * from the current sglist and, if available, already put
+		 * its parameters into the DMA programming register set.
+		 */
+		try_next_sgbuf(pcdev->dma_ch, buf);
+	}
+
+	/* (re)enable pixel clock */
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | LCLK_EN);
+	/* release FIFO reset */
+	CAM_WRITE(pcdev, MODE, mode);
+}
+
+static void suspend_capture(struct omap1_cam_dev *pcdev)
+{
+	u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+	omap_stop_dma(pcdev->dma_ch);
+}
+
+static void disable_capture(struct omap1_cam_dev *pcdev)
+{
+	u32 mode = CAM_READ_CACHE(pcdev, MODE);
+
+	CAM_WRITE(pcdev, MODE, mode & ~(IRQ_MASK | DMA));
+}
+
+static void omap1_videobuf_queue(struct videobuf_queue *vq,
+						struct videobuf_buffer *vb)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	struct omap1_cam_buf *buf;
+	u32 mode;
+
+	list_add_tail(&vb->queue, &pcdev->capture);
+	vb->state = VIDEOBUF_QUEUED;
+
+	if (pcdev->active) {
+		/*
+		 * Capture in progress, so don't touch pcdev->ready even if
+		 * empty. Since the transfer of the DMA programming register set
+		 * content to the DMA working register set is done automatically
+		 * by the DMA hardware, this can well happen while we are
+		 * holding the lock here. Leave fetching it from the queue
+		 * to be done when the next DMA interrupt occurs instead.
+		 */
+		return;
+	}
+
+	WARN_ON(pcdev->ready);
+
+	buf = prepare_next_vb(pcdev);
+	if (WARN_ON(!buf))
+		return;
+
+	pcdev->active = buf;
+	pcdev->ready = NULL;
+
+	dev_dbg(icd->parent,
+		"%s: capture not active, setup FIFO, start DMA\n", __func__);
+	mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
+	mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
+	CAM_WRITE(pcdev, MODE, mode | EN_FIFO_FULL | DMA);
+
+	if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+		/*
+		 * In SG mode, the above prepare_next_vb() didn't actually
+		 * put anything into the DMA programming register set,
+		 * so we have to do it now, before activating DMA.
+		 */
+		try_next_sgbuf(pcdev->dma_ch, buf);
+	}
+
+	start_capture(pcdev);
+}
+
+static void omap1_videobuf_release(struct videobuf_queue *vq,
+				 struct videobuf_buffer *vb)
+{
+	struct omap1_cam_buf *buf =
+			container_of(vb, struct omap1_cam_buf, vb);
+	struct soc_camera_device *icd = vq->priv_data;
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct omap1_cam_dev *pcdev = ici->priv;
+
+	switch (vb->state) {
+	case VIDEOBUF_DONE:
+		dev_dbg(dev, "%s (done)\n", __func__);
+		break;
+	case VIDEOBUF_ACTIVE:
+		dev_dbg(dev, "%s (active)\n", __func__);
+		break;
+	case VIDEOBUF_QUEUED:
+		dev_dbg(dev, "%s (queued)\n", __func__);
+		break;
+	case VIDEOBUF_PREPARED:
+		dev_dbg(dev, "%s (prepared)\n", __func__);
+		break;
+	default:
+		dev_dbg(dev, "%s (unknown %d)\n", __func__, vb->state);
+		break;
+	}
+
+	free_buffer(vq, buf, pcdev->vb_mode);
+}
+
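+/*
+ * Complete the active videobuf with the given result, wake up a possible
+ * waiter, and rotate pcdev->ready / pcdev->active so that capture continues
+ * with the next buffer if one is available.  Called with pcdev->lock held
+ * from the DMA and camera interrupt handlers.
+ */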
+static void videobuf_done(struct omap1_cam_dev *pcdev,
+		enum videobuf_state result)
+{
+	struct omap1_cam_buf *buf = pcdev->active;
+	struct videobuf_buffer *vb;
+	struct device *dev = pcdev->icd->parent;
+
+	if (WARN_ON(!buf)) {
+		suspend_capture(pcdev);
+		disable_capture(pcdev);
+		return;
+	}
+
+	if (result == VIDEOBUF_ERROR)
+		suspend_capture(pcdev);
+
+	vb = &buf->vb;
+	if (waitqueue_active(&vb->done)) {
+		if (!pcdev->ready && result != VIDEOBUF_ERROR) {
+			/*
+			 * No next buffer has been entered into the DMA
+			 * programming register set in time (this could only be
+			 * done while the previous DMA interrupt was being
+			 * processed, not later), so the last DMA block, be it a
+			 * whole buffer in CONTIG mode or its last sgbuf in SG
+			 * mode, is about to be reused by the just
+			 * autoreinitialized DMA engine and overwritten with
+			 * next frame data. The best we can do is stop the
+			 * capture as soon as possible, hopefully before the
+			 * next frame starts.
+			 */
+			suspend_capture(pcdev);
+		}
+		vb->state = result;
+		do_gettimeofday(&vb->ts);
+		if (result != VIDEOBUF_ERROR)
+			vb->field_count++;
+		wake_up(&vb->done);
+
+		/* shift in next buffer */
+		buf = pcdev->ready;
+		pcdev->active = buf;
+		pcdev->ready = NULL;
+
+		if (!buf) {
+			/*
+			 * No next buffer was ready in time (see above), so
+			 * indicate an error condition to force capture restart
+			 * or stop, depending on whether a next buffer is
+			 * already queued.
+			 */
+			result = VIDEOBUF_ERROR;
+			prepare_next_vb(pcdev);
+
+			buf = pcdev->ready;
+			pcdev->active = buf;
+			pcdev->ready = NULL;
+		}
+	} else if (pcdev->ready) {
+		/*
+		 * In both CONTIG and SG mode, the DMA engine has possibly
+		 * already been autoreinitialized with the preprogrammed
+		 * pcdev->ready buffer.  We can either accept this fact
+		 * and just swap the buffers, or provoke an error condition
+		 * and restart capture.  The former seems less intrusive.
+		 */
+		dev_dbg(dev, "%s: nobody waiting on videobuf, swap with next\n",
+				__func__);
+		pcdev->active = pcdev->ready;
+
+		if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+			/*
+			 * In SG mode, we have to make sure that the buffer we
+			 * are putting back into the pcdev->ready is marked
+			 * fresh.
+			 */
+			buf->sgbuf = NULL;
+		}
+		pcdev->ready = buf;
+
+		buf = pcdev->active;
+	} else {
+		/*
+		 * No next buffer has been entered into
+		 * the DMA programming register set in time.
+		 */
+		if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+			/*
+			 * In CONTIG mode, the DMA engine has already been
+			 * reinitialized with the current buffer. Best we can do
+			 * is not touching it.
+			 */
+			dev_dbg(dev,
+				"%s: nobody waiting on videobuf, reuse it\n",
+				__func__);
+		} else {
+			/*
+			 * In SG mode, the DMA engine has just been
+			 * autoreinitialized with the last sgbuf from the
+			 * current list. Restart capture so that the next frame
+			 * start gets transferred into the first sgbuf, not the
+			 * last one.
+			 */
+			if (result != VIDEOBUF_ERROR) {
+				suspend_capture(pcdev);
+				result = VIDEOBUF_ERROR;
+			}
+		}
+	}
+
+	if (!buf) {
+		dev_dbg(dev, "%s: no more videobufs, stop capture\n", __func__);
+		disable_capture(pcdev);
+		return;
+	}
+
+	if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+		/*
+		 * In CONTIG mode, the current buffer parameters have already
+		 * been entered into the DMA programming register set while the
+		 * buffer was fetched with prepare_next_vb(); they may also have
+		 * been transferred into the runtime set and be already active
+		 * if the DMA is still running.
+		 */
+	} else {
+		/* In SG mode, extra steps are required */
+		if (result == VIDEOBUF_ERROR)
+			/* make sure we (re)use sglist from start on error */
+			buf->sgbuf = NULL;
+
+		/*
+		 * In any case, enter the next sgbuf parameters into the DMA
+		 * programming register set.  They will be used either during
+		 * nearest DMA autoreinitialization or, in case of an error,
+		 * on DMA startup below.
+		 */
+		try_next_sgbuf(pcdev->dma_ch, buf);
+	}
+
+	if (result == VIDEOBUF_ERROR) {
+		dev_dbg(dev, "%s: videobuf error; reset FIFO, restart DMA\n",
+				__func__);
+		start_capture(pcdev);
+		/*
+		 * In SG mode, the above also resulted in the next sgbuf
+		 * parameters being entered into the DMA programming register
+		 * set, making them ready for next DMA autoreinitialization.
+		 */
+	}
+
+	/*
+	 * Finally, try fetching next buffer.
+	 * In CONTIG mode, it will also enter it into the DMA programming
+	 * register set, making it ready for next DMA autoreinitialization.
+	 */
+	prepare_next_vb(pcdev);
+}
+
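+/*
+ * DMA completion handler, called once per transferred block: a whole video
+ * buffer in CONTIG mode, or a single sgbuf in SG mode.
+ */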
+static void dma_isr(int channel, unsigned short status, void *data)
+{
+	struct omap1_cam_dev *pcdev = data;
+	struct omap1_cam_buf *buf = pcdev->active;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	if (WARN_ON(!buf)) {
+		suspend_capture(pcdev);
+		disable_capture(pcdev);
+		goto out;
+	}
+
+	if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+		/*
+		 * In CONTIG mode, assume we have just managed to collect the
+		 * whole frame, hopefully before our end of frame watchdog is
+		 * triggered. Then, all we have to do is disable the watchdog
+		 * for this frame and call videobuf_done() with success
+		 * indicated.
+		 */
+		CAM_WRITE(pcdev, MODE,
+				CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN);
+		videobuf_done(pcdev, VIDEOBUF_DONE);
+	} else {
+		/*
+		 * In SG mode, we have to process every sgbuf from the current
+		 * sglist, one after another.
+		 */
+		if (buf->sgbuf) {
+			/*
+			 * Current sglist not completed yet, so try fetching the
+			 * next sgbuf, hopefully putting it into the DMA
+			 * programming register set and making it ready for the
+			 * next DMA autoreinitialization.
+			 */
+			try_next_sgbuf(pcdev->dma_ch, buf);
+			if (buf->sgbuf)
+				goto out;
+
+			/*
+			 * No more sgbufs left in the current sglist. This
+			 * doesn't mean that the whole videobuffer is already
+			 * complete, but only that the last sgbuf from the
+			 * current sglist is about to be filled. It will be
+			 * ready on the next DMA interrupt, signalled by
+			 * buf->sgbuf being set back to NULL.
+			 */
+			if (buf->result != VIDEOBUF_ERROR) {
+				/*
+				 * Video frame collected without errors so far,
+				 * so we can prepare for collecting the next one
+				 * as soon as the DMA gets autoreinitialized
+				 * after the current (last) sgbuf is completed.
+				 */
+				buf = prepare_next_vb(pcdev);
+				if (!buf)
+					goto out;
+
+				try_next_sgbuf(pcdev->dma_ch, buf);
+				goto out;
+			}
+		}
+		/* end of videobuf */
+		videobuf_done(pcdev, buf->result);
+	}
+
+out:
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
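+/*
+ * Camera interface interrupt handler: handles FIFO overflow, frame start
+ * (V_UP) and end of frame watchdog (V_DOWN) events.
+ */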
+static irqreturn_t cam_isr(int irq, void *data)
+{
+	struct omap1_cam_dev *pcdev = data;
+	struct device *dev = pcdev->icd->parent;
+	struct omap1_cam_buf *buf = pcdev->active;
+	u32 it_status;
+	unsigned long flags;
+
+	it_status = CAM_READ(pcdev, IT_STATUS);
+	if (!it_status)
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	if (WARN_ON(!buf)) {
+		dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n",
+			 __func__, it_status);
+		suspend_capture(pcdev);
+		disable_capture(pcdev);
+		goto out;
+	}
+
+	if (unlikely(it_status & FIFO_FULL)) {
+		dev_warn(dev, "%s: FIFO overflow\n", __func__);
+
+	} else if (it_status & V_DOWN) {
+		/* end of video frame watchdog */
+		if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+			/*
+			 * In CONTIG mode, the watchdog is disabled by a
+			 * successful DMA end of block interrupt and reenabled
+			 * on the next frame start. If we get here, there is
+			 * nothing to check; we must be out of sync.
+			 */
+		} else {
+			if (buf->sgcount == 2) {
+				/*
+				 * If exactly 2 sgbufs from the next sglist have
+				 * been programmed into the DMA engine (the
+				 * first one already transferred into the DMA
+				 * runtime register set, the second one still
+				 * in the programming set), then we are in sync.
+				 */
+				goto out;
+			}
+		}
+		dev_notice(dev, "%s: unexpected end of video frame\n",
+				__func__);
+
+	} else if (it_status & V_UP) {
+		u32 mode;
+
+		if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+			/*
+			 * In CONTIG mode, we need this interrupt every frame
+			 * in order to reenable our end of frame watchdog.
+			 */
+			mode = CAM_READ_CACHE(pcdev, MODE);
+		} else {
+			/*
+			 * In SG mode, the end of frame watchdog enabled below
+			 * is kept on permanently, so we can turn this one-shot
+			 * setup off.
+			 */
+			mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_UP;
+		}
+
+		if (!(mode & EN_V_DOWN)) {
+			/* (re)enable end of frame watchdog interrupt */
+			mode |= EN_V_DOWN;
+		}
+		CAM_WRITE(pcdev, MODE, mode);
+		goto out;
+
+	} else {
+		dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n",
+				__func__, it_status);
+		goto out;
+	}
+
+	videobuf_done(pcdev, VIDEOBUF_ERROR);
+out:
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static struct videobuf_queue_ops omap1_videobuf_ops = {
+	.buf_setup	= omap1_videobuf_setup,
+	.buf_prepare	= omap1_videobuf_prepare,
+	.buf_queue	= omap1_videobuf_queue,
+	.buf_release	= omap1_videobuf_release,
+};
+
+
+/*
+ * SOC Camera host operations
+ */
+
+static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
+{
+	/* apply/release camera sensor reset if requested by platform data */
+	if (pcdev->pflags & OMAP1_CAMERA_RST_HIGH)
+		CAM_WRITE(pcdev, GPIO, reset);
+	else if (pcdev->pflags & OMAP1_CAMERA_RST_LOW)
+		CAM_WRITE(pcdev, GPIO, !reset);
+}
+
+/*
+ * The following two functions absolutely depend on the fact that
+ * there can be only one camera on the OMAP1 camera sensor interface.
+ */
+static int omap1_cam_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	u32 ctrlclock;
+
+	if (pcdev->icd)
+		return -EBUSY;
+
+	clk_enable(pcdev->clk);
+
+	/* setup sensor clock */
+	ctrlclock = CAM_READ(pcdev, CTRLCLOCK);
+	ctrlclock &= ~(CAMEXCLK_EN | MCLK_EN | DPLL_EN);
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+	ctrlclock &= ~FOSCMOD_MASK;
+	switch (pcdev->camexclk) {
+	case 6000000:
+		ctrlclock |= CAMEXCLK_EN | FOSCMOD_6MHz;
+		break;
+	case 8000000:
+		ctrlclock |= CAMEXCLK_EN | FOSCMOD_8MHz | DPLL_EN;
+		break;
+	case 9600000:
+		ctrlclock |= CAMEXCLK_EN | FOSCMOD_9_6MHz | DPLL_EN;
+		break;
+	case 12000000:
+		ctrlclock |= CAMEXCLK_EN | FOSCMOD_12MHz;
+		break;
+	case 24000000:
+		ctrlclock |= CAMEXCLK_EN | FOSCMOD_24MHz | DPLL_EN;
+	default:
+		break;
+	}
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~DPLL_EN);
+
+	/* enable internal clock */
+	ctrlclock |= MCLK_EN;
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+	sensor_reset(pcdev, false);
+
+	pcdev->icd = icd;
+
+	dev_dbg(icd->parent, "OMAP1 Camera driver attached to camera %d\n",
+			icd->devnum);
+	return 0;
+}
+
+static void omap1_cam_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	u32 ctrlclock;
+
+	BUG_ON(icd != pcdev->icd);
+
+	suspend_capture(pcdev);
+	disable_capture(pcdev);
+
+	sensor_reset(pcdev, true);
+
+	/* disable and release system clocks */
+	ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+	ctrlclock &= ~(MCLK_EN | DPLL_EN | CAMEXCLK_EN);
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+	ctrlclock = (ctrlclock & ~FOSCMOD_MASK) | FOSCMOD_12MHz;
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | MCLK_EN);
+
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~MCLK_EN);
+
+	clk_disable(pcdev->clk);
+
+	pcdev->icd = NULL;
+
+	dev_dbg(icd->parent,
+		"OMAP1 Camera driver detached from camera %d\n", icd->devnum);
+}
+
+/* Duplicate standard formats based on host capability of byte swapping */
+static const struct soc_mbus_lookup omap1_cam_formats[] = {
+{
+	.code = V4L2_MBUS_FMT_UYVY8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YUYV,
+		.name			= "YUYV",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_VYUY8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YVYU,
+		.name			= "YVYU",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YUYV8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_UYVY,
+		.name			= "UYVY",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YVYU8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_VYUY,
+		.name			= "VYUY",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB555,
+		.name			= "RGB555",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB555X,
+		.name			= "RGB555X",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB565,
+		.name			= "RGB565",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB565X,
+		.name			= "RGB565X",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+},
+};
+
+static int omap1_cam_get_formats(struct soc_camera_device *icd,
+		unsigned int idx, struct soc_camera_format_xlate *xlate)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	int formats = 0, ret;
+	enum v4l2_mbus_pixelcode code;
+	const struct soc_mbus_pixelfmt *fmt;
+
+	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+	if (ret < 0)
+		/* No more formats */
+		return 0;
+
+	fmt = soc_mbus_get_fmtdesc(code);
+	if (!fmt) {
+		dev_warn(dev, "%s: unsupported format code #%d: %d\n", __func__,
+				idx, code);
+		return 0;
+	}
+
+	/* Check support for the requested bits-per-sample */
+	if (fmt->bits_per_sample != 8)
+		return 0;
+
+	switch (code) {
+	case V4L2_MBUS_FMT_YUYV8_2X8:
+	case V4L2_MBUS_FMT_YVYU8_2X8:
+	case V4L2_MBUS_FMT_UYVY8_2X8:
+	case V4L2_MBUS_FMT_VYUY8_2X8:
+	case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+	case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+	case V4L2_MBUS_FMT_RGB565_2X8_BE:
+	case V4L2_MBUS_FMT_RGB565_2X8_LE:
+		formats++;
+		if (xlate) {
+			xlate->host_fmt	= soc_mbus_find_fmtdesc(code,
+						omap1_cam_formats,
+						ARRAY_SIZE(omap1_cam_formats));
+			xlate->code	= code;
+			dev_dbg(dev,
+				"%s: providing format %s as byte swapped code #%d\n",
+				__func__, xlate->host_fmt->name, code);
+			xlate++;
+		}
+	default:
+		if (xlate)
+			dev_dbg(dev,
+				"%s: providing format %s in pass-through mode\n",
+				__func__, fmt->name);
+	}
+	formats++;
+	if (xlate) {
+		xlate->host_fmt	= fmt;
+		xlate->code	= code;
+		xlate++;
+	}
+
+	return formats;
+}
+
+static bool is_dma_aligned(s32 bytes_per_line, unsigned int height,
+		enum omap1_cam_vb_mode vb_mode)
+{
+	int size = bytes_per_line * height;
+
+	return IS_ALIGNED(bytes_per_line, DMA_ELEMENT_SIZE) &&
+		IS_ALIGNED(size, DMA_FRAME_SIZE(vb_mode) * DMA_ELEMENT_SIZE);
+}
+
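+/*
+ * Adjust the requested geometry to DMA alignment constraints if necessary.
+ * Returns a negative error code if the line length cannot be computed,
+ * 0 if the geometry had to be adjusted, or 1 if it was already DMA aligned.
+ */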
+static int dma_align(int *width, int *height,
+		const struct soc_mbus_pixelfmt *fmt,
+		enum omap1_cam_vb_mode vb_mode, bool enlarge)
+{
+	s32 bytes_per_line = soc_mbus_bytes_per_line(*width, fmt);
+
+	if (bytes_per_line < 0)
+		return bytes_per_line;
+
+	if (!is_dma_aligned(bytes_per_line, *height, vb_mode)) {
+		unsigned int pxalign = __fls(bytes_per_line / *width);
+		unsigned int salign  = DMA_FRAME_SHIFT(vb_mode) +
+				DMA_ELEMENT_SHIFT - pxalign;
+		unsigned int incr    = enlarge << salign;
+
+		v4l_bound_align_image(width, 1, *width + incr, 0,
+				height, 1, *height + incr, 0, salign);
+		return 0;
+	}
+	return 1;
+}
+
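+/*
+ * Call a subdev video operation with icd->sense temporarily installed, so
+ * that the sensor can report a pixel clock change; fail with -EINVAL if the
+ * reported pixel clock exceeds the platform imposed limit.
+ */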
+#define subdev_call_with_sense(pcdev, dev, icd, sd, function, args...)		     \
+({										     \
+	struct soc_camera_sense sense = {					     \
+		.master_clock		= pcdev->camexclk,			     \
+		.pixel_clock_max	= 0,					     \
+	};									     \
+	int __ret;								     \
+										     \
+	if (pcdev->pdata)							     \
+		sense.pixel_clock_max = pcdev->pdata->lclk_khz_max * 1000;	     \
+	icd->sense = &sense;							     \
+	__ret = v4l2_subdev_call(sd, video, function, ##args);			     \
+	icd->sense = NULL;							     \
+										     \
+	if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {				     \
+		if (sense.pixel_clock > sense.pixel_clock_max) {		     \
+			dev_err(dev,						     \
+				"%s: pixel clock %lu set by the camera too high!\n", \
+				__func__, sense.pixel_clock);			     \
+			__ret = -EINVAL;					     \
+		}								     \
+	}									     \
+	__ret;									     \
+})
+
+static int set_mbus_format(struct omap1_cam_dev *pcdev, struct device *dev,
+		struct soc_camera_device *icd, struct v4l2_subdev *sd,
+		struct v4l2_mbus_framefmt *mf,
+		const struct soc_camera_format_xlate *xlate)
+{
+	s32 bytes_per_line;
+	int ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_mbus_fmt, mf);
+
+	if (ret < 0) {
+		dev_err(dev, "%s: s_mbus_fmt failed\n", __func__);
+		return ret;
+	}
+
+	if (mf->code != xlate->code) {
+		dev_err(dev, "%s: unexpected pixel code change\n", __func__);
+		return -EINVAL;
+	}
+
+	bytes_per_line = soc_mbus_bytes_per_line(mf->width, xlate->host_fmt);
+	if (bytes_per_line < 0) {
+		dev_err(dev, "%s: soc_mbus_bytes_per_line() failed\n",
+				__func__);
+		return bytes_per_line;
+	}
+
+	if (!is_dma_aligned(bytes_per_line, mf->height, pcdev->vb_mode)) {
+		dev_err(dev, "%s: resulting geometry %ux%u not DMA aligned\n",
+				__func__, mf->width, mf->height);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int omap1_cam_set_crop(struct soc_camera_device *icd,
+			       struct v4l2_crop *crop)
+{
+	struct v4l2_rect *rect = &crop->c;
+	const struct soc_camera_format_xlate *xlate = icd->current_fmt;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_crop, crop);
+	if (ret < 0) {
+		dev_warn(dev, "%s: failed to crop to %ux%u@%u:%u\n", __func__,
+			 rect->width, rect->height, rect->left, rect->top);
+		return ret;
+	}
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+	if (ret < 0) {
+		dev_warn(dev, "%s: failed to fetch current format\n", __func__);
+		return ret;
+	}
+
+	ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+			false);
+	if (ret < 0) {
+		dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+				__func__, mf.width, mf.height,
+				xlate->host_fmt->name);
+		return ret;
+	}
+
+	if (!ret) {
+		/* sensor returned geometry not DMA aligned, trying to fix */
+		ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+		if (ret < 0) {
+			dev_err(dev, "%s: failed to set format\n", __func__);
+			return ret;
+		}
+	}
+
+	icd->user_width	 = mf.width;
+	icd->user_height = mf.height;
+
+	return 0;
+}
+
+static int omap1_cam_set_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(dev, "%s: format %#x not found\n", __func__,
+				pix->pixelformat);
+		return -EINVAL;
+	}
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+			true);
+	if (ret < 0) {
+		dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+				__func__, pix->width, pix->height,
+				xlate->host_fmt->name);
+		return ret;
+	}
+
+	ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+	if (ret < 0) {
+		dev_err(dev, "%s: failed to set format\n", __func__);
+		return ret;
+	}
+
+	pix->width	 = mf.width;
+	pix->height	 = mf.height;
+	pix->field	 = mf.field;
+	pix->colorspace  = mf.colorspace;
+	icd->current_fmt = xlate;
+
+	return 0;
+}
+
+static int omap1_cam_try_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+	/* TODO: limit to OMAP1 hardware capabilities */
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %#x not found\n",
+			 pix->pixelformat);
+		return -EINVAL;
+	}
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	/* limit to sensor capabilities */
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->field	= mf.field;
+	pix->colorspace	= mf.colorspace;
+
+	return 0;
+}
+
+static bool sg_mode;
+
+/*
+ * Local mmap_mapper wrapper,
+ * used for detecting videobuf-dma-contig buffer allocation failures
+ * and switching to videobuf-dma-sg automatically for future attempts.
+ */
+static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
+				  struct videobuf_buffer *buf,
+				  struct vm_area_struct *vma)
+{
+	struct soc_camera_device *icd = q->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	int ret;
+
+	ret = pcdev->mmap_mapper(q, buf, vma);
+
+	if (ret == -ENOMEM)
+		sg_mode = true;
+
+	return ret;
+}
+
+static void omap1_cam_init_videobuf(struct videobuf_queue *q,
+				     struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct omap1_cam_dev *pcdev = ici->priv;
+
+	if (!sg_mode)
+		videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
+				icd->parent, &pcdev->lock,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+				sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+	else
+		videobuf_queue_sg_init(q, &omap1_videobuf_ops,
+				icd->parent, &pcdev->lock,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+				sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+
+	/* use videobuf mode (auto)selected with the module parameter */
+	pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
+
+	/*
+	 * Ensure we substitute the videobuf-dma-contig version of the
+	 * mmap_mapper() callback with our own wrapper, used for switching
+	 * automatically to videobuf-dma-sg on buffer allocation failure.
+	 */
+	if (!sg_mode && q->int_ops->mmap_mapper != omap1_cam_mmap_mapper) {
+		pcdev->mmap_mapper = q->int_ops->mmap_mapper;
+		q->int_ops->mmap_mapper = omap1_cam_mmap_mapper;
+	}
+}
+
+static int omap1_cam_reqbufs(struct soc_camera_device *icd,
+			      struct v4l2_requestbuffers *p)
+{
+	int i;
+
+	/*
+	 * This is for locking debugging only. I removed spinlocks and now I
+	 * check whether .prepare is ever called on a linked buffer, or whether
+	 * a dma IRQ can occur for an in-work or unlinked buffer. Until now
+	 * it hasn't triggered.
+	 */
+	for (i = 0; i < p->count; i++) {
+		struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i],
+						      struct omap1_cam_buf, vb);
+		buf->inwork = 0;
+		INIT_LIST_HEAD(&buf->vb.queue);
+	}
+
+	return 0;
+}
+
+static int omap1_cam_querycap(struct soc_camera_host *ici,
+			       struct v4l2_capability *cap)
+{
+	/* cap->name is set by the friendly caller:-> */
+	strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct omap1_cam_dev *pcdev = ici->priv;
+	u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+	const struct soc_camera_format_xlate *xlate;
+	const struct soc_mbus_pixelfmt *fmt;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long common_flags;
+	u32 ctrlclock, mode;
+	int ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg, SOCAM_BUS_FLAGS);
+		if (!common_flags) {
+			dev_warn(dev,
+				 "Flags incompatible: camera 0x%x, host 0x%x\n",
+				 cfg.flags, SOCAM_BUS_FLAGS);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = SOCAM_BUS_FLAGS;
+	}
+
+	/* Make choices, possibly based on platform configuration */
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+			(common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+		if (!pcdev->pdata ||
+				pcdev->pdata->flags & OMAP1_CAMERA_LCLK_RISING)
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+		else
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+	if (ctrlclock & LCLK_EN)
+		CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+	if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) {
+		dev_dbg(dev, "CTRLCLOCK_REG |= POLCLK\n");
+		ctrlclock |= POLCLK;
+	} else {
+		dev_dbg(dev, "CTRLCLOCK_REG &= ~POLCLK\n");
+		ctrlclock &= ~POLCLK;
+	}
+	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+	if (ctrlclock & LCLK_EN)
+		CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+	/* select bus endianness */
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	fmt = xlate->host_fmt;
+
+	mode = CAM_READ(pcdev, MODE) & ~(RAZ_FIFO | IRQ_MASK | DMA);
+	if (fmt->order == SOC_MBUS_ORDER_LE) {
+		dev_dbg(dev, "MODE_REG &= ~ORDERCAMD\n");
+		CAM_WRITE(pcdev, MODE, mode & ~ORDERCAMD);
+	} else {
+		dev_dbg(dev, "MODE_REG |= ORDERCAMD\n");
+		CAM_WRITE(pcdev, MODE, mode | ORDERCAMD);
+	}
+
+	return 0;
+}
+
+static unsigned int omap1_cam_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct omap1_cam_buf *buf;
+
+	buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf,
+			 vb.stream);
+
+	poll_wait(file, &buf->vb.done, pt);
+
+	if (buf->vb.state == VIDEOBUF_DONE ||
+	    buf->vb.state == VIDEOBUF_ERROR)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static struct soc_camera_host_ops omap1_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= omap1_cam_add_device,
+	.remove		= omap1_cam_remove_device,
+	.get_formats	= omap1_cam_get_formats,
+	.set_crop	= omap1_cam_set_crop,
+	.set_fmt	= omap1_cam_set_fmt,
+	.try_fmt	= omap1_cam_try_fmt,
+	.init_videobuf	= omap1_cam_init_videobuf,
+	.reqbufs	= omap1_cam_reqbufs,
+	.querycap	= omap1_cam_querycap,
+	.set_bus_param	= omap1_cam_set_bus_param,
+	.poll		= omap1_cam_poll,
+};
+
+static int __init omap1_cam_probe(struct platform_device *pdev)
+{
+	struct omap1_cam_dev *pcdev;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *base;
+	unsigned int irq;
+	int err = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || (int)irq <= 0) {
+		err = -ENODEV;
+		goto exit;
+	}
+
+	clk = clk_get(&pdev->dev, "armper_ck");
+	if (IS_ERR(clk)) {
+		err = PTR_ERR(clk);
+		goto exit;
+	}
+
+	pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
+	if (!pcdev) {
+		dev_err(&pdev->dev, "Could not allocate pcdev\n");
+		err = -ENOMEM;
+		goto exit_put_clk;
+	}
+
+	pcdev->res = res;
+	pcdev->clk = clk;
+
+	pcdev->pdata = pdev->dev.platform_data;
+	if (pcdev->pdata) {
+		pcdev->pflags = pcdev->pdata->flags;
+		pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000;
+	}
+
+	switch (pcdev->camexclk) {
+	case 6000000:
+	case 8000000:
+	case 9600000:
+	case 12000000:
+	case 24000000:
+		break;
+	default:
+		/* pcdev->camexclk != 0 => pcdev->pdata != NULL */
+		dev_warn(&pdev->dev,
+				"Incorrect sensor clock frequency %ld kHz, "
+				"should be one of 0, 6, 8, 9.6, 12 or 24 MHz, "
+				"please correct your platform data\n",
+				pcdev->pdata->camexclk_khz);
+		pcdev->camexclk = 0;
+	case 0:
+		dev_info(&pdev->dev, "Not providing sensor clock\n");
+	}
+
+	INIT_LIST_HEAD(&pcdev->capture);
+	spin_lock_init(&pcdev->lock);
+
+	/*
+	 * Request the region.
+	 */
+	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
+		err = -EBUSY;
+		goto exit_kfree;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+	pcdev->irq = irq;
+	pcdev->base = base;
+
+	sensor_reset(pcdev, true);
+
+	err = omap_request_dma(OMAP_DMA_CAMERA_IF_RX, DRIVER_NAME,
+			dma_isr, (void *)pcdev, &pcdev->dma_ch);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
+		err = -EBUSY;
+		goto exit_iounmap;
+	}
+	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
+
+	/* preconfigure DMA */
+	omap_set_dma_src_params(pcdev->dma_ch, OMAP_DMA_PORT_TIPB,
+			OMAP_DMA_AMODE_CONSTANT, res->start + REG_CAMDATA,
+			0, 0);
+	omap_set_dma_dest_burst_mode(pcdev->dma_ch, OMAP_DMA_DATA_BURST_4);
+	/* setup DMA autoinitialization */
+	omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
+
+	err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+	if (err) {
+		dev_err(&pdev->dev, "Camera interrupt register failed\n");
+		goto exit_free_dma;
+	}
+
+	pcdev->soc_host.drv_name	= DRIVER_NAME;
+	pcdev->soc_host.ops		= &omap1_host_ops;
+	pcdev->soc_host.priv		= pcdev;
+	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
+	pcdev->soc_host.nr		= pdev->id;
+
+	err = soc_camera_host_register(&pcdev->soc_host);
+	if (err)
+		goto exit_free_irq;
+
+	dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
+
+	return 0;
+
+exit_free_irq:
+	free_irq(pcdev->irq, pcdev);
+exit_free_dma:
+	omap_free_dma(pcdev->dma_ch);
+exit_iounmap:
+	iounmap(base);
+exit_release:
+	release_mem_region(res->start, resource_size(res));
+exit_kfree:
+	kfree(pcdev);
+exit_put_clk:
+	clk_put(clk);
+exit:
+	return err;
+}
+
+static int __exit omap1_cam_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct omap1_cam_dev *pcdev = container_of(soc_host,
+					struct omap1_cam_dev, soc_host);
+	struct resource *res;
+
+	free_irq(pcdev->irq, pcdev);
+
+	omap_free_dma(pcdev->dma_ch);
+
+	soc_camera_host_unregister(soc_host);
+
+	iounmap(pcdev->base);
+
+	res = pcdev->res;
+	release_mem_region(res->start, resource_size(res));
+
+	clk_put(pcdev->clk);
+
+	kfree(pcdev);
+
+	dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
+
+	return 0;
+}
+
+static struct platform_driver omap1_cam_driver = {
+	.driver		= {
+		.name	= DRIVER_NAME,
+	},
+	.probe		= omap1_cam_probe,
+	.remove		= __exit_p(omap1_cam_remove),
+};
+
+module_platform_driver(omap1_cam_driver);
+
+module_param(sg_mode, bool, 0644);
+MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
+
+MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/omap24xxcam-dma.c b/drivers/media/platform/omap24xxcam-dma.c
new file mode 100644
index 0000000..9c00776
--- /dev/null
+++ b/drivers/media/platform/omap24xxcam-dma.c
@@ -0,0 +1,601 @@
+/*
+ * drivers/media/platform/omap24xxcam-dma.c
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2004 Texas Instruments.
+ * Copyright (C) 2007 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * Based on code from Andy Lowe <source@mvista.com> and
+ *                    David Cohen <david.cohen@indt.org.br>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+#include "omap24xxcam.h"
+
+/*
+ *
+ * DMA hardware.
+ *
+ */
+
+/* Ack all interrupts on CSR and IRQSTATUS_L0 */
+static void omap24xxcam_dmahw_ack_all(void __iomem *base)
+{
+	u32 csr;
+	int i;
+
+	for (i = 0; i < NUM_CAMDMA_CHANNELS; ++i) {
+		csr = omap24xxcam_reg_in(base, CAMDMA_CSR(i));
+		/* ack interrupt in CSR */
+		omap24xxcam_reg_out(base, CAMDMA_CSR(i), csr);
+	}
+	omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, 0xf);
+}
+
+/* Ack dmach on CSR and IRQSTATUS_L0 */
+static u32 omap24xxcam_dmahw_ack_ch(void __iomem *base, int dmach)
+{
+	u32 csr;
+
+	csr = omap24xxcam_reg_in(base, CAMDMA_CSR(dmach));
+	/* ack interrupt in CSR */
+	omap24xxcam_reg_out(base, CAMDMA_CSR(dmach), csr);
+	/* ack interrupt in IRQSTATUS */
+	omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, (1 << dmach));
+
+	return csr;
+}
+
+static int omap24xxcam_dmahw_running(void __iomem *base, int dmach)
+{
+	return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
+}
+
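+/* Program (but do not yet enable) channel dmach for a single camera
+ * synchronized transfer of len bytes to the memory address start.
+ */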
+static void omap24xxcam_dmahw_transfer_setup(void __iomem *base, int dmach,
+					     dma_addr_t start, u32 len)
+{
+	omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
+			    CAMDMA_CCR_SEL_SRC_DST_SYNC
+			    | CAMDMA_CCR_BS
+			    | CAMDMA_CCR_DST_AMODE_POST_INC
+			    | CAMDMA_CCR_SRC_AMODE_POST_INC
+			    | CAMDMA_CCR_FS
+			    | CAMDMA_CCR_WR_ACTIVE
+			    | CAMDMA_CCR_RD_ACTIVE
+			    | CAMDMA_CCR_SYNCHRO_CAMERA);
+	omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(dmach), 0);
+	omap24xxcam_reg_out(base, CAMDMA_CEN(dmach), len);
+	omap24xxcam_reg_out(base, CAMDMA_CFN(dmach), 1);
+	omap24xxcam_reg_out(base, CAMDMA_CSDP(dmach),
+			    CAMDMA_CSDP_WRITE_MODE_POSTED
+			    | CAMDMA_CSDP_DST_BURST_EN_32
+			    | CAMDMA_CSDP_DST_PACKED
+			    | CAMDMA_CSDP_SRC_BURST_EN_32
+			    | CAMDMA_CSDP_SRC_PACKED
+			    | CAMDMA_CSDP_DATA_TYPE_8BITS);
+	omap24xxcam_reg_out(base, CAMDMA_CSSA(dmach), 0);
+	omap24xxcam_reg_out(base, CAMDMA_CDSA(dmach), start);
+	omap24xxcam_reg_out(base, CAMDMA_CSEI(dmach), 0);
+	omap24xxcam_reg_out(base, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
+	omap24xxcam_reg_out(base, CAMDMA_CDEI(dmach), 0);
+	omap24xxcam_reg_out(base, CAMDMA_CDFI(dmach), 0);
+	omap24xxcam_reg_out(base, CAMDMA_CSR(dmach),
+			    CAMDMA_CSR_MISALIGNED_ERR
+			    | CAMDMA_CSR_SECURE_ERR
+			    | CAMDMA_CSR_TRANS_ERR
+			    | CAMDMA_CSR_BLOCK
+			    | CAMDMA_CSR_DROP);
+	omap24xxcam_reg_out(base, CAMDMA_CICR(dmach),
+			    CAMDMA_CICR_MISALIGNED_ERR_IE
+			    | CAMDMA_CICR_SECURE_ERR_IE
+			    | CAMDMA_CICR_TRANS_ERR_IE
+			    | CAMDMA_CICR_BLOCK_IE
+			    | CAMDMA_CICR_DROP_IE);
+}
+
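+/* Enable channel dmach, starting the previously programmed transfer. */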
+static void omap24xxcam_dmahw_transfer_start(void __iomem *base, int dmach)
+{
+	omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
+			    CAMDMA_CCR_SEL_SRC_DST_SYNC
+			    | CAMDMA_CCR_BS
+			    | CAMDMA_CCR_DST_AMODE_POST_INC
+			    | CAMDMA_CCR_SRC_AMODE_POST_INC
+			    | CAMDMA_CCR_ENABLE
+			    | CAMDMA_CCR_FS
+			    | CAMDMA_CCR_SYNCHRO_CAMERA);
+}
+
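+/* Link channel dmach behind the previously programmed channel so that it
+ * starts automatically when that one completes; if the previous transfer
+ * has already finished, start dmach by hand.
+ */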
+static void omap24xxcam_dmahw_transfer_chain(void __iomem *base, int dmach,
+					     int free_dmach)
+{
+	int prev_dmach, ch;
+
+	if (dmach == 0)
+		prev_dmach = NUM_CAMDMA_CHANNELS - 1;
+	else
+		prev_dmach = dmach - 1;
+	omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(prev_dmach),
+			    CAMDMA_CLNK_CTRL_ENABLE_LNK | dmach);
+	/* Did we chain the DMA transfer before the previous one
+	 * finished?
+	 */
+	ch = (dmach + free_dmach) % NUM_CAMDMA_CHANNELS;
+	while (!(omap24xxcam_reg_in(base, CAMDMA_CCR(ch))
+		 & CAMDMA_CCR_ENABLE)) {
+		if (ch == dmach) {
+			/* The previous transfer has ended and this one
+			 * hasn't started, so we must not have chained
+			 * to the previous one in time.  We'll have to
+			 * start it now.
+			 */
+			omap24xxcam_dmahw_transfer_start(base, dmach);
+			break;
+		} else
+			ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
+	}
+}
+
+/* Abort all chained DMA transfers. After all transfers have been
+ * aborted and the DMA controller is idle, the completion routines for
+ * any aborted transfers will be called in sequence. The DMA
+ * controller may not be idle after this routine completes, because
+ * the completion routines might start new transfers.
+ */
+static void omap24xxcam_dmahw_abort_ch(void __iomem *base, int dmach)
+{
+	/* mask all interrupts from this channel */
+	omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
+	/* unlink this channel */
+	omap24xxcam_reg_merge(base, CAMDMA_CLNK_CTRL(dmach), 0,
+			      CAMDMA_CLNK_CTRL_ENABLE_LNK);
+	/* disable this channel */
+	omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
+}
+
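+/* One-time setup of the camera DMA controller: idle modes, channel FIFO
+ * depth, and unmasking of the camera DMA interrupts on IRQ line 0.
+ */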
+static void omap24xxcam_dmahw_init(void __iomem *base)
+{
+	omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
+			    CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
+			    | CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE
+			    | CAMDMA_OCP_SYSCONFIG_AUTOIDLE);
+
+	omap24xxcam_reg_merge(base, CAMDMA_GCR, 0x10,
+			      CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH);
+
+	omap24xxcam_reg_out(base, CAMDMA_IRQENABLE_L0, 0xf);
+}
+
+/*
+ *
+ * Individual DMA channel handling.
+ *
+ */
+
+/* Start a DMA transfer from the camera to memory.
+ * Returns zero if the transfer was successfully started, or non-zero if all
+ * DMA channels are already in use or starting is currently inhibited.
+ */
+static int omap24xxcam_dma_start(struct omap24xxcam_dma *dma, dma_addr_t start,
+				 u32 len, dma_callback_t callback, void *arg)
+{
+	unsigned long flags;
+	int dmach;
+
+	spin_lock_irqsave(&dma->lock, flags);
+
+	if (!dma->free_dmach || atomic_read(&dma->dma_stop)) {
+		spin_unlock_irqrestore(&dma->lock, flags);
+		return -EBUSY;
+	}
+
+	dmach = dma->next_dmach;
+
+	dma->ch_state[dmach].callback = callback;
+	dma->ch_state[dmach].arg = arg;
+
+	omap24xxcam_dmahw_transfer_setup(dma->base, dmach, start, len);
+
+	/* We're ready to start the DMA transfer. */
+
+	if (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
+		/* A transfer is already in progress, so try to chain to it. */
+		omap24xxcam_dmahw_transfer_chain(dma->base, dmach,
+						 dma->free_dmach);
+	} else {
+		/* No transfer is in progress, so we'll just start this one
+		 * now.
+		 */
+		omap24xxcam_dmahw_transfer_start(dma->base, dmach);
+	}
+
+	dma->next_dmach = (dma->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
+	dma->free_dmach--;
+
+	spin_unlock_irqrestore(&dma->lock, flags);
+
+	return 0;
+}
+
+/* Abort all chained DMA transfers. After all transfers have been
+ * aborted and the DMA controller is idle, the completion routines for
+ * any aborted transfers will be called in sequence. The DMA
+ * controller may not be idle after this routine completes, because
+ * the completion routines might start new transfers.
+ */
+static void omap24xxcam_dma_abort(struct omap24xxcam_dma *dma, u32 csr)
+{
+	unsigned long flags;
+	int dmach, i, free_dmach;
+	dma_callback_t callback;
+	void *arg;
+
+	spin_lock_irqsave(&dma->lock, flags);
+
+	/* stop any DMA transfers in progress */
+	dmach = (dma->next_dmach + dma->free_dmach) % NUM_CAMDMA_CHANNELS;
+	for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
+		omap24xxcam_dmahw_abort_ch(dma->base, dmach);
+		dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
+	}
+
+	/* We have to be careful here because the callback routine
+	 * might start a new DMA transfer, and we only want to abort
+	 * transfers that were started before this routine was called.
+	 */
+	free_dmach = dma->free_dmach;
+	while ((dma->free_dmach < NUM_CAMDMA_CHANNELS) &&
+	       (free_dmach < NUM_CAMDMA_CHANNELS)) {
+		dmach = (dma->next_dmach + dma->free_dmach)
+			% NUM_CAMDMA_CHANNELS;
+		callback = dma->ch_state[dmach].callback;
+		arg = dma->ch_state[dmach].arg;
+		dma->free_dmach++;
+		free_dmach++;
+		if (callback) {
+			/* leave interrupts disabled during callback */
+			spin_unlock(&dma->lock);
+			(*callback) (dma, csr, arg);
+			spin_lock(&dma->lock);
+		}
+	}
+
+	spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+/* Abort all chained DMA transfers. After all transfers have been
+ * aborted and the DMA controller is idle, the completion routines for
+ * any aborted transfers will be called in sequence. If the completion
+ * routines attempt to start a new DMA transfer it will fail, so the
+ * DMA controller will be idle after this routine completes.
+ */
+static void omap24xxcam_dma_stop(struct omap24xxcam_dma *dma, u32 csr)
+{
+	atomic_inc(&dma->dma_stop);
+	omap24xxcam_dma_abort(dma, csr);
+	atomic_dec(&dma->dma_stop);
+}
+
+/* Camera DMA interrupt service routine. */
+void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma)
+{
+	int dmach;
+	dma_callback_t callback;
+	void *arg;
+	u32 csr;
+	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
+		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
+		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
+
+	spin_lock(&dma->lock);
+
+	if (dma->free_dmach == NUM_CAMDMA_CHANNELS) {
+		/* A camera DMA interrupt occurred while all channels
+		 * are idle, so we'll acknowledge the interrupt in the
+		 * IRQSTATUS register and exit.
+		 */
+		omap24xxcam_dmahw_ack_all(dma->base);
+		spin_unlock(&dma->lock);
+		return;
+	}
+
+	while (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
+		dmach = (dma->next_dmach + dma->free_dmach)
+			% NUM_CAMDMA_CHANNELS;
+		if (omap24xxcam_dmahw_running(dma->base, dmach)) {
+			/* This buffer hasn't finished yet, so we're done. */
+			break;
+		}
+		csr = omap24xxcam_dmahw_ack_ch(dma->base, dmach);
+		if (csr & csr_error) {
+			/* A DMA error occurred, so stop all DMA
+			 * transfers in progress.
+			 */
+			spin_unlock(&dma->lock);
+			omap24xxcam_dma_stop(dma, csr);
+			return;
+		} else {
+			callback = dma->ch_state[dmach].callback;
+			arg = dma->ch_state[dmach].arg;
+			dma->free_dmach++;
+			if (callback) {
+				spin_unlock(&dma->lock);
+				(*callback) (dma, csr, arg);
+				spin_lock(&dma->lock);
+			}
+		}
+	}
+
+	spin_unlock(&dma->lock);
+
+	omap24xxcam_sgdma_process(
+		container_of(dma, struct omap24xxcam_sgdma, dma));
+}
+
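+/* (Re)initialize the camera DMA controller hardware. */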
+void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dma->lock, flags);
+
+	omap24xxcam_dmahw_init(dma->base);
+
+	spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
+				 void __iomem *base)
+{
+	int ch;
+
+	/* group all channels on DMA IRQ0 and unmask irq */
+	spin_lock_init(&dma->lock);
+	dma->base = base;
+	dma->free_dmach = NUM_CAMDMA_CHANNELS;
+	dma->next_dmach = 0;
+	for (ch = 0; ch < NUM_CAMDMA_CHANNELS; ch++) {
+		dma->ch_state[ch].callback = NULL;
+		dma->ch_state[ch].arg = NULL;
+	}
+}
+
+/*
+ *
+ * Scatter-gather DMA.
+ *
+ * High-level DMA construct for transferring whole picture frames to
+ * memory that is discontinuous.
+ *
+ */
+
+/* DMA completion routine for the scatter-gather DMA fragments. */
+static void omap24xxcam_sgdma_callback(struct omap24xxcam_dma *dma, u32 csr,
+				       void *arg)
+{
+	struct omap24xxcam_sgdma *sgdma =
+		container_of(dma, struct omap24xxcam_sgdma, dma);
+	int sgslot = (int)arg;
+	struct sgdma_state *sg_state;
+	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
+		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
+		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
+
+	spin_lock(&sgdma->lock);
+
+	/* We got an interrupt, we can remove the timer */
+	del_timer(&sgdma->reset_timer);
+
+	sg_state = sgdma->sg_state + sgslot;
+	if (!sg_state->queued_sglist) {
+		spin_unlock(&sgdma->lock);
+		printk(KERN_ERR "%s: sgdma completed when none queued!\n",
+		       __func__);
+		return;
+	}
+
+	sg_state->csr |= csr;
+	if (!--sg_state->queued_sglist) {
+		/* Queue for this sglist is empty, so check to see if we're
+		 * done.
+		 */
+		if ((sg_state->next_sglist == sg_state->sglen)
+		    || (sg_state->csr & csr_error)) {
+			sgdma_callback_t callback = sg_state->callback;
+			void *arg = sg_state->arg;
+			u32 sg_csr = sg_state->csr;
+			/* All done with this sglist */
+			sgdma->free_sgdma++;
+			if (callback) {
+				spin_unlock(&sgdma->lock);
+				(*callback) (sgdma, sg_csr, arg);
+				return;
+			}
+		}
+	}
+
+	spin_unlock(&sgdma->lock);
+}
+
+/* Start queued scatter-gather DMA transfers. */
+void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma)
+{
+	unsigned long flags;
+	int queued_sgdma, sgslot;
+	struct sgdma_state *sg_state;
+	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
+		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
+		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
+
+	spin_lock_irqsave(&sgdma->lock, flags);
+
+	queued_sgdma = NUM_SG_DMA - sgdma->free_sgdma;
+	sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
+	while (queued_sgdma > 0) {
+		sg_state = sgdma->sg_state + sgslot;
+		while ((sg_state->next_sglist < sg_state->sglen) &&
+		       !(sg_state->csr & csr_error)) {
+			const struct scatterlist *sglist;
+			unsigned int len;
+
+			sglist = sg_state->sglist + sg_state->next_sglist;
+			/* try to start the next DMA transfer */
+			if (sg_state->next_sglist + 1 == sg_state->sglen) {
+				/*
+				 *  On the last sg element, handle the case
+				 *  where cam->img.pix.sizeimage is not page
+				 *  aligned.
+				 */
+				len = sg_state->len - sg_state->bytes_read;
+			} else {
+				len = sg_dma_len(sglist);
+			}
+
+			if (omap24xxcam_dma_start(&sgdma->dma,
+						  sg_dma_address(sglist),
+						  len,
+						  omap24xxcam_sgdma_callback,
+						  (void *)sgslot)) {
+				/* DMA start failed */
+				spin_unlock_irqrestore(&sgdma->lock, flags);
+				return;
+			} else {
+				unsigned long expires;
+				/* DMA start was successful */
+				sg_state->next_sglist++;
+				sg_state->bytes_read += len;
+				sg_state->queued_sglist++;
+
+				/* We start the reset timer */
+				expires = jiffies + HZ;
+				mod_timer(&sgdma->reset_timer, expires);
+			}
+		}
+		queued_sgdma--;
+		sgslot = (sgslot + 1) % NUM_SG_DMA;
+	}
+
+	spin_unlock_irqrestore(&sgdma->lock, flags);
+}
+
+/*
+ * Queue a scatter-gather DMA transfer from the camera to memory.
+ * Returns zero if the transfer was successfully queued, or non-zero
+ * if all of the scatter-gather slots are already in use.
+ */
+int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
+			    const struct scatterlist *sglist, int sglen,
+			    int len, sgdma_callback_t callback, void *arg)
+{
+	unsigned long flags;
+	struct sgdma_state *sg_state;
+
+	if ((sglen < 0) || ((sglen > 0) && !sglist))
+		return -EINVAL;
+
+	spin_lock_irqsave(&sgdma->lock, flags);
+
+	if (!sgdma->free_sgdma) {
+		spin_unlock_irqrestore(&sgdma->lock, flags);
+		return -EBUSY;
+	}
+
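+	/* claim the next free slot and record the transfer parameters */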
+	sg_state = sgdma->sg_state + sgdma->next_sgdma;
+
+	sg_state->sglist = sglist;
+	sg_state->sglen = sglen;
+	sg_state->next_sglist = 0;
+	sg_state->bytes_read = 0;
+	sg_state->len = len;
+	sg_state->queued_sglist = 0;
+	sg_state->csr = 0;
+	sg_state->callback = callback;
+	sg_state->arg = arg;
+
+	sgdma->next_sgdma = (sgdma->next_sgdma + 1) % NUM_SG_DMA;
+	sgdma->free_sgdma--;
+
+	spin_unlock_irqrestore(&sgdma->lock, flags);
+
+	omap24xxcam_sgdma_process(sgdma);
+
+	return 0;
+}
+
+/* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
+ * Any queued scatter-gather DMA transactions that have not yet been started
+ * will remain queued.  The DMA controller will be idle after this routine
+ * completes.  When the scatter-gather queue is restarted, the next
+ * scatter-gather DMA transfer will begin at the start of a new transaction.
+ */
+void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
+{
+	unsigned long flags;
+	int sgslot;
+	struct sgdma_state *sg_state;
+	u32 csr = CAMDMA_CSR_TRANS_ERR;
+
+	/* stop any DMA transfers in progress */
+	omap24xxcam_dma_stop(&sgdma->dma, csr);
+
+	spin_lock_irqsave(&sgdma->lock, flags);
+
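+	/* check whether the oldest queued transfer has already been started */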
+	if (sgdma->free_sgdma < NUM_SG_DMA) {
+		sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
+		sg_state = sgdma->sg_state + sgslot;
+		if (sg_state->next_sglist != 0) {
+			/* This DMA transfer was in progress, so abort it. */
+			sgdma_callback_t callback = sg_state->callback;
+			void *arg = sg_state->arg;
+			sgdma->free_sgdma++;
+			if (callback) {
+				/* leave interrupts masked */
+				spin_unlock(&sgdma->lock);
+				(*callback) (sgdma, csr, arg);
+				spin_lock(&sgdma->lock);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&sgdma->lock, flags);
+}
+
+void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
+			    void __iomem *base,
+			    void (*reset_callback)(unsigned long data),
+			    unsigned long reset_callback_data)
+{
+	int sg;
+
+	spin_lock_init(&sgdma->lock);
+	sgdma->free_sgdma = NUM_SG_DMA;
+	sgdma->next_sgdma = 0;
+	for (sg = 0; sg < NUM_SG_DMA; sg++) {
+		sgdma->sg_state[sg].sglen = 0;
+		sgdma->sg_state[sg].next_sglist = 0;
+		sgdma->sg_state[sg].bytes_read = 0;
+		sgdma->sg_state[sg].queued_sglist = 0;
+		sgdma->sg_state[sg].csr = 0;
+		sgdma->sg_state[sg].callback = NULL;
+		sgdma->sg_state[sg].arg = NULL;
+	}
+
+	omap24xxcam_dma_init(&sgdma->dma, base);
+	setup_timer(&sgdma->reset_timer, reset_callback, reset_callback_data);
+}
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/media/platform/omap24xxcam.c
new file mode 100644
index 0000000..fde2e66
--- /dev/null
+++ b/drivers/media/platform/omap24xxcam.c
@@ -0,0 +1,1881 @@
+/*
+ * drivers/media/platform/omap24xxcam.c
+ *
+ * OMAP 2 camera block driver.
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2004 Texas Instruments.
+ * Copyright (C) 2007-2008 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * Based on code from Andy Lowe <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/pci.h>		/* needed for videobufs */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+
+#include "omap24xxcam.h"
+
+#define OMAP24XXCAM_VERSION "0.0.1"
+
+#define RESET_TIMEOUT_NS 10000
+
+static void omap24xxcam_reset(struct omap24xxcam_device *cam);
+static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam);
+static void omap24xxcam_device_unregister(struct v4l2_int_device *s);
+static int omap24xxcam_remove(struct platform_device *pdev);
+
+/* module parameters */
+static int video_nr = -1;	/* video device minor (-1 ==> auto assign) */
+/*
+ * Maximum amount of memory to use for capture buffers.
+ * Default is 4800KB, enough to double-buffer 1280x960 at 16 bits per pixel.
+ */
+static int capture_mem = 1280 * 960 * 2 * 2;
+
+static struct v4l2_int_device omap24xxcam;
+
+/*
+ *
+ * Clocks.
+ *
+ */
+
+static void omap24xxcam_clock_put(struct omap24xxcam_device *cam)
+{
+	if (cam->ick != NULL && !IS_ERR(cam->ick))
+		clk_put(cam->ick);
+	if (cam->fck != NULL && !IS_ERR(cam->fck))
+		clk_put(cam->fck);
+
+	cam->ick = cam->fck = NULL;
+}
+
+static int omap24xxcam_clock_get(struct omap24xxcam_device *cam)
+{
+	int rval = 0;
+
+	cam->fck = clk_get(cam->dev, "fck");
+	if (IS_ERR(cam->fck)) {
+		dev_err(cam->dev, "can't get camera fck");
+		rval = PTR_ERR(cam->fck);
+		omap24xxcam_clock_put(cam);
+		return rval;
+	}
+
+	cam->ick = clk_get(cam->dev, "ick");
+	if (IS_ERR(cam->ick)) {
+		dev_err(cam->dev, "can't get camera ick");
+		rval = PTR_ERR(cam->ick);
+		omap24xxcam_clock_put(cam);
+	}
+
+	return rval;
+}
+
+static void omap24xxcam_clock_on(struct omap24xxcam_device *cam)
+{
+	clk_enable(cam->fck);
+	clk_enable(cam->ick);
+}
+
+static void omap24xxcam_clock_off(struct omap24xxcam_device *cam)
+{
+	clk_disable(cam->fck);
+	clk_disable(cam->ick);
+}
+
+/*
+ *
+ * Camera core
+ *
+ */
+
+/*
+ * Set xclk.
+ *
+ * To disable xclk, use value zero.
+ */
+static void omap24xxcam_core_xclk_set(const struct omap24xxcam_device *cam,
+				      u32 xclk)
+{
+	if (xclk) {
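+		/*
+		 * Divide CAM_MCLK down to approximate the requested xclk;
+		 * a divisor of one selects the divider bypass.
+		 */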
+		u32 divisor = CAM_MCLK / xclk;
+
+		if (divisor == 1)
+			omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
+					    CC_CTRL_XCLK,
+					    CC_CTRL_XCLK_DIV_BYPASS);
+		else
+			omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
+					    CC_CTRL_XCLK, divisor);
+	} else
+		omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
+				    CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_STABLE_LOW);
+}
+
+static void omap24xxcam_core_hwinit(const struct omap24xxcam_device *cam)
+{
+	/*
+	 * Setting the camera core AUTOIDLE bit causes problems with frame
+	 * synchronization, so we will clear the AUTOIDLE bit instead.
+	 */
+	omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_SYSCONFIG,
+			    CC_SYSCONFIG_AUTOIDLE);
+
+	/* program the camera interface DMA packet size */
+	omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_DMA,
+			    CC_CTRL_DMA_EN | (DMA_THRESHOLD / 4 - 1));
+
+	/* enable camera core error interrupts */
+	omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQENABLE,
+			    CC_IRQENABLE_FW_ERR_IRQ
+			    | CC_IRQENABLE_FSC_ERR_IRQ
+			    | CC_IRQENABLE_SSC_ERR_IRQ
+			    | CC_IRQENABLE_FIFO_OF_IRQ);
+}
+
+/*
+ * Enable the camera core.
+ *
+ * Data transfer to the camera DMA starts at the beginning of the next frame.
+ */
+static void omap24xxcam_core_enable(const struct omap24xxcam_device *cam)
+{
+	omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
+			    cam->cc_ctrl);
+}
+
+/*
+ * Disable camera core.
+ *
+ * The data transfer will be stopped immediately (CC_CTRL_CC_RST). The
+ * core internal state machines will be reset. Use
+ * CC_CTRL_CC_FRAME_TRIG instead if you want to transfer the current
+ * frame completely.
+ */
+static void omap24xxcam_core_disable(const struct omap24xxcam_device *cam)
+{
+	omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
+			    CC_CTRL_CC_RST);
+}
+
+/* Interrupt service routine for camera core interrupts. */
+static void omap24xxcam_core_isr(struct omap24xxcam_device *cam)
+{
+	u32 cc_irqstatus;
+	const u32 cc_irqstatus_err =
+		CC_IRQSTATUS_FW_ERR_IRQ
+		| CC_IRQSTATUS_FSC_ERR_IRQ
+		| CC_IRQSTATUS_SSC_ERR_IRQ
+		| CC_IRQSTATUS_FIFO_UF_IRQ
+		| CC_IRQSTATUS_FIFO_OF_IRQ;
+
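+	/* read the pending camera core interrupts and acknowledge them */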
+	cc_irqstatus = omap24xxcam_reg_in(cam->mmio_base + CC_REG_OFFSET,
+					  CC_IRQSTATUS);
+	omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS,
+			    cc_irqstatus);
+
+	if (cc_irqstatus & cc_irqstatus_err
+	    && !atomic_read(&cam->in_reset)) {
+		dev_dbg(cam->dev, "resetting camera, cc_irqstatus 0x%x\n",
+			cc_irqstatus);
+		omap24xxcam_reset(cam);
+	}
+}
+
+/*
+ *
+ * videobuf_buffer handling.
+ *
+ * Memory for mmapped videobuf_buffers is not allocated
+ * conventionally, but by several alloc_pages() allocations and then
+ * creating the scatterlist on our own. User-space buffers are handled
+ * normally.
+ *
+ */
+
+/*
+ * Free the memory-mapped buffer memory allocated for a
+ * videobuf_buffer and the associated scatterlist.
+ */
+static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
+{
+	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+	size_t alloc_size;
+	struct page *page;
+	int i;
+
+	if (dma->sglist == NULL)
+		return;
+
+	i = dma->sglen;
+	while (i) {
+		i--;
+		alloc_size = sg_dma_len(&dma->sglist[i]);
+		page = sg_page(&dma->sglist[i]);
+		do {
+			ClearPageReserved(page++);
+		} while (alloc_size -= PAGE_SIZE);
+		__free_pages(sg_page(&dma->sglist[i]),
+			     get_order(sg_dma_len(&dma->sglist[i])));
+	}
+
+	kfree(dma->sglist);
+	dma->sglist = NULL;
+}
+
+/* Release all memory related to the videobuf_queue. */
+static void omap24xxcam_vbq_free_mmap_buffers(struct videobuf_queue *vbq)
+{
+	int i;
+
+	mutex_lock(&vbq->vb_lock);
+
+	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+		if (NULL == vbq->bufs[i])
+			continue;
+		if (V4L2_MEMORY_MMAP != vbq->bufs[i]->memory)
+			continue;
+		vbq->ops->buf_release(vbq, vbq->bufs[i]);
+		omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
+		kfree(vbq->bufs[i]);
+		vbq->bufs[i] = NULL;
+	}
+
+	mutex_unlock(&vbq->vb_lock);
+
+	videobuf_mmap_free(vbq);
+}
+
+/*
+ * Allocate a buffer for a video frame from physically contiguous
+ * chunks where possible, and build the DMA scatter-gather list for it.
+ */
+static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
+{
+	unsigned int order;
+	size_t alloc_size, size = vb->bsize; /* vb->bsize is page aligned */
+	struct page *page;
+	int max_pages, err = 0, i = 0;
+	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+	/*
+	 * allocate a maximum-size scatter-gather list. Note that this is
+	 * overhead; we may not use as many entries as we allocate.
+	 */
+	max_pages = vb->bsize >> PAGE_SHIFT;
+	dma->sglist = kcalloc(max_pages, sizeof(*dma->sglist), GFP_KERNEL);
+	if (dma->sglist == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	while (size) {
+		order = get_order(size);
+		/*
+		 * do not over-allocate even if we would get larger
+		 * contiguous chunk that way
+		 */
+		if ((PAGE_SIZE << order) > size)
+			order--;
+
+		/* try to allocate as many contiguous pages as possible */
+		page = alloc_pages(GFP_KERNEL, order);
+		/* if allocation fails, try to allocate smaller amount */
+		while (page == NULL) {
+			order--;
+			page = alloc_pages(GFP_KERNEL, order);
+			if (page == NULL && !order) {
+				err = -ENOMEM;
+				goto out;
+			}
+		}
+		size -= (PAGE_SIZE << order);
+
+		/* append allocated chunk of pages into scatter-gather list */
+		sg_set_page(&dma->sglist[i], page, PAGE_SIZE << order, 0);
+		dma->sglen++;
+		i++;
+
+		alloc_size = (PAGE_SIZE << order);
+
+		/* clear pages before giving them to user space */
+		memset(page_address(page), 0, alloc_size);
+
+		/* mark allocated pages reserved */
+		do {
+			SetPageReserved(page++);
+		} while (alloc_size -= PAGE_SIZE);
+	}
+	/*
+	 * REVISIT: not fully correct to assign nr_pages == sglen but
+	 * video-buf is passing nr_pages for e.g. unmap_sg calls
+	 */
+	dma->nr_pages = dma->sglen;
+	dma->direction = PCI_DMA_FROMDEVICE;
+
+	return 0;
+
+out:
+	omap24xxcam_vbq_free_mmap_buffer(vb);
+	return err;
+}
+
+static int omap24xxcam_vbq_alloc_mmap_buffers(struct videobuf_queue *vbq,
+					      unsigned int count)
+{
+	int i, err = 0;
+	struct omap24xxcam_fh *fh =
+		container_of(vbq, struct omap24xxcam_fh, vbq);
+
+	mutex_lock(&vbq->vb_lock);
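+	/* poll for completion of the oldest buffer on the stream queue */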
+
+	for (i = 0; i < count; i++) {
+		err = omap24xxcam_vbq_alloc_mmap_buffer(vbq->bufs[i]);
+		if (err)
+			goto out;
+		dev_dbg(fh->cam->dev, "sglen is %d for buffer %d\n",
+			videobuf_to_dma(vbq->bufs[i])->sglen, i);
+	}
+
+	mutex_unlock(&vbq->vb_lock);
+
+	return 0;
+out:
+	while (i) {
+		i--;
+		omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
+	}
+
+	mutex_unlock(&vbq->vb_lock);
+
+	return err;
+}
+
+/*
+ * This routine is called from interrupt context when a scatter-gather DMA
+ * transfer of a videobuf_buffer completes.
+ */
+static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
+				     u32 csr, void *arg)
+{
+	struct omap24xxcam_device *cam =
+		container_of(sgdma, struct omap24xxcam_device, sgdma);
+	struct omap24xxcam_fh *fh = cam->streaming->private_data;
+	struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
+	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
+		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
+		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
+	unsigned long flags;
+
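+	/* disable the camera core once the last queued transfer has completed */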
+	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+	if (--cam->sgdma_in_queue == 0)
+		omap24xxcam_core_disable(cam);
+	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+
+	do_gettimeofday(&vb->ts);
+	vb->field_count = atomic_add_return(2, &fh->field_count);
+	if (csr & csr_error) {
+		vb->state = VIDEOBUF_ERROR;
+		if (!atomic_read(&fh->cam->in_reset)) {
+			dev_dbg(cam->dev, "resetting camera, csr 0x%x\n", csr);
+			omap24xxcam_reset(cam);
+		}
+	} else
+		vb->state = VIDEOBUF_DONE;
+	wake_up(&vb->done);
+}
+
+static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
+				    struct videobuf_buffer *vb)
+{
+	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+	/* wait for buffer, especially to get out of the sgdma queue */
+	videobuf_waiton(vbq, vb, 0, 0);
+	if (vb->memory == V4L2_MEMORY_MMAP) {
+		dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
+			     dma->direction);
+		dma->direction = DMA_NONE;
+	} else {
+		videobuf_dma_unmap(vbq->dev, videobuf_to_dma(vb));
+		videobuf_dma_free(videobuf_to_dma(vb));
+	}
+
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/*
+ * Limit the number of available kernel image capture buffers based on the
+ * number requested, the currently selected image size, and the maximum
+ * amount of memory permitted for kernel capture buffers.
+ */
+static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
+				 unsigned int *size)
+{
+	struct omap24xxcam_fh *fh = vbq->priv_data;
+
+	if (*cnt <= 0)
+		*cnt = VIDEO_MAX_FRAME;	/* supply a default number of buffers */
+
+	if (*cnt > VIDEO_MAX_FRAME)
+		*cnt = VIDEO_MAX_FRAME;
+
+	*size = fh->pix.sizeimage;
+
+	/* accessing fh->cam->capture_mem is ok, it's constant */
+	if (*size * *cnt > fh->cam->capture_mem)
+		*cnt = fh->cam->capture_mem / *size;
+
+	return 0;
+}
+
+static int omap24xxcam_dma_iolock(struct videobuf_queue *vbq,
+				  struct videobuf_dmabuf *dma)
+{
+	int err = 0;
+
+	dma->direction = PCI_DMA_FROMDEVICE;
+	if (!dma_map_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction)) {
+		kfree(dma->sglist);
+		dma->sglist = NULL;
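+	/* the first opener initialises the hardware and powers up the sensor */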
+		dma->sglen = 0;
+		err = -EIO;
+	}
+
+	return err;
+}
+
+static int omap24xxcam_vbq_prepare(struct videobuf_queue *vbq,
+				   struct videobuf_buffer *vb,
+				   enum v4l2_field field)
+{
+	struct omap24xxcam_fh *fh = vbq->priv_data;
+	int err = 0;
+
+	/*
+	 * Accessing pix here is okay since it's constant while
+	 * streaming is on (and we only get called then).
+	 */
+	if (vb->baddr) {
+		/* This is a userspace buffer. */
+		if (fh->pix.sizeimage > vb->bsize) {
+			/* The buffer isn't big enough. */
+			err = -EINVAL;
+		} else
+			vb->size = fh->pix.sizeimage;
+	} else {
+		if (vb->state != VIDEOBUF_NEEDS_INIT) {
+			/*
+			 * We have a kernel bounce buffer that has
+			 * already been allocated.
+			 */
+			if (fh->pix.sizeimage > vb->size) {
+				/*
+				 * The image size has been changed to
+				 * a larger size since this buffer was
+				 * allocated, so we need to free and
+				 * reallocate it.
+				 */
+				omap24xxcam_vbq_release(vbq, vb);
+				vb->size = fh->pix.sizeimage;
+			}
+		} else {
+			/* We need to allocate a new kernel bounce buffer. */
+			vb->size = fh->pix.sizeimage;
+		}
+	}
+
+	if (err)
+		return err;
+
+	vb->width = fh->pix.width;
+	vb->height = fh->pix.height;
+	vb->field = field;
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		if (vb->memory == V4L2_MEMORY_MMAP)
+			/*
+			 * we have built the scatter-gather list ourselves, so
+			 * do the scatter-gather mapping as well
+			 */
+			err = omap24xxcam_dma_iolock(vbq, videobuf_to_dma(vb));
+		else
+			err = videobuf_iolock(vbq, vb, NULL);
+	}
+
+	if (!err)
+		vb->state = VIDEOBUF_PREPARED;
+	else
+		omap24xxcam_vbq_release(vbq, vb);
+
+	return err;
+}
+
+static void omap24xxcam_vbq_queue(struct videobuf_queue *vbq,
+				  struct videobuf_buffer *vb)
+{
+	struct omap24xxcam_fh *fh = vbq->priv_data;
+	struct omap24xxcam_device *cam = fh->cam;
+	enum videobuf_state state = vb->state;
+	unsigned long flags;
+	int err;
+
+	/*
+	 * FIXME: We're marking the buffer active since we have no
+	 * pretty way of marking it active exactly when the
+	 * scatter-gather transfer starts.
+	 */
+	vb->state = VIDEOBUF_ACTIVE;
+
+	err = omap24xxcam_sgdma_queue(&fh->cam->sgdma,
+				      videobuf_to_dma(vb)->sglist,
+				      videobuf_to_dma(vb)->sglen, vb->size,
+				      omap24xxcam_vbq_complete, vb);
+
+	if (!err) {
+		spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
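+		/*
+		 * Enable the camera core when this is the first queued
+		 * buffer, unless a reset is currently in progress.
+		 */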
+		if (++cam->sgdma_in_queue == 1
+		    && !atomic_read(&cam->in_reset))
+			omap24xxcam_core_enable(cam);
+		spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+	} else {
+		/*
+		 * Oops. We're not supposed to get any errors here.
+		 * The only way we could get an error is if we ran out
+		 * of scatter-gather DMA slots, but we are supposed to
+		 * have at least as many scatter-gather DMA slots as
+		 * video buffers so that can't happen.
+		 */
+		dev_err(cam->dev, "failed to queue a video buffer for dma!\n");
+		dev_err(cam->dev, "likely a bug in the driver!\n");
+		vb->state = state;
+	}
+}
+
+static struct videobuf_queue_ops omap24xxcam_vbq_ops = {
+	.buf_setup   = omap24xxcam_vbq_setup,
+	.buf_prepare = omap24xxcam_vbq_prepare,
+	.buf_queue   = omap24xxcam_vbq_queue,
+	.buf_release = omap24xxcam_vbq_release,
+};
+
+/*
+ *
+ * OMAP main camera system
+ *
+ */
+
+/*
+ * Reset camera block to power-on state.
+ */
+static void omap24xxcam_poweron_reset(struct omap24xxcam_device *cam)
+{
+	int max_loop = RESET_TIMEOUT_NS;
+
+	/* Reset whole camera subsystem */
+	omap24xxcam_reg_out(cam->mmio_base,
+			    CAM_SYSCONFIG,
+			    CAM_SYSCONFIG_SOFTRESET);
+
+	/* Wait till it's finished */
+	while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
+		 & CAM_SYSSTATUS_RESETDONE)
+	       && --max_loop) {
+		ndelay(1);
+	}
+
+	if (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
+	      & CAM_SYSSTATUS_RESETDONE))
+		dev_err(cam->dev, "camera soft reset timeout\n");
+}
+
+/*
+ * (Re)initialise the camera block.
+ */
+static void omap24xxcam_hwinit(struct omap24xxcam_device *cam)
+{
+	omap24xxcam_poweron_reset(cam);
+
+	/* set the camera subsystem autoidle bit */
+	omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
+			    CAM_SYSCONFIG_AUTOIDLE);
+
+	/* set the camera MMU autoidle bit */
+	omap24xxcam_reg_out(cam->mmio_base,
+			    CAMMMU_REG_OFFSET + CAMMMU_SYSCONFIG,
+			    CAMMMU_SYSCONFIG_AUTOIDLE);
+
+	omap24xxcam_core_hwinit(cam);
+
+	omap24xxcam_dma_hwinit(&cam->sgdma.dma);
+}
+
+/*
+ * Timer callback for a stalled DMA transfer.
+ */
+static void omap24xxcam_stalled_dma_reset(unsigned long data)
+{
+	struct omap24xxcam_device *cam = (struct omap24xxcam_device *)data;
+
+	if (!atomic_read(&cam->in_reset)) {
+		dev_dbg(cam->dev, "dma stalled, resetting camera\n");
+		omap24xxcam_reset(cam);
+	}
+}
+
+/*
+ * Stop capture. Mark that we're doing a reset, then stop DMA transfers
+ * and the core. (No new scatter-gather transfers will be queued whilst
+ * in_reset is non-zero.)
+ *
+ * If omap24xxcam_capture_stop is called from several places at
+ * once, only the first call will have an effect. Similarly, only the
+ * last call to omap24xxcam_capture_cont will have an effect.
+ *
+ * Serialisation is ensured by using cam->core_enable_disable_lock.
+ */
+static void omap24xxcam_capture_stop(struct omap24xxcam_device *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+
+	if (atomic_inc_return(&cam->in_reset) != 1) {
+		spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+		return;
+	}
+
+	omap24xxcam_core_disable(cam);
+
+	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+
+	omap24xxcam_sgdma_sync(&cam->sgdma);
+}
+
+/*
+ * Reset and continue streaming.
+ *
+ * Note: Resetting the camera FIFO via the CC_RST bit in the CC_CTRL
+ * register is supposed to be sufficient to recover from a camera
+ * interface error, but it doesn't seem to be enough. If we only do
+ * that then subsequent image captures are out of sync by either one
+ * or two times DMA_THRESHOLD bytes. Resetting and re-initializing the
+ * entire camera subsystem prevents the problem with frame
+ * synchronization.
+ */
+static void omap24xxcam_capture_cont(struct omap24xxcam_device *cam)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
+
+	if (atomic_read(&cam->in_reset) != 1)
+		goto out;
+
+	omap24xxcam_hwinit(cam);
+
+	omap24xxcam_sensor_if_enable(cam);
+
+	omap24xxcam_sgdma_process(&cam->sgdma);
+
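+	/* re-enable the core only if buffers are still queued for DMA */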
+	if (cam->sgdma_in_queue)
+		omap24xxcam_core_enable(cam);
+
+out:
+	atomic_dec(&cam->in_reset);
+	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
+}
+
+static ssize_t
+omap24xxcam_streaming_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct omap24xxcam_device *cam = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", cam->streaming ?  "active" : "inactive");
+}
+static DEVICE_ATTR(streaming, S_IRUGO, omap24xxcam_streaming_show, NULL);
+
+/*
+ * Stop capture and restart it. I.e. reset the camera during use.
+ */
+static void omap24xxcam_reset(struct omap24xxcam_device *cam)
+{
+	omap24xxcam_capture_stop(cam);
+	omap24xxcam_capture_cont(cam);
+}
+
+/*
+ * The main interrupt handler.
+ */
+static irqreturn_t omap24xxcam_isr(int irq, void *arg)
+{
+	struct omap24xxcam_device *cam = (struct omap24xxcam_device *)arg;
+	u32 irqstatus;
+	unsigned int irqhandled = 0;
+
+	irqstatus = omap24xxcam_reg_in(cam->mmio_base, CAM_IRQSTATUS);
+
+	if (irqstatus &
+	    (CAM_IRQSTATUS_DMA_IRQ2 | CAM_IRQSTATUS_DMA_IRQ1
+	     | CAM_IRQSTATUS_DMA_IRQ0)) {
+		omap24xxcam_dma_isr(&cam->sgdma.dma);
+		irqhandled = 1;
+	}
+	if (irqstatus & CAM_IRQSTATUS_CC_IRQ) {
+		omap24xxcam_core_isr(cam);
+		irqhandled = 1;
+	}
+	if (irqstatus & CAM_IRQSTATUS_MMU_IRQ)
+		dev_err(cam->dev, "unhandled camera MMU interrupt!\n");
+
+	return IRQ_RETVAL(irqhandled);
+}
+
+/*
+ *
+ * Sensor handling.
+ *
+ */
+
+/*
+ * Enable the external sensor interface. Try to negotiate interface
+ * parameters with the sensor and start using the new ones. The calls
+ * to sensor_if_enable and sensor_if_disable need not be balanced.
+ */
+static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam)
+{
+	int rval;
+	struct v4l2_ifparm p;
+
+	rval = vidioc_int_g_ifparm(cam->sdev, &p);
+	if (rval) {
+		dev_err(cam->dev, "vidioc_int_g_ifparm failed with %d\n", rval);
+		return rval;
+	}
+
+	cam->if_type = p.if_type;
+
+	cam->cc_ctrl = CC_CTRL_CC_EN;
+
+	switch (p.if_type) {
+	case V4L2_IF_TYPE_BT656:
+		if (p.u.bt656.frame_start_on_rising_vs)
+			cam->cc_ctrl |= CC_CTRL_NOBT_SYNCHRO;
+		if (p.u.bt656.bt_sync_correct)
+			cam->cc_ctrl |= CC_CTRL_BT_CORRECT;
+		if (p.u.bt656.swap)
+			cam->cc_ctrl |= CC_CTRL_PAR_ORDERCAM;
+		if (p.u.bt656.latch_clk_inv)
+			cam->cc_ctrl |= CC_CTRL_PAR_CLK_POL;
+		if (p.u.bt656.nobt_hs_inv)
+			cam->cc_ctrl |= CC_CTRL_NOBT_HS_POL;
+		if (p.u.bt656.nobt_vs_inv)
+			cam->cc_ctrl |= CC_CTRL_NOBT_VS_POL;
+
+		switch (p.u.bt656.mode) {
+		case V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT:
+			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT8;
+			break;
+		case V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT:
+			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT10;
+			break;
+		case V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT:
+			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT12;
+			break;
+		case V4L2_IF_TYPE_BT656_MODE_BT_8BIT:
+			cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT8;
+			break;
+		case V4L2_IF_TYPE_BT656_MODE_BT_10BIT:
+			cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT10;
+			break;
+		default:
+			dev_err(cam->dev,
+				"bt656 interface mode %d not supported\n",
+				p.u.bt656.mode);
+			return -EINVAL;
+		}
+		/*
+		 * The clock rate that the sensor wants has changed.
+		 * We have to adjust the xclk from OMAP 2 side to
+		 * match the sensor's wish as closely as possible.
+		 */
+		if (p.u.bt656.clock_curr != cam->if_u.bt656.xclk) {
+			u32 xclk = p.u.bt656.clock_curr;
+			u32 divisor;
+
+			if (xclk == 0)
+				return -EINVAL;
+
+			if (xclk > CAM_MCLK)
+				xclk = CAM_MCLK;
+
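+			/*
+			 * Round the divisor up so xclk does not exceed the
+			 * sensor's request, back off by one if that would
+			 * fall below clock_min, and clamp to the largest
+			 * non-bypass divider value.
+			 */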
+			divisor = CAM_MCLK / xclk;
+			if (divisor * xclk < CAM_MCLK)
+				divisor++;
+			if (CAM_MCLK / divisor < p.u.bt656.clock_min
+			    && divisor > 1)
+				divisor--;
+			if (divisor > 30)
+				divisor = 30;
+
+			xclk = CAM_MCLK / divisor;
+
+			if (xclk < p.u.bt656.clock_min
+			    || xclk > p.u.bt656.clock_max)
+				return -EINVAL;
+
+			cam->if_u.bt656.xclk = xclk;
+		}
+		omap24xxcam_core_xclk_set(cam, cam->if_u.bt656.xclk);
+		break;
+	default:
+		/* FIXME: how about other interfaces? */
+		dev_err(cam->dev, "interface type %d not supported\n",
+			p.if_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void omap24xxcam_sensor_if_disable(const struct omap24xxcam_device *cam)
+{
+	switch (cam->if_type) {
+	case V4L2_IF_TYPE_BT656:
+		omap24xxcam_core_xclk_set(cam, 0);
+		break;
+	}
+}
+
+/*
+ * Initialise the sensor hardware.
+ */
+static int omap24xxcam_sensor_init(struct omap24xxcam_device *cam)
+{
+	int err = 0;
+	struct v4l2_int_device *sdev = cam->sdev;
+
+	omap24xxcam_clock_on(cam);
+	err = omap24xxcam_sensor_if_enable(cam);
+	if (err) {
+		dev_err(cam->dev, "sensor interface could not be enabled at "
+			"initialisation, %d\n", err);
+		cam->sdev = NULL;
+		goto out;
+	}
+
+	/* power up sensor during sensor initialization */
+	vidioc_int_s_power(sdev, 1);
+
+	err = vidioc_int_dev_init(sdev);
+	if (err) {
+		dev_err(cam->dev, "cannot initialize sensor, error %d\n", err);
+		/* Sensor init failed --- it's nonexistent to us! */
+		cam->sdev = NULL;
+		goto out;
+	}
+
+	dev_info(cam->dev, "sensor is %s\n", sdev->name);
+
+out:
+	omap24xxcam_sensor_if_disable(cam);
+	omap24xxcam_clock_off(cam);
+
+	vidioc_int_s_power(sdev, 0);
+
+	return err;
+}
+
+static void omap24xxcam_sensor_exit(struct omap24xxcam_device *cam)
+{
+	if (cam->sdev)
+		vidioc_int_dev_exit(cam->sdev);
+}
+
+static void omap24xxcam_sensor_disable(struct omap24xxcam_device *cam)
+{
+	omap24xxcam_sensor_if_disable(cam);
+	omap24xxcam_clock_off(cam);
+	vidioc_int_s_power(cam->sdev, 0);
+}
+
+/*
+ * Power up and configure the camera sensor so that it is ready for capturing.
+ */
+static int omap24xxcam_sensor_enable(struct omap24xxcam_device *cam)
+{
+	int rval;
+
+	omap24xxcam_clock_on(cam);
+
+	omap24xxcam_sensor_if_enable(cam);
+
+	rval = vidioc_int_s_power(cam->sdev, 1);
+	if (rval)
+		goto out;
+
+	rval = vidioc_int_init(cam->sdev);
+	if (rval)
+		goto out;
+
+	return 0;
+
+out:
+	omap24xxcam_sensor_disable(cam);
+
+	return rval;
+}
+
+static void omap24xxcam_sensor_reset_work(struct work_struct *work)
+{
+	struct omap24xxcam_device *cam =
+		container_of(work, struct omap24xxcam_device,
+			     sensor_reset_work);
+
+	if (atomic_read(&cam->reset_disable))
+		return;
+
+	omap24xxcam_capture_stop(cam);
+
+	if (vidioc_int_reset(cam->sdev) == 0) {
+		vidioc_int_init(cam->sdev);
+	} else {
+		/* Can't reset it via vidioc_int_reset; power-cycle the sensor instead. */
+		omap24xxcam_sensor_disable(cam);
+		omap24xxcam_sensor_enable(cam);
+	}
+
+	omap24xxcam_capture_cont(cam);
+}
+
+/*
+ *
+ * IOCTL interface.
+ *
+ */
+
+static int vidioc_querycap(struct file *file, void *fh,
+			   struct v4l2_capability *cap)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+
+	strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
+				   struct v4l2_fmtdesc *f)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	rval = vidioc_int_enum_fmt_cap(cam->sdev, f);
+
+	return rval;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *fh,
+				struct v4l2_format *f)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	rval = vidioc_int_g_fmt_cap(cam->sdev, f);
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *fh,
+				struct v4l2_format *f)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming) {
+		rval = -EBUSY;
+		goto out;
+	}
+
+	rval = vidioc_int_s_fmt_cap(cam->sdev, f);
+
+out:
+	mutex_unlock(&cam->mutex);
+
+	if (!rval) {
+		mutex_lock(&ofh->vbq.vb_lock);
+		ofh->pix = f->fmt.pix;
+		mutex_unlock(&ofh->vbq.vb_lock);
+	}
+
+	memset(f, 0, sizeof(*f));
+	vidioc_g_fmt_vid_cap(file, fh, f);
+
+	return rval;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *fh,
+				  struct v4l2_format *f)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	rval = vidioc_int_try_fmt_cap(cam->sdev, f);
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+			  struct v4l2_requestbuffers *b)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming) {
+		mutex_unlock(&cam->mutex);
+		return -EBUSY;
+	}
+
+	omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
+	mutex_unlock(&cam->mutex);
+
+	rval = videobuf_reqbufs(&ofh->vbq, b);
+
+	/*
+	 * Either videobuf_reqbufs failed or the buffers are not
+	 * memory-mapped; only memory-mapped buffers need the special
+	 * allocation done below.
+	 */
+	if (rval < 0 || b->memory != V4L2_MEMORY_MMAP)
+		goto out;
+
+	rval = omap24xxcam_vbq_alloc_mmap_buffers(&ofh->vbq, rval);
+	if (rval)
+		omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
+
+out:
+	return rval;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+			   struct v4l2_buffer *b)
+{
+	struct omap24xxcam_fh *ofh = fh;
+
+	return videobuf_querybuf(&ofh->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct omap24xxcam_fh *ofh = fh;
+
+	return videobuf_qbuf(&ofh->vbq, b);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	struct videobuf_buffer *vb;
+	int rval;
+
+videobuf_dqbuf_again:
+	rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK);
+	if (rval)
+		goto out;
+
+	vb = ofh->vbq.bufs[b->index];
+
+	mutex_lock(&cam->mutex);
+	/* _needs_reset returns -EIO if reset is required. */
+	rval = vidioc_int_g_needs_reset(cam->sdev, (void *)vb->baddr);
+	mutex_unlock(&cam->mutex);
+	if (rval == -EIO)
+		schedule_work(&cam->sensor_reset_work);
+	else
+		rval = 0;
+
+out:
+	/*
+	 * This is a hack. We don't want to show -EIO to user space.
+	 * Requeue the buffer and try again if we're not doing
+	 * this in non-blocking mode.
+	 */
+	if (rval == -EIO) {
+		videobuf_qbuf(&ofh->vbq, b);
+		if (!(file->f_flags & O_NONBLOCK))
+			goto videobuf_dqbuf_again;
+		/*
+		 * We don't have a videobuf_buffer now --- maybe next
+		 * time...
+		 */
+		rval = -EAGAIN;
+	}
+
+	return rval;
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming) {
+		rval = -EBUSY;
+		goto out;
+	}
+
+	rval = omap24xxcam_sensor_if_enable(cam);
+	if (rval) {
+		dev_dbg(cam->dev, "vidioc_int_g_ifparm failed\n");
+		goto out;
+	}
+
+	rval = videobuf_streamon(&ofh->vbq);
+	if (!rval) {
+		cam->streaming = file;
+		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
+	}
+
+out:
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	struct videobuf_queue *q = &ofh->vbq;
+	int rval;
+
+	atomic_inc(&cam->reset_disable);
+
+	flush_work_sync(&cam->sensor_reset_work);
+
+	rval = videobuf_streamoff(q);
+	if (!rval) {
+		mutex_lock(&cam->mutex);
+		cam->streaming = NULL;
+		mutex_unlock(&cam->mutex);
+		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
+	}
+
+	atomic_dec(&cam->reset_disable);
+
+	return rval;
+}
+
+static int vidioc_enum_input(struct file *file, void *fh,
+			     struct v4l2_input *inp)
+{
+	if (inp->index > 0)
+		return -EINVAL;
+
+	strlcpy(inp->name, "camera", sizeof(inp->name));
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+	return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
+{
+	*i = 0;
+
+	return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
+{
+	if (i > 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+			    struct v4l2_queryctrl *a)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	rval = vidioc_int_queryctrl(cam->sdev, a);
+
+	return rval;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh,
+			 struct v4l2_control *a)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	rval = vidioc_int_g_ctrl(cam->sdev, a);
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh,
+			 struct v4l2_control *a)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	rval = vidioc_int_s_ctrl(cam->sdev, a);
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+static int vidioc_g_parm(struct file *file, void *fh,
+			 struct v4l2_streamparm *a)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	rval = vidioc_int_g_parm(cam->sdev, a);
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+static int vidioc_s_parm(struct file *file, void *fh,
+			 struct v4l2_streamparm *a)
+{
+	struct omap24xxcam_fh *ofh = fh;
+	struct omap24xxcam_device *cam = ofh->cam;
+	struct v4l2_streamparm old_streamparm;
+	int rval;
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming) {
+		rval = -EBUSY;
+		goto out;
+	}
+
+	old_streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	rval = vidioc_int_g_parm(cam->sdev, &old_streamparm);
+	if (rval)
+		goto out;
+
+	rval = vidioc_int_s_parm(cam->sdev, a);
+	if (rval)
+		goto out;
+
+	rval = omap24xxcam_sensor_if_enable(cam);
+	/*
+	 * Revert to old streaming parameters if enabling sensor
+	 * interface with the new ones failed.
+	 */
+	if (rval)
+		vidioc_int_s_parm(cam->sdev, &old_streamparm);
+
+out:
+	mutex_unlock(&cam->mutex);
+
+	return rval;
+}
+
+/*
+ *
+ * File operations.
+ *
+ */
+
+static unsigned int omap24xxcam_poll(struct file *file,
+				     struct poll_table_struct *wait)
+{
+	struct omap24xxcam_fh *fh = file->private_data;
+	struct omap24xxcam_device *cam = fh->cam;
+	struct videobuf_buffer *vb;
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming != file) {
+		mutex_unlock(&cam->mutex);
+		return POLLERR;
+	}
+	mutex_unlock(&cam->mutex);
+
+	mutex_lock(&fh->vbq.vb_lock);
+	if (list_empty(&fh->vbq.stream)) {
+		mutex_unlock(&fh->vbq.vb_lock);
+		return POLLERR;
+	}
+	vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream);
+	mutex_unlock(&fh->vbq.vb_lock);
+
+	poll_wait(file, &vb->done, wait);
+
+	if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static int omap24xxcam_mmap_buffers(struct file *file,
+				    struct vm_area_struct *vma)
+{
+	struct omap24xxcam_fh *fh = file->private_data;
+	struct omap24xxcam_device *cam = fh->cam;
+	struct videobuf_queue *vbq = &fh->vbq;
+	unsigned int first, last, size, i, j;
+	int err = 0;
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming) {
+		mutex_unlock(&cam->mutex);
+		return -EBUSY;
+	}
+	mutex_unlock(&cam->mutex);
+	mutex_lock(&vbq->vb_lock);
+
+	/* look for first buffer to map */
+	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
+		if (NULL == vbq->bufs[first])
+			continue;
+		if (V4L2_MEMORY_MMAP != vbq->bufs[first]->memory)
+			continue;
+		if (vbq->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+			break;
+	}
+
+	/* look for last buffer to map */
+	for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
+		if (NULL == vbq->bufs[last])
+			continue;
+		if (V4L2_MEMORY_MMAP != vbq->bufs[last]->memory)
+			continue;
+		size += vbq->bufs[last]->bsize;
+		if (size == (vma->vm_end - vma->vm_start))
+			break;
+	}
+
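+	/*
+	 * Remap every scatter-gather chunk of the buffers in [first, last]
+	 * contiguously into the vma.
+	 */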
+	size = 0;
+	for (i = first; i <= last && i < VIDEO_MAX_FRAME; i++) {
+		struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);
+
+		for (j = 0; j < dma->sglen; j++) {
+			err = remap_pfn_range(
+				vma, vma->vm_start + size,
+				page_to_pfn(sg_page(&dma->sglist[j])),
+				sg_dma_len(&dma->sglist[j]), vma->vm_page_prot);
+			if (err)
+				goto out;
+			size += sg_dma_len(&dma->sglist[j]);
+		}
+	}
+
+out:
+	mutex_unlock(&vbq->vb_lock);
+
+	return err;
+}
+
+static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct omap24xxcam_fh *fh = file->private_data;
+	int rval;
+
+	/* let the video-buf mapper check arguments and set-up structures */
+	rval = videobuf_mmap_mapper(&fh->vbq, vma);
+	if (rval)
+		return rval;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	/* do mapping to our allocated buffers */
+	rval = omap24xxcam_mmap_buffers(file, vma);
+	/*
+	 * In case of error, free vma->vm_private_data allocated by
+	 * videobuf_mmap_mapper.
+	 */
+	if (rval)
+		kfree(vma->vm_private_data);
+
+	return rval;
+}
+
+static int omap24xxcam_open(struct file *file)
+{
+	struct omap24xxcam_device *cam = omap24xxcam.priv;
+	struct omap24xxcam_fh *fh;
+	struct v4l2_format format;
+
+	if (!cam || !cam->vfd)
+		return -ENODEV;
+
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (fh == NULL)
+		return -ENOMEM;
+
+	mutex_lock(&cam->mutex);
+	if (cam->sdev == NULL || !try_module_get(cam->sdev->module)) {
+		mutex_unlock(&cam->mutex);
+		goto out_try_module_get;
+	}
+
+	if (atomic_inc_return(&cam->users) == 1) {
+		omap24xxcam_hwinit(cam);
+		if (omap24xxcam_sensor_enable(cam)) {
+			mutex_unlock(&cam->mutex);
+			goto out_omap24xxcam_sensor_enable;
+		}
+	}
+	mutex_unlock(&cam->mutex);
+
+	fh->cam = cam;
+	mutex_lock(&cam->mutex);
+	vidioc_int_g_fmt_cap(cam->sdev, &format);
+	mutex_unlock(&cam->mutex);
+	/* FIXME: how about fh->pix when there are more users? */
+	fh->pix = format.fmt.pix;
+
+	file->private_data = fh;
+
+	spin_lock_init(&fh->vbq_lock);
+
+	videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
+				&fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+				V4L2_FIELD_NONE,
+				sizeof(struct videobuf_buffer), fh, NULL);
+
+	return 0;
+
+out_omap24xxcam_sensor_enable:
+	omap24xxcam_poweron_reset(cam);
+	module_put(cam->sdev->module);
+
+out_try_module_get:
+	kfree(fh);
+
+	return -ENODEV;
+}
+
+static int omap24xxcam_release(struct file *file)
+{
+	struct omap24xxcam_fh *fh = file->private_data;
+	struct omap24xxcam_device *cam = fh->cam;
+
+	atomic_inc(&cam->reset_disable);
+
+	flush_work_sync(&cam->sensor_reset_work);
+
+	/* stop streaming capture */
+	videobuf_streamoff(&fh->vbq);
+
+	mutex_lock(&cam->mutex);
+	if (cam->streaming == file) {
+		cam->streaming = NULL;
+		mutex_unlock(&cam->mutex);
+		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
+	} else {
+		mutex_unlock(&cam->mutex);
+	}
+
+	atomic_dec(&cam->reset_disable);
+
+	omap24xxcam_vbq_free_mmap_buffers(&fh->vbq);
+
+	/*
+	 * Make sure the reset work we might have scheduled is not
+	 * pending! It may be run *only* if we have users. (And it may
+	 * not be scheduled anymore since streaming is already
+	 * disabled.)
+	 */
+	flush_work_sync(&cam->sensor_reset_work);
+
+	mutex_lock(&cam->mutex);
+	if (atomic_dec_return(&cam->users) == 0) {
+		omap24xxcam_sensor_disable(cam);
+		omap24xxcam_poweron_reset(cam);
+	}
+	mutex_unlock(&cam->mutex);
+
+	file->private_data = NULL;
+
+	module_put(cam->sdev->module);
+	kfree(fh);
+
+	return 0;
+}
+
+static struct v4l2_file_operations omap24xxcam_fops = {
+	.ioctl	 = video_ioctl2,
+	.poll	 = omap24xxcam_poll,
+	.mmap	 = omap24xxcam_mmap,
+	.open	 = omap24xxcam_open,
+	.release = omap24xxcam_release,
+};
+
+/*
+ *
+ * Power management.
+ *
+ */
+
+#ifdef CONFIG_PM
+static int omap24xxcam_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
+
+	if (atomic_read(&cam->users) == 0)
+		return 0;
+
+	if (!atomic_read(&cam->reset_disable))
+		omap24xxcam_capture_stop(cam);
+
+	omap24xxcam_sensor_disable(cam);
+	omap24xxcam_poweron_reset(cam);
+
+	return 0;
+}
+
+static int omap24xxcam_resume(struct platform_device *pdev)
+{
+	struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
+
+	if (atomic_read(&cam->users) == 0)
+		return 0;
+
+	omap24xxcam_hwinit(cam);
+	omap24xxcam_sensor_enable(cam);
+
+	if (!atomic_read(&cam->reset_disable))
+		omap24xxcam_capture_cont(cam);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct v4l2_ioctl_ops omap24xxcam_ioctl_fops = {
+	.vidioc_querycap	= vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap	= vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= vidioc_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= vidioc_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	= vidioc_try_fmt_vid_cap,
+	.vidioc_reqbufs		= vidioc_reqbufs,
+	.vidioc_querybuf	= vidioc_querybuf,
+	.vidioc_qbuf		= vidioc_qbuf,
+	.vidioc_dqbuf		= vidioc_dqbuf,
+	.vidioc_streamon	= vidioc_streamon,
+	.vidioc_streamoff	= vidioc_streamoff,
+	.vidioc_enum_input	= vidioc_enum_input,
+	.vidioc_g_input		= vidioc_g_input,
+	.vidioc_s_input		= vidioc_s_input,
+	.vidioc_queryctrl	= vidioc_queryctrl,
+	.vidioc_g_ctrl		= vidioc_g_ctrl,
+	.vidioc_s_ctrl		= vidioc_s_ctrl,
+	.vidioc_g_parm		= vidioc_g_parm,
+	.vidioc_s_parm		= vidioc_s_parm,
+};
+
+/*
+ *
+ * Camera device (i.e. /dev/video).
+ *
+ */
+
+static int omap24xxcam_device_register(struct v4l2_int_device *s)
+{
+	struct omap24xxcam_device *cam = s->u.slave->master->priv;
+	struct video_device *vfd;
+	int rval;
+
+	/* We already have a slave. */
+	if (cam->sdev)
+		return -EBUSY;
+
+	cam->sdev = s;
+
+	if (device_create_file(cam->dev, &dev_attr_streaming) != 0) {
+		dev_err(cam->dev, "could not register sysfs entry\n");
+		rval = -EBUSY;
+		goto err;
+	}
+
+	/* initialize the video_device struct */
+	vfd = cam->vfd = video_device_alloc();
+	if (!vfd) {
+		dev_err(cam->dev, "could not allocate video device struct\n");
+		rval = -ENOMEM;
+		goto err;
+	}
+	vfd->release = video_device_release;
+
+	vfd->parent = cam->dev;
+
+	strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
+	vfd->fops		 = &omap24xxcam_fops;
+	vfd->ioctl_ops		 = &omap24xxcam_ioctl_fops;
+
+	omap24xxcam_hwinit(cam);
+
+	rval = omap24xxcam_sensor_init(cam);
+	if (rval)
+		goto err;
+
+	if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
+		dev_err(cam->dev, "could not register V4L device\n");
+		rval = -EBUSY;
+		goto err;
+	}
+
+	omap24xxcam_poweron_reset(cam);
+
+	dev_info(cam->dev, "registered device %s\n",
+		 video_device_node_name(vfd));
+
+	return 0;
+
+err:
+	omap24xxcam_device_unregister(s);
+
+	return rval;
+}
+
+static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
+{
+	struct omap24xxcam_device *cam = s->u.slave->master->priv;
+
+	omap24xxcam_sensor_exit(cam);
+
+	if (cam->vfd) {
+		if (!video_is_registered(cam->vfd)) {
+			/*
+			 * The device was never registered, so release the
+			 * video_device struct directly.
+			 */
+			video_device_release(cam->vfd);
+		} else {
+			/*
+			 * The unregister function will release the
+			 * video_device struct as well as
+			 * unregistering it.
+			 */
+			video_unregister_device(cam->vfd);
+		}
+		cam->vfd = NULL;
+	}
+
+	device_remove_file(cam->dev, &dev_attr_streaming);
+
+	cam->sdev = NULL;
+}
+
+static struct v4l2_int_master omap24xxcam_master = {
+	.attach = omap24xxcam_device_register,
+	.detach = omap24xxcam_device_unregister,
+};
+
+static struct v4l2_int_device omap24xxcam = {
+	.module	= THIS_MODULE,
+	.name	= CAM_NAME,
+	.type	= v4l2_int_type_master,
+	.u	= {
+		.master = &omap24xxcam_master
+	},
+};
+
+/*
+ *
+ * Driver initialisation and deinitialisation.
+ *
+ */
+
+static int __devinit omap24xxcam_probe(struct platform_device *pdev)
+{
+	struct omap24xxcam_device *cam;
+	struct resource *mem;
+	int irq;
+
+	cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+	if (!cam) {
+		dev_err(&pdev->dev, "could not allocate memory\n");
+		goto err;
+	}
+
+	platform_set_drvdata(pdev, cam);
+
+	cam->dev = &pdev->dev;
+
+	/*
+	 * Impose a lower limit on the amount of memory allocated for
+	 * capture. We require at least enough memory to double-buffer
+	 * QVGA (300KB).
+	 */
+	if (capture_mem < 320 * 240 * 2 * 2)
+		capture_mem = 320 * 240 * 2 * 2;
+	cam->capture_mem = capture_mem;
+
+	/* request the mem region for the camera registers */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(cam->dev, "no mem resource?\n");
+		goto err;
+	}
+	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+		dev_err(cam->dev,
+			"cannot reserve camera register I/O region\n");
+		goto err;
+	}
+	cam->mmio_base_phys = mem->start;
+	cam->mmio_size = resource_size(mem);
+
+	/* map the region */
+	cam->mmio_base = ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
+	if (!cam->mmio_base) {
+		dev_err(cam->dev, "cannot map camera register I/O region\n");
+		goto err;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(cam->dev, "no irq for camera?\n");
+		goto err;
+	}
+
+	/* install the interrupt service routine */
+	if (request_irq(irq, omap24xxcam_isr, 0, CAM_NAME, cam)) {
+		dev_err(cam->dev,
+			"could not install interrupt service routine\n");
+		goto err;
+	}
+	cam->irq = irq;
+
+	if (omap24xxcam_clock_get(cam))
+		goto err;
+
+	INIT_WORK(&cam->sensor_reset_work, omap24xxcam_sensor_reset_work);
+
+	mutex_init(&cam->mutex);
+	spin_lock_init(&cam->core_enable_disable_lock);
+
+	omap24xxcam_sgdma_init(&cam->sgdma,
+			       cam->mmio_base + CAMDMA_REG_OFFSET,
+			       omap24xxcam_stalled_dma_reset,
+			       (unsigned long)cam);
+
+	omap24xxcam.priv = cam;
+
+	if (v4l2_int_device_register(&omap24xxcam))
+		goto err;
+
+	return 0;
+
+err:
+	omap24xxcam_remove(pdev);
+	return -ENODEV;
+}
+
+static int omap24xxcam_remove(struct platform_device *pdev)
+{
+	struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
+
+	if (!cam)
+		return 0;
+
+	if (omap24xxcam.priv != NULL)
+		v4l2_int_device_unregister(&omap24xxcam);
+	omap24xxcam.priv = NULL;
+
+	omap24xxcam_clock_put(cam);
+
+	if (cam->irq) {
+		free_irq(cam->irq, cam);
+		cam->irq = 0;
+	}
+
+	if (cam->mmio_base) {
+		iounmap((void *)cam->mmio_base);
+		cam->mmio_base = 0;
+	}
+
+	if (cam->mmio_base_phys) {
+		release_mem_region(cam->mmio_base_phys, cam->mmio_size);
+		cam->mmio_base_phys = 0;
+	}
+
+	kfree(cam);
+
+	return 0;
+}
+
+static struct platform_driver omap24xxcam_driver = {
+	.probe	 = omap24xxcam_probe,
+	.remove	 = omap24xxcam_remove,
+#ifdef CONFIG_PM
+	.suspend = omap24xxcam_suspend,
+	.resume	 = omap24xxcam_resume,
+#endif
+	.driver	 = {
+		.name = CAM_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(omap24xxcam_driver);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
+MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(OMAP24XXCAM_VERSION);
+module_param(video_nr, int, 0);
+MODULE_PARM_DESC(video_nr,
+		 "Minor number for video device (-1 ==> auto assign)");
+module_param(capture_mem, int, 0);
+MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture "
+		 "buffers (default 4800kiB)");
diff --git a/drivers/media/platform/omap24xxcam.h b/drivers/media/platform/omap24xxcam.h
new file mode 100644
index 0000000..c439595
--- /dev/null
+++ b/drivers/media/platform/omap24xxcam.h
@@ -0,0 +1,593 @@
+/*
+ * drivers/media/platform/omap24xxcam.h
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2004 Texas Instruments.
+ * Copyright (C) 2007 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * Based on code from Andy Lowe <source@mvista.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP24XXCAM_H
+#define OMAP24XXCAM_H
+
+#include <media/videobuf-dma-sg.h>
+#include <media/v4l2-int-device.h>
+
+/*
+ *
+ * General driver related definitions.
+ *
+ */
+
+#define CAM_NAME				"omap24xxcam"
+
+#define CAM_MCLK				96000000
+
+/* number of bytes transferred per DMA request */
+#define DMA_THRESHOLD				32
+
+/*
+ * NUM_CAMDMA_CHANNELS is the number of logical channels provided by
+ * the camera DMA controller.
+ */
+#define NUM_CAMDMA_CHANNELS			4
+
+/*
+ * NUM_SG_DMA is the number of scatter-gather DMA transfers that can
+ * be queued. (We don't have any overlay sglists now.)
+ */
+#define NUM_SG_DMA				(VIDEO_MAX_FRAME)
+
+/*
+ *
+ * Register definitions.
+ *
+ */
+
+/* subsystem register block offsets */
+#define CC_REG_OFFSET				0x00000400
+#define CAMDMA_REG_OFFSET			0x00000800
+#define CAMMMU_REG_OFFSET			0x00000C00
+
+/* define camera subsystem register offsets */
+#define CAM_REVISION				0x000
+#define CAM_SYSCONFIG				0x010
+#define CAM_SYSSTATUS				0x014
+#define CAM_IRQSTATUS				0x018
+#define CAM_GPO					0x040
+#define CAM_GPI					0x050
+
+/* define camera core register offsets */
+#define CC_REVISION				0x000
+#define CC_SYSCONFIG				0x010
+#define CC_SYSSTATUS				0x014
+#define CC_IRQSTATUS				0x018
+#define CC_IRQENABLE				0x01C
+#define CC_CTRL					0x040
+#define CC_CTRL_DMA				0x044
+#define CC_CTRL_XCLK				0x048
+#define CC_FIFODATA				0x04C
+#define CC_TEST					0x050
+#define CC_GENPAR				0x054
+#define CC_CCPFSCR				0x058
+#define CC_CCPFECR				0x05C
+#define CC_CCPLSCR				0x060
+#define CC_CCPLECR				0x064
+#define CC_CCPDFR				0x068
+
+/* define camera dma register offsets */
+#define CAMDMA_REVISION				0x000
+#define CAMDMA_IRQSTATUS_L0			0x008
+#define CAMDMA_IRQSTATUS_L1			0x00C
+#define CAMDMA_IRQSTATUS_L2			0x010
+#define CAMDMA_IRQSTATUS_L3			0x014
+#define CAMDMA_IRQENABLE_L0			0x018
+#define CAMDMA_IRQENABLE_L1			0x01C
+#define CAMDMA_IRQENABLE_L2			0x020
+#define CAMDMA_IRQENABLE_L3			0x024
+#define CAMDMA_SYSSTATUS			0x028
+#define CAMDMA_OCP_SYSCONFIG			0x02C
+#define CAMDMA_CAPS_0				0x064
+#define CAMDMA_CAPS_2				0x06C
+#define CAMDMA_CAPS_3				0x070
+#define CAMDMA_CAPS_4				0x074
+#define CAMDMA_GCR				0x078
+#define CAMDMA_CCR(n)				(0x080 + (n)*0x60)
+#define CAMDMA_CLNK_CTRL(n)			(0x084 + (n)*0x60)
+#define CAMDMA_CICR(n)				(0x088 + (n)*0x60)
+#define CAMDMA_CSR(n)				(0x08C + (n)*0x60)
+#define CAMDMA_CSDP(n)				(0x090 + (n)*0x60)
+#define CAMDMA_CEN(n)				(0x094 + (n)*0x60)
+#define CAMDMA_CFN(n)				(0x098 + (n)*0x60)
+#define CAMDMA_CSSA(n)				(0x09C + (n)*0x60)
+#define CAMDMA_CDSA(n)				(0x0A0 + (n)*0x60)
+#define CAMDMA_CSEI(n)				(0x0A4 + (n)*0x60)
+#define CAMDMA_CSFI(n)				(0x0A8 + (n)*0x60)
+#define CAMDMA_CDEI(n)				(0x0AC + (n)*0x60)
+#define CAMDMA_CDFI(n)				(0x0B0 + (n)*0x60)
+#define CAMDMA_CSAC(n)				(0x0B4 + (n)*0x60)
+#define CAMDMA_CDAC(n)				(0x0B8 + (n)*0x60)
+#define CAMDMA_CCEN(n)				(0x0BC + (n)*0x60)
+#define CAMDMA_CCFN(n)				(0x0C0 + (n)*0x60)
+#define CAMDMA_COLOR(n)				(0x0C4 + (n)*0x60)
+
+/* define camera mmu register offsets */
+#define CAMMMU_REVISION				0x000
+#define CAMMMU_SYSCONFIG			0x010
+#define CAMMMU_SYSSTATUS			0x014
+#define CAMMMU_IRQSTATUS			0x018
+#define CAMMMU_IRQENABLE			0x01C
+#define CAMMMU_WALKING_ST			0x040
+#define CAMMMU_CNTL				0x044
+#define CAMMMU_FAULT_AD				0x048
+#define CAMMMU_TTB				0x04C
+#define CAMMMU_LOCK				0x050
+#define CAMMMU_LD_TLB				0x054
+#define CAMMMU_CAM				0x058
+#define CAMMMU_RAM				0x05C
+#define CAMMMU_GFLUSH				0x060
+#define CAMMMU_FLUSH_ENTRY			0x064
+#define CAMMMU_READ_CAM				0x068
+#define CAMMMU_READ_RAM				0x06C
+#define CAMMMU_EMU_FAULT_AD			0x070
+
+/* Define bit fields within selected registers */
+#define CAM_REVISION_MAJOR			(15 << 4)
+#define CAM_REVISION_MAJOR_SHIFT		4
+#define CAM_REVISION_MINOR			(15 << 0)
+#define CAM_REVISION_MINOR_SHIFT		0
+
+#define CAM_SYSCONFIG_SOFTRESET			(1 <<  1)
+#define CAM_SYSCONFIG_AUTOIDLE			(1 <<  0)
+
+#define CAM_SYSSTATUS_RESETDONE			(1 <<  0)
+
+#define CAM_IRQSTATUS_CC_IRQ			(1 <<  4)
+#define CAM_IRQSTATUS_MMU_IRQ			(1 <<  3)
+#define CAM_IRQSTATUS_DMA_IRQ2			(1 <<  2)
+#define CAM_IRQSTATUS_DMA_IRQ1			(1 <<  1)
+#define CAM_IRQSTATUS_DMA_IRQ0			(1 <<  0)
+
+#define CAM_GPO_CAM_S_P_EN			(1 <<  1)
+#define CAM_GPO_CAM_CCP_MODE			(1 <<  0)
+
+#define CAM_GPI_CC_DMA_REQ1			(1 << 24)
+#define CAP_GPI_CC_DMA_REQ0			(1 << 23)
+#define CAP_GPI_CAM_MSTANDBY			(1 << 21)
+#define CAP_GPI_CAM_WAIT			(1 << 20)
+#define CAP_GPI_CAM_S_DATA			(1 << 17)
+#define CAP_GPI_CAM_S_CLK			(1 << 16)
+#define CAP_GPI_CAM_P_DATA			(0xFFF << 3)
+#define CAP_GPI_CAM_P_DATA_SHIFT		3
+#define CAP_GPI_CAM_P_VS			(1 <<  2)
+#define CAP_GPI_CAM_P_HS			(1 <<  1)
+#define CAP_GPI_CAM_P_CLK			(1 <<  0)
+
+#define CC_REVISION_MAJOR			(15 << 4)
+#define CC_REVISION_MAJOR_SHIFT			4
+#define CC_REVISION_MINOR			(15 << 0)
+#define CC_REVISION_MINOR_SHIFT			0
+
+#define CC_SYSCONFIG_SIDLEMODE			(3 <<  3)
+#define CC_SYSCONFIG_SIDLEMODE_FIDLE		(0 <<  3)
+#define CC_SYSCONFIG_SIDLEMODE_NIDLE		(1 <<  3)
+#define CC_SYSCONFIG_SOFTRESET			(1 <<  1)
+#define CC_SYSCONFIG_AUTOIDLE			(1 <<  0)
+
+#define CC_SYSSTATUS_RESETDONE			(1 <<  0)
+
+#define CC_IRQSTATUS_FS_IRQ			(1 << 19)
+#define CC_IRQSTATUS_LE_IRQ			(1 << 18)
+#define CC_IRQSTATUS_LS_IRQ			(1 << 17)
+#define CC_IRQSTATUS_FE_IRQ			(1 << 16)
+#define CC_IRQSTATUS_FW_ERR_IRQ			(1 << 10)
+#define CC_IRQSTATUS_FSC_ERR_IRQ		(1 <<  9)
+#define CC_IRQSTATUS_SSC_ERR_IRQ		(1 <<  8)
+#define CC_IRQSTATUS_FIFO_NOEMPTY_IRQ		(1 <<  4)
+#define CC_IRQSTATUS_FIFO_FULL_IRQ		(1 <<  3)
+#define CC_IRQSTATUS_FIFO_THR_IRQ		(1 <<  2)
+#define CC_IRQSTATUS_FIFO_OF_IRQ		(1 <<  1)
+#define CC_IRQSTATUS_FIFO_UF_IRQ		(1 <<  0)
+
+#define CC_IRQENABLE_FS_IRQ			(1 << 19)
+#define CC_IRQENABLE_LE_IRQ			(1 << 18)
+#define CC_IRQENABLE_LS_IRQ			(1 << 17)
+#define CC_IRQENABLE_FE_IRQ			(1 << 16)
+#define CC_IRQENABLE_FW_ERR_IRQ			(1 << 10)
+#define CC_IRQENABLE_FSC_ERR_IRQ		(1 <<  9)
+#define CC_IRQENABLE_SSC_ERR_IRQ		(1 <<  8)
+#define CC_IRQENABLE_FIFO_NOEMPTY_IRQ		(1 <<  4)
+#define CC_IRQENABLE_FIFO_FULL_IRQ		(1 <<  3)
+#define CC_IRQENABLE_FIFO_THR_IRQ		(1 <<  2)
+#define CC_IRQENABLE_FIFO_OF_IRQ		(1 <<  1)
+#define CC_IRQENABLE_FIFO_UF_IRQ		(1 <<  0)
+
+#define CC_CTRL_CC_ONE_SHOT			(1 << 20)
+#define CC_CTRL_CC_IF_SYNCHRO			(1 << 19)
+#define CC_CTRL_CC_RST				(1 << 18)
+#define CC_CTRL_CC_FRAME_TRIG			(1 << 17)
+#define CC_CTRL_CC_EN				(1 << 16)
+#define CC_CTRL_NOBT_SYNCHRO			(1 << 13)
+#define CC_CTRL_BT_CORRECT			(1 << 12)
+#define CC_CTRL_PAR_ORDERCAM			(1 << 11)
+#define CC_CTRL_PAR_CLK_POL			(1 << 10)
+#define CC_CTRL_NOBT_HS_POL			(1 <<  9)
+#define CC_CTRL_NOBT_VS_POL			(1 <<  8)
+#define CC_CTRL_PAR_MODE			(7 <<  1)
+#define CC_CTRL_PAR_MODE_SHIFT			1
+#define CC_CTRL_PAR_MODE_NOBT8			(0 <<  1)
+#define CC_CTRL_PAR_MODE_NOBT10			(1 <<  1)
+#define CC_CTRL_PAR_MODE_NOBT12			(2 <<  1)
+#define CC_CTRL_PAR_MODE_BT8			(4 <<  1)
+#define CC_CTRL_PAR_MODE_BT10			(5 <<  1)
+#define CC_CTRL_PAR_MODE_FIFOTEST		(7 <<  1)
+#define CC_CTRL_CCP_MODE			(1 <<  0)
+
+#define CC_CTRL_DMA_EN				(1 <<  8)
+#define CC_CTRL_DMA_FIFO_THRESHOLD		(0x7F << 0)
+#define CC_CTRL_DMA_FIFO_THRESHOLD_SHIFT	0
+
+#define CC_CTRL_XCLK_DIV			(0x1F << 0)
+#define CC_CTRL_XCLK_DIV_SHIFT			0
+#define CC_CTRL_XCLK_DIV_STABLE_LOW		(0 <<  0)
+#define CC_CTRL_XCLK_DIV_STABLE_HIGH		(1 <<  0)
+#define CC_CTRL_XCLK_DIV_BYPASS			(31 << 0)
+
+#define CC_TEST_FIFO_RD_POINTER			(0xFF << 24)
+#define CC_TEST_FIFO_RD_POINTER_SHIFT		24
+#define CC_TEST_FIFO_WR_POINTER			(0xFF << 16)
+#define CC_TEST_FIFO_WR_POINTER_SHIFT		16
+#define CC_TEST_FIFO_LEVEL			(0xFF <<  8)
+#define CC_TEST_FIFO_LEVEL_SHIFT		8
+#define CC_TEST_FIFO_LEVEL_PEAK			(0xFF <<  0)
+#define CC_TEST_FIFO_LEVEL_PEAK_SHIFT		0
+
+#define CC_GENPAR_FIFO_DEPTH			(7 <<  0)
+#define CC_GENPAR_FIFO_DEPTH_SHIFT		0
+
+#define CC_CCPDFR_ALPHA				(0xFF <<  8)
+#define CC_CCPDFR_ALPHA_SHIFT			8
+#define CC_CCPDFR_DATAFORMAT			(15 <<  0)
+#define CC_CCPDFR_DATAFORMAT_SHIFT		0
+#define CC_CCPDFR_DATAFORMAT_YUV422BE		(0 <<  0)
+#define CC_CCPDFR_DATAFORMAT_YUV422		(1 <<  0)
+#define CC_CCPDFR_DATAFORMAT_YUV420		(2 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RGB444		(4 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RGB565		(5 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RGB888NDE		(6 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RGB888		(7 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RAW8NDE		(8 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RAW8		(9 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RAW10NDE		(10 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RAW10		(11 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RAW12NDE		(12 <<  0)
+#define CC_CCPDFR_DATAFORMAT_RAW12		(13 <<  0)
+#define CC_CCPDFR_DATAFORMAT_JPEG8		(15 <<  0)
+
+#define CAMDMA_REVISION_MAJOR			(15 << 4)
+#define CAMDMA_REVISION_MAJOR_SHIFT		4
+#define CAMDMA_REVISION_MINOR			(15 << 0)
+#define CAMDMA_REVISION_MINOR_SHIFT		0
+
+#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE		(3 << 12)
+#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY	(0 << 12)
+#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_NSTANDBY	(1 << 12)
+#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_SSTANDBY	(2 << 12)
+#define CAMDMA_OCP_SYSCONFIG_FUNC_CLOCK		(1 <<  9)
+#define CAMDMA_OCP_SYSCONFIG_OCP_CLOCK		(1 <<  8)
+#define CAMDMA_OCP_SYSCONFIG_EMUFREE		(1 <<  5)
+#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE		(3 <<  3)
+#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE	(0 <<  3)
+#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_NIDLE	(1 <<  3)
+#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_SIDLE	(2 <<  3)
+#define CAMDMA_OCP_SYSCONFIG_SOFTRESET		(1 <<  1)
+#define CAMDMA_OCP_SYSCONFIG_AUTOIDLE		(1 <<  0)
+
+#define CAMDMA_SYSSTATUS_RESETDONE		(1 <<  0)
+
+#define CAMDMA_GCR_ARBITRATION_RATE		(0xFF << 16)
+#define CAMDMA_GCR_ARBITRATION_RATE_SHIFT	16
+#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH	(0xFF << 0)
+#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH_SHIFT	0
+
+#define CAMDMA_CCR_SEL_SRC_DST_SYNC		(1 << 24)
+#define CAMDMA_CCR_PREFETCH			(1 << 23)
+#define CAMDMA_CCR_SUPERVISOR			(1 << 22)
+#define CAMDMA_CCR_SECURE			(1 << 21)
+#define CAMDMA_CCR_BS				(1 << 18)
+#define CAMDMA_CCR_TRANSPARENT_COPY_ENABLE	(1 << 17)
+#define CAMDMA_CCR_CONSTANT_FILL_ENABLE		(1 << 16)
+#define CAMDMA_CCR_DST_AMODE			(3 << 14)
+#define CAMDMA_CCR_DST_AMODE_CONST_ADDR		(0 << 14)
+#define CAMDMA_CCR_DST_AMODE_POST_INC		(1 << 14)
+#define CAMDMA_CCR_DST_AMODE_SGL_IDX		(2 << 14)
+#define CAMDMA_CCR_DST_AMODE_DBL_IDX		(3 << 14)
+#define CAMDMA_CCR_SRC_AMODE			(3 << 12)
+#define CAMDMA_CCR_SRC_AMODE_CONST_ADDR		(0 << 12)
+#define CAMDMA_CCR_SRC_AMODE_POST_INC		(1 << 12)
+#define CAMDMA_CCR_SRC_AMODE_SGL_IDX		(2 << 12)
+#define CAMDMA_CCR_SRC_AMODE_DBL_IDX		(3 << 12)
+#define CAMDMA_CCR_WR_ACTIVE			(1 << 10)
+#define CAMDMA_CCR_RD_ACTIVE			(1 <<  9)
+#define CAMDMA_CCR_SUSPEND_SENSITIVE		(1 <<  8)
+#define CAMDMA_CCR_ENABLE			(1 <<  7)
+#define CAMDMA_CCR_PRIO				(1 <<  6)
+#define CAMDMA_CCR_FS				(1 <<  5)
+#define CAMDMA_CCR_SYNCHRO			((3 << 19) | (31 << 0))
+#define CAMDMA_CCR_SYNCHRO_CAMERA		0x01
+
+#define CAMDMA_CLNK_CTRL_ENABLE_LNK		(1 << 15)
+#define CAMDMA_CLNK_CTRL_NEXTLCH_ID		(0x1F << 0)
+#define CAMDMA_CLNK_CTRL_NEXTLCH_ID_SHIFT	0
+
+#define CAMDMA_CICR_MISALIGNED_ERR_IE		(1 << 11)
+#define CAMDMA_CICR_SUPERVISOR_ERR_IE		(1 << 10)
+#define CAMDMA_CICR_SECURE_ERR_IE		(1 <<  9)
+#define CAMDMA_CICR_TRANS_ERR_IE		(1 <<  8)
+#define CAMDMA_CICR_PACKET_IE			(1 <<  7)
+#define CAMDMA_CICR_BLOCK_IE			(1 <<  5)
+#define CAMDMA_CICR_LAST_IE			(1 <<  4)
+#define CAMDMA_CICR_FRAME_IE			(1 <<  3)
+#define CAMDMA_CICR_HALF_IE			(1 <<  2)
+#define CAMDMA_CICR_DROP_IE			(1 <<  1)
+
+#define CAMDMA_CSR_MISALIGNED_ERR		(1 << 11)
+#define CAMDMA_CSR_SUPERVISOR_ERR		(1 << 10)
+#define CAMDMA_CSR_SECURE_ERR			(1 <<  9)
+#define CAMDMA_CSR_TRANS_ERR			(1 <<  8)
+#define CAMDMA_CSR_PACKET			(1 <<  7)
+#define CAMDMA_CSR_SYNC				(1 <<  6)
+#define CAMDMA_CSR_BLOCK			(1 <<  5)
+#define CAMDMA_CSR_LAST				(1 <<  4)
+#define CAMDMA_CSR_FRAME			(1 <<  3)
+#define CAMDMA_CSR_HALF				(1 <<  2)
+#define CAMDMA_CSR_DROP				(1 <<  1)
+
+#define CAMDMA_CSDP_SRC_ENDIANNESS		(1 << 21)
+#define CAMDMA_CSDP_SRC_ENDIANNESS_LOCK		(1 << 20)
+#define CAMDMA_CSDP_DST_ENDIANNESS		(1 << 19)
+#define CAMDMA_CSDP_DST_ENDIANNESS_LOCK		(1 << 18)
+#define CAMDMA_CSDP_WRITE_MODE			(3 << 16)
+#define CAMDMA_CSDP_WRITE_MODE_WRNP		(0 << 16)
+#define CAMDMA_CSDP_WRITE_MODE_POSTED		(1 << 16)
+#define CAMDMA_CSDP_WRITE_MODE_POSTED_LAST_WRNP	(2 << 16)
+#define CAMDMA_CSDP_DST_BURST_EN		(3 << 14)
+#define CAMDMA_CSDP_DST_BURST_EN_1		(0 << 14)
+#define CAMDMA_CSDP_DST_BURST_EN_16		(1 << 14)
+#define CAMDMA_CSDP_DST_BURST_EN_32		(2 << 14)
+#define CAMDMA_CSDP_DST_BURST_EN_64		(3 << 14)
+#define CAMDMA_CSDP_DST_PACKED			(1 << 13)
+#define CAMDMA_CSDP_WR_ADD_TRSLT		(15 << 9)
+#define CAMDMA_CSDP_WR_ADD_TRSLT_ENABLE_MREQADD	(3 <<  9)
+#define CAMDMA_CSDP_SRC_BURST_EN		(3 <<  7)
+#define CAMDMA_CSDP_SRC_BURST_EN_1		(0 <<  7)
+#define CAMDMA_CSDP_SRC_BURST_EN_16		(1 <<  7)
+#define CAMDMA_CSDP_SRC_BURST_EN_32		(2 <<  7)
+#define CAMDMA_CSDP_SRC_BURST_EN_64		(3 <<  7)
+#define CAMDMA_CSDP_SRC_PACKED			(1 <<  6)
+#define CAMDMA_CSDP_RD_ADD_TRSLT		(15 << 2)
+#define CAMDMA_CSDP_RD_ADD_TRSLT_ENABLE_MREQADD	(3 <<  2)
+#define CAMDMA_CSDP_DATA_TYPE			(3 <<  0)
+#define CAMDMA_CSDP_DATA_TYPE_8BITS		(0 <<  0)
+#define CAMDMA_CSDP_DATA_TYPE_16BITS		(1 <<  0)
+#define CAMDMA_CSDP_DATA_TYPE_32BITS		(2 <<  0)
+
+#define CAMMMU_SYSCONFIG_AUTOIDLE		(1 <<  0)
+
+/*
+ *
+ * Declarations.
+ *
+ */
+
+/* forward declarations */
+struct omap24xxcam_sgdma;
+struct omap24xxcam_dma;
+
+typedef void (*sgdma_callback_t)(struct omap24xxcam_sgdma *cam,
+				 u32 status, void *arg);
+typedef void (*dma_callback_t)(struct omap24xxcam_dma *cam,
+			       u32 status, void *arg);
+
+struct channel_state {
+	dma_callback_t callback;
+	void *arg;
+};
+
+/* sgdma state for each of the possible videobuf_buffers + 2 overlays */
+struct sgdma_state {
+	const struct scatterlist *sglist;
+	int sglen;		 /* number of sglist entries */
+	int next_sglist;	 /* index of next sglist entry to process */
+	unsigned int bytes_read; /* number of bytes read */
+	unsigned int len;        /* total length of sglist (excluding
+				  * bytes due to page alignment) */
+	int queued_sglist;	 /* number of sglist entries queued for DMA */
+	u32 csr;		 /* DMA return code */
+	sgdma_callback_t callback;
+	void *arg;
+};
+
+/* physical DMA channel management */
+struct omap24xxcam_dma {
+	spinlock_t lock;	/* Lock for the whole structure. */
+
+	void __iomem *base;	/* base address for dma controller */
+
+	/* While dma_stop!=0, an attempt to start a new DMA transfer will
+	 * fail.
+	 */
+	atomic_t dma_stop;
+	int free_dmach;		/* number of dma channels free */
+	int next_dmach;		/* index of next dma channel to use */
+	struct channel_state ch_state[NUM_CAMDMA_CHANNELS];
+};
+
+/* scatter-gather DMA (scatterlist stuff) management */
+struct omap24xxcam_sgdma {
+	struct omap24xxcam_dma dma;
+
+	spinlock_t lock;	/* Lock for the fields below. */
+	int free_sgdma;		/* number of free sg dma slots */
+	int next_sgdma;		/* index of next sg dma slot to use */
+	struct sgdma_state sg_state[NUM_SG_DMA];
+
+	/* Reset timer data */
+	struct timer_list reset_timer;
+};
+
+/* per-device data structure */
+struct omap24xxcam_device {
+	/*** mutex  ***/
+	/*
+	 * The mutex serialises access to this structure. Camera opening
+	 * and releasing are also synchronised by it.
+	 */
+	struct mutex mutex;
+
+	/*** general driver state information ***/
+	atomic_t users;
+	/*
+	 * Lock to serialise core enabling and disabling and access to
+	 * sgdma_in_queue.
+	 */
+	spinlock_t core_enable_disable_lock;
+	/*
+	 * Number of sgdma requests in the scatter-gather queue, protected
+	 * by the lock above.
+	 */
+	int sgdma_in_queue;
+	/*
+	 * Sensor interface parameters: interface type, CC_CTRL
+	 * register value and interface specific data.
+	 */
+	int if_type;
+	union {
+		struct parallel {
+			u32 xclk;
+		} bt656;
+	} if_u;
+	u32 cc_ctrl;
+
+	/*** subsystem structures ***/
+	struct omap24xxcam_sgdma sgdma;
+
+	/*** hardware resources ***/
+	unsigned int irq;
+	void __iomem *mmio_base;
+	unsigned long mmio_base_phys;
+	unsigned long mmio_size;
+
+	/*** interfaces and device ***/
+	struct v4l2_int_device *sdev;
+	struct device *dev;
+	struct video_device *vfd;
+
+	/*** camera and sensor reset related stuff ***/
+	struct work_struct sensor_reset_work;
+	/*
+	 * Non-zero while we're in the middle of a reset; don't enable the
+	 * core in that case. This helps decision making when videobuf_qbuf
+	 * is called while a reset is in progress.
+	 */
+	atomic_t in_reset;
+	/*
+	 * Non-zero if we don't want any resets for now. Used to prevent
+	 * the reset work from running when we're about to stop streaming.
+	 */
+	atomic_t reset_disable;
+
+	/*** video device parameters ***/
+	int capture_mem;
+
+	/*** camera module clocks ***/
+	struct clk *fck;
+	struct clk *ick;
+
+	/*** capture data ***/
+	/* file handle, if streaming is on */
+	struct file *streaming;
+};
+
+/* Per-file handle data. */
+struct omap24xxcam_fh {
+	spinlock_t vbq_lock; /* spinlock for the videobuf queue */
+	struct videobuf_queue vbq;
+	struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
+	atomic_t field_count; /* field counter for videobuf_buffer */
+	/* accessing cam here doesn't need serialisation: it's constant */
+	struct omap24xxcam_device *cam;
+};
+
+/*
+ *
+ * Register I/O functions.
+ *
+ */
+
+static inline u32 omap24xxcam_reg_in(u32 __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+static inline u32 omap24xxcam_reg_out(u32 __iomem *base, u32 offset,
+					  u32 val)
+{
+	writel(val, base + offset);
+	return val;
+}
+
+static inline u32 omap24xxcam_reg_merge(u32 __iomem *base, u32 offset,
+					    u32 val, u32 mask)
+{
+	u32 __iomem *addr = base + offset;
+	u32 new_val = (readl(addr) & ~mask) | (val & mask);
+
+	writel(new_val, addr);
+	return new_val;
+}
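A brief usage sketch of the helpers above; 'dma_base' stands for the ioremapped camera DMA block (the 'base' field of struct omap24xxcam_dma) and is an assumption of this example:

	/* Poll the DMA block until its soft reset completes. */
	while (!(omap24xxcam_reg_in(dma_base, CAMDMA_SYSSTATUS) &
		 CAMDMA_SYSSTATUS_RESETDONE))
		cpu_relax();

	/* Read-modify-write: enable the FRAME interrupt of logical channel 0
	 * without disturbing the other CICR bits. */
	omap24xxcam_reg_merge(dma_base, CAMDMA_CICR(0),
			      CAMDMA_CICR_FRAME_IE, CAMDMA_CICR_FRAME_IE);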
+
+/*
+ *
+ * Function prototypes.
+ *
+ */
+
+/* dma prototypes */
+
+void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma);
+void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma);
+
+/* sgdma prototypes */
+
+void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma);
+int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
+			    const struct scatterlist *sglist, int sglen,
+			    int len, sgdma_callback_t callback, void *arg);
+void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma);
+void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
+			    void __iomem *base,
+			    void (*reset_callback)(unsigned long data),
+			    unsigned long reset_callback_data);
+void omap24xxcam_sgdma_exit(struct omap24xxcam_sgdma *sgdma);
+
+#endif
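To illustrate how the scatter-gather API declared above is meant to be driven (a sketch only: 'err', 'sglist', 'sglen', 'size' and the callback argument 'vb' are hypothetical placeholders, while 'cam' is a struct omap24xxcam_device):

	/* Hypothetical completion handler matching sgdma_callback_t; csr
	 * carries the DMA status code for the finished transfer. */
	static void example_sgdma_done(struct omap24xxcam_sgdma *sgdma,
				       u32 csr, void *arg)
	{
		/* mark the buffer passed in 'arg' as done, check csr ... */
	}

	/* Queue one buffer's scatterlist; the callback fires once the whole
	 * list has been transferred (or on error). */
	err = omap24xxcam_sgdma_queue(&cam->sgdma, sglist, sglen, size,
				      example_sgdma_done, vb);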
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
new file mode 100644
index 0000000..e8847e7
--- /dev/null
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -0,0 +1,11 @@
+# Makefile for OMAP3 ISP driver
+
+ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
+
+omap3-isp-objs += \
+	isp.o ispqueue.o ispvideo.o \
+	ispcsiphy.o ispccp2.o ispcsi2.o \
+	ispccdc.o isppreview.o ispresizer.o \
+	ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
+
+obj-$(CONFIG_VIDEO_OMAP3) += omap3-isp.o
diff --git a/drivers/media/platform/omap3isp/cfa_coef_table.h b/drivers/media/platform/omap3isp/cfa_coef_table.h
new file mode 100644
index 0000000..c84df07
--- /dev/null
+++ b/drivers/media/platform/omap3isp/cfa_coef_table.h
@@ -0,0 +1,61 @@
+/*
+ * cfa_coef_table.h
+ *
+ * TI OMAP3 ISP - CFA coefficients table
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+{ 244, 0, 247,   0,  12,  27,  36, 247, 250,   0,  27,   0,   4, 250,  12, 244,
+248,   0,   0,   0,   0,  40,   0,   0, 244,  12, 250,   4,   0,  27,   0, 250,
+247,  36,  27,  12,   0, 247,   0, 244,   0,   0,  40,   0,   0,   0,   0, 248,
+244,   0, 247,   0,  12,  27,  36, 247, 250,   0,  27,   0,   4, 250,  12, 244,
+248,   0,   0,   0,   0,  40,   0,   0, 244,  12, 250,   4,   0,  27,   0, 250,
+247,  36,  27,  12,   0, 247,   0, 244,   0,   0,  40,   0,   0,   0,   0, 248,
+244,   0, 247,   0,  12,  27,  36, 247, 250,   0,  27,   0,   4, 250,  12, 244,
+248,   0,   0,   0,   0,  40,   0,   0, 244,  12, 250,   4,   0,  27,   0, 250,
+247,  36,  27,  12,   0, 247,   0, 244,   0,   0,  40,   0,   0,   0,   0, 248 },
+{ 0, 247,   0, 244, 247,  36,  27,  12,   0,  27,   0, 250, 244,  12, 250,   4,
+  0,   0,   0, 248,   0,   0,  40,   0,   4, 250,  12, 244, 250,   0,  27,   0,
+ 12,  27,  36, 247, 244,   0, 247,   0,   0,  40,   0,   0, 248,   0,   0,   0,
+  0, 247,   0, 244, 247,  36,  27,  12,   0,  27,   0, 250, 244,  12, 250,   4,
+  0,   0,   0, 248,   0,   0,  40,   0,   4, 250,  12, 244, 250,   0,  27,   0,
+ 12,  27,  36, 247, 244,   0, 247,   0,   0,  40,   0,   0, 248,   0,   0,   0,
+  0, 247,   0, 244, 247,  36,  27,  12,   0,  27,   0, 250, 244,  12, 250,   4,
+  0,   0,   0, 248,   0,   0,  40,   0,   4, 250,  12, 244, 250,   0,  27,   0,
+ 12,  27,  36, 247, 244,   0, 247,   0,   0,  40,   0,   0, 248,   0,   0,   0 },
+{ 4, 250,  12, 244, 250,   0,  27,   0,  12,  27,  36, 247, 244,   0, 247,   0,
+  0,   0,   0, 248,   0,   0,  40,   0,   0, 247,   0, 244, 247,  36,  27,  12,
+  0,  27,   0, 250, 244,  12, 250,   4,   0,  40,   0,   0, 248,   0,   0,   0,
+  4, 250,  12, 244, 250,   0,  27,   0,  12,  27,  36, 247, 244,   0, 247,   0,
+  0,   0,   0, 248,   0,   0,  40,   0,   0, 247,   0, 244, 247,  36,  27,  12,
+  0,  27,   0, 250, 244,  12, 250,   4,   0,  40,   0,   0, 248,   0,   0,   0,
+  4, 250,  12, 244, 250,   0,  27,   0,  12,  27,  36, 247, 244,   0, 247,   0,
+  0,   0,   0, 248,   0,   0,  40,   0,   0, 247,   0, 244, 247,  36,  27,  12,
+  0,  27,   0, 250, 244,  12, 250,   4,   0,  40,   0,   0, 248,   0,   0,   0 },
+{ 244, 12, 250,   4,   0,  27,   0, 250, 247,  36,  27,  12,   0, 247,   0, 244,
+248,   0,   0,   0,   0,  40,   0,   0, 244,   0, 247,   0,  12,  27,  36, 247,
+250,   0,  27,   0,   4, 250,  12, 244,   0,   0,  40,   0,   0,   0,   0, 248,
+244,  12, 250,   4,   0,  27,   0, 250, 247,  36,  27,  12,   0, 247,   0, 244,
+248,   0,   0,   0,   0,  40,   0,   0, 244,   0, 247,   0,  12,  27,  36, 247,
+250,   0,  27,   0,   4, 250,  12, 244,   0,   0,  40,   0,   0,   0,   0, 248,
+244,  12, 250,   4,   0,  27,   0, 250, 247,  36,  27,  12,   0, 247,   0, 244,
+248,   0,   0,   0,   0,  40,   0,   0, 244,   0, 247,   0,  12,  27,  36, 247,
+250,   0,  27,   0,   4, 250,  12, 244,   0,   0,  40,   0,   0,   0,   0, 248 },
diff --git a/drivers/media/platform/omap3isp/gamma_table.h b/drivers/media/platform/omap3isp/gamma_table.h
new file mode 100644
index 0000000..78deebf
--- /dev/null
+++ b/drivers/media/platform/omap3isp/gamma_table.h
@@ -0,0 +1,90 @@
+/*
+ * gamma_table.h
+ *
+ * TI OMAP3 ISP - Default gamma table for all components
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+  0,   0,   1,   2,   3,   3,   4,   5,   6,   8,  10,  12,  14,  16,  18,  20,
+ 22,  23,  25,  26,  28,  29,  31,  32,  34,  35,  36,  37,  39,  40,  41,  42,
+ 43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  52,  53,  54,  55,  56,  57,
+ 58,  59,  60,  61,  62,  63,  63,  64,  65,  66,  66,  67,  68,  69,  69,  70,
+ 71,  72,  72,  73,  74,  75,  75,  76,  77,  78,  78,  79,  80,  81,  81,  82,
+ 83,  84,  84,  85,  86,  87,  88,  88,  89,  90,  91,  91,  92,  93,  94,  94,
+ 95,  96,  97,  97,  98,  98,  99,  99, 100, 100, 101, 101, 102, 103, 104, 104,
+105, 106, 107, 108, 108, 109, 110, 111, 111, 112, 113, 114, 114, 115, 116, 117,
+117, 118, 119, 119, 120, 120, 121, 121, 122, 122, 123, 123, 124, 124, 125, 125,
+126, 126, 127, 127, 128, 128, 129, 129, 130, 130, 131, 131, 132, 132, 133, 133,
+134, 134, 135, 135, 136, 136, 137, 137, 138, 138, 139, 139, 140, 140, 141, 141,
+142, 142, 143, 143, 144, 144, 145, 145, 146, 146, 147, 147, 148, 148, 149, 149,
+150, 150, 151, 151, 152, 152, 153, 153, 153, 153, 154, 154, 154, 154, 155, 155,
+156, 156, 157, 157, 158, 158, 158, 159, 159, 159, 160, 160, 160, 161, 161, 162,
+162, 163, 163, 164, 164, 164, 164, 165, 165, 165, 165, 166, 166, 167, 167, 168,
+168, 169, 169, 170, 170, 170, 170, 171, 171, 171, 171, 172, 172, 173, 173, 174,
+174, 175, 175, 176, 176, 176, 176, 177, 177, 177, 177, 178, 178, 178, 178, 179,
+179, 179, 179, 180, 180, 180, 180, 181, 181, 181, 181, 182, 182, 182, 182, 183,
+183, 183, 183, 184, 184, 184, 184, 185, 185, 185, 185, 186, 186, 186, 186, 187,
+187, 187, 187, 188, 188, 188, 188, 189, 189, 189, 189, 190, 190, 190, 190, 191,
+191, 191, 191, 192, 192, 192, 192, 193, 193, 193, 193, 194, 194, 194, 194, 195,
+195, 195, 195, 196, 196, 196, 196, 197, 197, 197, 197, 198, 198, 198, 198, 199,
+199, 199, 199, 200, 200, 200, 200, 201, 201, 201, 201, 202, 202, 202, 203, 203,
+203, 203, 204, 204, 204, 204, 205, 205, 205, 205, 206, 206, 206, 206, 207, 207,
+207, 207, 208, 208, 208, 208, 209, 209, 209, 209, 210, 210, 210, 210, 210, 210,
+210, 210, 210, 210, 210, 210, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211,
+211, 212, 212, 212, 212, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
+213, 214, 214, 214, 214, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215,
+216, 216, 216, 216, 217, 217, 217, 217, 218, 218, 218, 218, 219, 219, 219, 219,
+219, 219, 219, 219, 219, 219, 219, 219, 220, 220, 220, 220, 221, 221, 221, 221,
+221, 221, 221, 221, 221, 221, 221, 222, 222, 222, 222, 223, 223, 223, 223, 223,
+223, 223, 223, 223, 223, 223, 223, 224, 224, 224, 224, 225, 225, 225, 225, 225,
+225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 226, 226,
+226, 226, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 228, 228,
+228, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 230, 230, 230,
+230, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 232, 232, 232,
+232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
+233, 233, 233, 233, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 235,
+235, 235, 235, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236,
+236, 236, 236, 236, 236, 236, 237, 237, 237, 237, 238, 238, 238, 238, 238, 238,
+238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238,
+238, 238, 238, 238, 238, 239, 239, 239, 239, 240, 240, 240, 240, 240, 240, 240,
+240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240,
+240, 240, 240, 240, 241, 241, 241, 241, 242, 242, 242, 242, 242, 242, 242, 242,
+242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242,
+242, 242, 243, 243, 243, 243, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
+244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
+244, 245, 245, 245, 245, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
+246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
+246, 246, 246, 246, 246, 246, 246, 247, 247, 247, 247, 248, 248, 248, 248, 248,
+248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248,
+248, 248, 248, 248, 248, 248, 249, 249, 249, 249, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
+250, 250, 250, 250, 251, 251, 251, 251, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
+252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
+253, 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
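Note that, like cfa_coef_table.h above, this header is bare initializer data rather than declarations: it is presumably meant to be pulled into an array by an #include placed inside the initializer, roughly as in this sketch (array name and element type are hypothetical):

	static const u32 example_gamma_table[] = {
	#include "gamma_table.h"
	};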
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
new file mode 100644
index 0000000..e0096e0
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -0,0 +1,2241 @@
+/*
+ * isp.c
+ *
+ * TI OMAP3 ISP - Core
+ *
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * Contributors:
+ *	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	Sakari Ailus <sakari.ailus@iki.fi>
+ *	David Cohen <dacohen@gmail.com>
+ *	Stanimir Varbanov <svarbanov@mm-sol.com>
+ *	Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
+ *	Tuukka Toivonen <tuukkat76@gmail.com>
+ *	Sergio Aguirre <saaguirre@ti.com>
+ *	Antti Koskipaa <akoskipa@gmail.com>
+ *	Ivan T. Ivanov <iivanov@mm-sol.com>
+ *	RaniSuneela <r-m@ti.com>
+ *	Atanas Filipov <afilipov@mm-sol.com>
+ *	Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
+ *	Hiroshi DOYU <hiroshi.doyu@nokia.com>
+ *	Nayden Kanchev <nkanchev@mm-sol.com>
+ *	Phil Carmody <ext-phil.2.carmody@nokia.com>
+ *	Artem Bityutskiy <artem.bityutskiy@nokia.com>
+ *	Dominic Curran <dcurran@ti.com>
+ *	Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
+ *	Pallavi Kulkarni <p-kulkarni@ti.com>
+ *	Vaibhav Hiremath <hvaibhav@ti.com>
+ *	Mohit Jalori <mjalori@ti.com>
+ *	Sameer Venkatraman <sameerv@ti.com>
+ *	Senthilvadivu Guruswamy <svadivu@ti.com>
+ *	Thara Gopinath <thara@ti.com>
+ *	Toni Leinonen <toni.leinonen@nokia.com>
+ *	Troy Laramy <t-laramy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <asm/cacheflush.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccdc.h"
+#include "isppreview.h"
+#include "ispresizer.h"
+#include "ispcsi2.h"
+#include "ispccp2.h"
+#include "isph3a.h"
+#include "isphist.h"
+
+static unsigned int autoidle;
+module_param(autoidle, int, 0444);
+MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
+
+static void isp_save_ctx(struct isp_device *isp);
+
+static void isp_restore_ctx(struct isp_device *isp);
+
+static const struct isp_res_mapping isp_res_maps[] = {
+	{
+		.isp_rev = ISP_REVISION_2_0,
+		.map = 1 << OMAP3_ISP_IOMEM_MAIN |
+		       1 << OMAP3_ISP_IOMEM_CCP2 |
+		       1 << OMAP3_ISP_IOMEM_CCDC |
+		       1 << OMAP3_ISP_IOMEM_HIST |
+		       1 << OMAP3_ISP_IOMEM_H3A |
+		       1 << OMAP3_ISP_IOMEM_PREV |
+		       1 << OMAP3_ISP_IOMEM_RESZ |
+		       1 << OMAP3_ISP_IOMEM_SBL |
+		       1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
+		       1 << OMAP3_ISP_IOMEM_CSIPHY2,
+	},
+	{
+		.isp_rev = ISP_REVISION_15_0,
+		.map = 1 << OMAP3_ISP_IOMEM_MAIN |
+		       1 << OMAP3_ISP_IOMEM_CCP2 |
+		       1 << OMAP3_ISP_IOMEM_CCDC |
+		       1 << OMAP3_ISP_IOMEM_HIST |
+		       1 << OMAP3_ISP_IOMEM_H3A |
+		       1 << OMAP3_ISP_IOMEM_PREV |
+		       1 << OMAP3_ISP_IOMEM_RESZ |
+		       1 << OMAP3_ISP_IOMEM_SBL |
+		       1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
+		       1 << OMAP3_ISP_IOMEM_CSIPHY2 |
+		       1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
+		       1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
+		       1 << OMAP3_ISP_IOMEM_CSIPHY1 |
+		       1 << OMAP3_ISP_IOMEM_CSI2C_REGS2,
+	},
+};
+
+/* Structure for saving/restoring ISP module registers */
+static struct isp_reg isp_reg_list[] = {
+	{OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
+	{OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
+	{OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
+	{0, ISP_TOK_TERM, 0}
+};
+
+/*
+ * omap3isp_flush - Post pending L3 bus writes by doing a register readback
+ * @isp: OMAP3 ISP device
+ *
+ * In order to force posting of pending writes, we need to write and
+ * readback the same register, in this case the revision register.
+ *
+ * See this link for reference:
+ *   http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ */
+void omap3isp_flush(struct isp_device *isp)
+{
+	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+	isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+}
+
+/*
+ * isp_enable_interrupts - Enable ISP interrupts.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_enable_interrupts(struct isp_device *isp)
+{
+	static const u32 irq = IRQ0ENABLE_CSIA_IRQ
+			     | IRQ0ENABLE_CSIB_IRQ
+			     | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
+			     | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
+			     | IRQ0ENABLE_CCDC_VD0_IRQ
+			     | IRQ0ENABLE_CCDC_VD1_IRQ
+			     | IRQ0ENABLE_HS_VS_IRQ
+			     | IRQ0ENABLE_HIST_DONE_IRQ
+			     | IRQ0ENABLE_H3A_AWB_DONE_IRQ
+			     | IRQ0ENABLE_H3A_AF_DONE_IRQ
+			     | IRQ0ENABLE_PRV_DONE_IRQ
+			     | IRQ0ENABLE_RSZ_DONE_IRQ;
+
+	isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+	isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
+}
+
+/*
+ * isp_disable_interrupts - Disable ISP interrupts.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_disable_interrupts(struct isp_device *isp)
+{
+	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
+}
+
+/**
+ * isp_set_xclk - Configures the specified cam_xclk to the desired frequency.
+ * @isp: OMAP3 ISP device
+ * @xclk: Desired frequency of the clock in Hz. 0 = stable low, 1 = stable high.
+ * @xclksel: XCLK to configure (0 = A, 1 = B).
+ *
+ * Configures the specified MCLK divisor in the ISP timing control register
+ * (TCTRL_CTRL) to generate the desired xclk clock value.
+ *
+ * Divisor = cam_mclk_hz / xclk
+ *
+ * Returns the final frequency that is actually being generated
+ **/
+static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel)
+{
+	u32 divisor;
+	u32 currentxclk;
+	unsigned long mclk_hz;
+
+	if (!omap3isp_get(isp))
+		return 0;
+
+	mclk_hz = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
+
+	if (xclk >= mclk_hz) {
+		divisor = ISPTCTRL_CTRL_DIV_BYPASS;
+		currentxclk = mclk_hz;
+	} else if (xclk >= 2) {
+		divisor = mclk_hz / xclk;
+		if (divisor >= ISPTCTRL_CTRL_DIV_BYPASS)
+			divisor = ISPTCTRL_CTRL_DIV_BYPASS - 1;
+		currentxclk = mclk_hz / divisor;
+	} else {
+		divisor = xclk;
+		currentxclk = 0;
+	}
+
+	switch (xclksel) {
+	case ISP_XCLK_A:
+		isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
+				ISPTCTRL_CTRL_DIVA_MASK,
+				divisor << ISPTCTRL_CTRL_DIVA_SHIFT);
+		dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n",
+			currentxclk);
+		break;
+	case ISP_XCLK_B:
+		isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
+				ISPTCTRL_CTRL_DIVB_MASK,
+				divisor << ISPTCTRL_CTRL_DIVB_SHIFT);
+		dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n",
+			currentxclk);
+		break;
+	case ISP_XCLK_NONE:
+	default:
+		omap3isp_put(isp);
+		dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested "
+			"xclk. Must be 0 (A) or 1 (B).\n");
+		return -EINVAL;
+	}
+
+	/* Do we go from a stable level (low or high) to a running clock? */
+	if (divisor >= 2 && isp->xclk_divisor[xclksel - 1] < 2)
+		omap3isp_get(isp);
+	/* Stopping the clock. */
+	else if (divisor < 2 && isp->xclk_divisor[xclksel - 1] >= 2)
+		omap3isp_put(isp);
+
+	isp->xclk_divisor[xclksel - 1] = divisor;
+
+	omap3isp_put(isp);
+
+	return currentxclk;
+}
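A quick worked example of the divisor arithmetic, with assumed numbers (172.8 MHz is merely a typical cam_mclk rate, not something guaranteed here):

	/*
	 * mclk_hz = 172800000, requested xclk = 21600000
	 *   => divisor     = 172800000 / 21600000 = 8
	 *   => currentxclk = 172800000 / 8 = 21600000 Hz
	 * Requesting xclk == 0 or 1 instead programs the divider to a stable
	 * low or stable high level and reports a generated frequency of 0.
	 */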
+
+/*
+ * isp_core_init - ISP core settings
+ * @isp: OMAP3 ISP device
+ * @idle: Consider idle state.
+ *
+ * Set the power settings for the ISP and SBL bus and configure the HS/VS
+ * interrupt source.
+ *
+ * We need to configure the HS/VS interrupt source before interrupts get
+ * enabled, as the sensor might be free-running and the ISP default setting
+ * (HS edge) would put an unnecessary burden on the CPU.
+ */
+static void isp_core_init(struct isp_device *isp, int idle)
+{
+	isp_reg_writel(isp,
+		       ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
+				ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
+			ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
+			((isp->revision == ISP_REVISION_15_0) ?
+			  ISP_SYSCONFIG_AUTOIDLE : 0),
+		       OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
+
+	isp_reg_writel(isp,
+		       (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
+		       ISPCTRL_SYNC_DETECT_VSRISE,
+		       OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+}
+
+/*
+ * Configure the bridge and lane shifter. Valid inputs are
+ *
+ * CCDC_INPUT_PARALLEL: Parallel interface
+ * CCDC_INPUT_CSI2A: CSI2a receiver
+ * CCDC_INPUT_CCP2B: CCP2b receiver
+ * CCDC_INPUT_CSI2C: CSI2c receiver
+ *
+ * The bridge and lane shifter are configured according to the selected input
+ * and the ISP platform data.
+ */
+void omap3isp_configure_bridge(struct isp_device *isp,
+			       enum ccdc_input_entity input,
+			       const struct isp_parallel_platform_data *pdata,
+			       unsigned int shift, unsigned int bridge)
+{
+	u32 ispctrl_val;
+
+	ispctrl_val  = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+	ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
+	ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
+	ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
+	ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
+	ispctrl_val |= bridge;
+
+	switch (input) {
+	case CCDC_INPUT_PARALLEL:
+		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
+		ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
+		shift += pdata->data_lane_shift * 2;
+		break;
+
+	case CCDC_INPUT_CSI2A:
+		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
+		break;
+
+	case CCDC_INPUT_CCP2B:
+		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
+		break;
+
+	case CCDC_INPUT_CSI2C:
+		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
+		break;
+
+	default:
+		return;
+	}
+
+	ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
+
+	isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
+}
+
+void omap3isp_hist_dma_done(struct isp_device *isp)
+{
+	if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
+	    omap3isp_stat_pcr_busy(&isp->isp_hist)) {
+		/* Histogram cannot be enabled in this frame anymore */
+		atomic_set(&isp->isp_hist.buf_err, 1);
+		dev_dbg(isp->dev, "hist: Out of synchronization with "
+				  "CCDC. Ignoring next buffer.\n");
+	}
+}
+
+static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
+{
+	static const char *name[] = {
+		"CSIA_IRQ",
+		"res1",
+		"res2",
+		"CSIB_LCM_IRQ",
+		"CSIB_IRQ",
+		"res5",
+		"res6",
+		"res7",
+		"CCDC_VD0_IRQ",
+		"CCDC_VD1_IRQ",
+		"CCDC_VD2_IRQ",
+		"CCDC_ERR_IRQ",
+		"H3A_AF_DONE_IRQ",
+		"H3A_AWB_DONE_IRQ",
+		"res14",
+		"res15",
+		"HIST_DONE_IRQ",
+		"CCDC_LSC_DONE",
+		"CCDC_LSC_PREFETCH_COMPLETED",
+		"CCDC_LSC_PREFETCH_ERROR",
+		"PRV_DONE_IRQ",
+		"CBUFF_IRQ",
+		"res22",
+		"res23",
+		"RSZ_DONE_IRQ",
+		"OVF_IRQ",
+		"res26",
+		"res27",
+		"MMU_ERR_IRQ",
+		"OCP_ERR_IRQ",
+		"SEC_ERR_IRQ",
+		"HS_VS_IRQ",
+	};
+	int i;
+
+	dev_dbg(isp->dev, "ISP IRQ: ");
+
+	for (i = 0; i < ARRAY_SIZE(name); i++) {
+		if ((1 << i) & irqstatus)
+			printk(KERN_CONT "%s ", name[i]);
+	}
+	printk(KERN_CONT "\n");
+}
+
+static void isp_isr_sbl(struct isp_device *isp)
+{
+	struct device *dev = isp->dev;
+	struct isp_pipeline *pipe;
+	u32 sbl_pcr;
+
+	/*
+	 * Handle shared buffer logic overflows for video buffers.
+	 * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
+	 */
+	sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
+	isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
+	sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
+
+	if (sbl_pcr)
+		dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
+
+	if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
+		pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
+		if (pipe != NULL)
+			pipe->error = true;
+	}
+
+	if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
+		pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
+		if (pipe != NULL)
+			pipe->error = true;
+	}
+
+	if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
+		pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
+		if (pipe != NULL)
+			pipe->error = true;
+	}
+
+	if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
+		pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
+		if (pipe != NULL)
+			pipe->error = true;
+	}
+
+	if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
+		       | ISPSBL_PCR_RSZ2_WBL_OVF
+		       | ISPSBL_PCR_RSZ3_WBL_OVF
+		       | ISPSBL_PCR_RSZ4_WBL_OVF)) {
+		pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
+		if (pipe != NULL)
+			pipe->error = true;
+	}
+
+	if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
+		omap3isp_stat_sbl_overflow(&isp->isp_af);
+
+	if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
+		omap3isp_stat_sbl_overflow(&isp->isp_aewb);
+}
+
+/*
+ * isp_isr - Interrupt Service Routine for Camera ISP module.
+ * @irq: Not used currently.
+ * @_isp: Pointer to the OMAP3 ISP device
+ *
+ * Dispatches each signalled interrupt to the corresponding sub-module handler.
+ *
+ * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
+ * IRQ wasn't handled.
+ */
+static irqreturn_t isp_isr(int irq, void *_isp)
+{
+	static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
+				       IRQ0STATUS_CCDC_LSC_DONE_IRQ |
+				       IRQ0STATUS_CCDC_VD0_IRQ |
+				       IRQ0STATUS_CCDC_VD1_IRQ |
+				       IRQ0STATUS_HS_VS_IRQ;
+	struct isp_device *isp = _isp;
+	u32 irqstatus;
+
+	irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+	isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+
+	isp_isr_sbl(isp);
+
+	if (irqstatus & IRQ0STATUS_CSIA_IRQ)
+		omap3isp_csi2_isr(&isp->isp_csi2a);
+
+	if (irqstatus & IRQ0STATUS_CSIB_IRQ)
+		omap3isp_ccp2_isr(&isp->isp_ccp2);
+
+	if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
+		if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
+			omap3isp_preview_isr_frame_sync(&isp->isp_prev);
+		if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
+			omap3isp_resizer_isr_frame_sync(&isp->isp_res);
+		omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
+		omap3isp_stat_isr_frame_sync(&isp->isp_af);
+		omap3isp_stat_isr_frame_sync(&isp->isp_hist);
+	}
+
+	if (irqstatus & ccdc_events)
+		omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
+
+	if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
+		if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
+			omap3isp_resizer_isr_frame_sync(&isp->isp_res);
+		omap3isp_preview_isr(&isp->isp_prev);
+	}
+
+	if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
+		omap3isp_resizer_isr(&isp->isp_res);
+
+	if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
+		omap3isp_stat_isr(&isp->isp_aewb);
+
+	if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
+		omap3isp_stat_isr(&isp->isp_af);
+
+	if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
+		omap3isp_stat_isr(&isp->isp_hist);
+
+	omap3isp_flush(isp);
+
+#if defined(DEBUG) && defined(ISP_ISR_DEBUG)
+	isp_isr_dbg(isp, irqstatus);
+#endif
+
+	return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, the entity use_count field is used to track the number of
+ * users. For entities corresponding to video device nodes, the use_count field
+ * stores the use count of the node. For entities corresponding to subdevs, it
+ * stores the total number of users of all video device nodes in the pipeline.
+ *
+ * The omap3isp_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
+
+/*
+ * isp_pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int isp_pipeline_pm_use_count(struct media_entity *entity)
+{
+	struct media_entity_graph graph;
+	int use = 0;
+
+	media_entity_graph_walk_start(&graph, entity);
+
+	while ((entity = media_entity_graph_walk_next(&graph))) {
+		if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+			use += entity->use_count;
+	}
+
+	return use;
+}
+
+/*
+ * isp_pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int isp_pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+	struct v4l2_subdev *subdev;
+	int ret;
+
+	subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
+	       ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+	if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+		ret = v4l2_subdev_call(subdev, core, s_power, 1);
+		if (ret < 0 && ret != -ENOIOCTLCMD)
+			return ret;
+	}
+
+	entity->use_count += change;
+	WARN_ON(entity->use_count < 0);
+
+	if (entity->use_count == 0 && change < 0 && subdev != NULL)
+		v4l2_subdev_call(subdev, core, s_power, 0);
+
+	return 0;
+}
+
+/*
+ * isp_pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int isp_pipeline_pm_power(struct media_entity *entity, int change)
+{
+	struct media_entity_graph graph;
+	struct media_entity *first = entity;
+	int ret = 0;
+
+	if (!change)
+		return 0;
+
+	media_entity_graph_walk_start(&graph, entity);
+
+	while (!ret && (entity = media_entity_graph_walk_next(&graph)))
+		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+			ret = isp_pipeline_pm_power_one(entity, change);
+
+	if (!ret)
+		return 0;
+
+	media_entity_graph_walk_start(&graph, first);
+
+	while ((first = media_entity_graph_walk_next(&graph))
+	       && first != entity)
+		if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
+			isp_pipeline_pm_power_one(first, -change);
+
+	return ret;
+}
+
+/*
+ * omap3isp_pipeline_pm_use - Update the use count of an entity
+ * @entity: The entity
+ * @use: Use (1) or stop using (0) the entity
+ *
+ * Update the use count of all entities in the pipeline and power entities on or
+ * off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. No failure can occur when the use parameter is
+ * set to 0.
+ */
+int omap3isp_pipeline_pm_use(struct media_entity *entity, int use)
+{
+	int change = use ? 1 : -1;
+	int ret;
+
+	mutex_lock(&entity->parent->graph_mutex);
+
+	/* Apply use count to node. */
+	entity->use_count += change;
+	WARN_ON(entity->use_count < 0);
+
+	/* Apply power change to connected non-nodes. */
+	ret = isp_pipeline_pm_power(entity, change);
+	if (ret < 0)
+		entity->use_count -= change;
+
+	mutex_unlock(&entity->parent->graph_mutex);
+
+	return ret;
+}
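As a usage sketch only, a video-node open()/release() pair is expected to bracket its lifetime with this call; 'entity' below stands for the video node's media entity and 'ret' is a local int, both assumptions of the example:

	ret = omap3isp_pipeline_pm_use(entity, 1);	/* in open() */
	if (ret < 0)
		return ret;

	omap3isp_pipeline_pm_use(entity, 0);		/* in release(); never fails */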
+
+/*
+ * isp_pipeline_link_notify - Link management notification callback
+ * @source: Pad at the start of the link
+ * @sink: Pad at the end of the link
+ * @flags: New link flags that will be applied
+ *
+ * React to link management on powered pipelines by updating the use count of
+ * all entities in the source and sink sides of the link. Entities are powered
+ * on or off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. This function will not fail for disconnection
+ * events.
+ */
+static int isp_pipeline_link_notify(struct media_pad *source,
+				    struct media_pad *sink, u32 flags)
+{
+	int source_use = isp_pipeline_pm_use_count(source->entity);
+	int sink_use = isp_pipeline_pm_use_count(sink->entity);
+	int ret;
+
+	if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+		/* Powering off entities is assumed to never fail. */
+		isp_pipeline_pm_power(source->entity, -sink_use);
+		isp_pipeline_pm_power(sink->entity, -source_use);
+		return 0;
+	}
+
+	ret = isp_pipeline_pm_power(source->entity, sink_use);
+	if (ret < 0)
+		return ret;
+
+	ret = isp_pipeline_pm_power(sink->entity, source_use);
+	if (ret < 0)
+		isp_pipeline_pm_power(source->entity, -sink_use);
+
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline stream management
+ */
+
+/*
+ * isp_pipeline_enable - Enable streaming on a pipeline
+ * @pipe: ISP pipeline
+ * @mode: Stream mode (single shot or continuous)
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int isp_pipeline_enable(struct isp_pipeline *pipe,
+			       enum isp_pipeline_stream_state mode)
+{
+	struct isp_device *isp = pipe->output->isp;
+	struct media_entity *entity;
+	struct media_pad *pad;
+	struct v4l2_subdev *subdev;
+	unsigned long flags;
+	int ret;
+
+	/* If the preview engine crashed it might not respond to read/write
+	 * operations on the L4 bus. This would result in a bus fault and a
+	 * kernel oops. Refuse to start streaming in that case. This check must
+	 * be performed before the loop below to avoid starting entities if the
+	 * pipeline won't start anyway (those entities would then likely fail to
+	 * stop, making the problem worse).
+	 */
+	if ((pipe->entities & isp->crashed) &
+	    (1U << isp->isp_prev.subdev.entity.id))
+		return -EIO;
+
+	spin_lock_irqsave(&pipe->lock, flags);
+	pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
+	spin_unlock_irqrestore(&pipe->lock, flags);
+
+	pipe->do_propagation = false;
+
+	entity = &pipe->output->video.entity;
+	while (1) {
+		pad = &entity->pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		pad = media_entity_remote_source(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		entity = pad->entity;
+		subdev = media_entity_to_v4l2_subdev(entity);
+
+		ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+		if (ret < 0 && ret != -ENOIOCTLCMD)
+			return ret;
+
+		if (subdev == &isp->isp_ccdc.subdev) {
+			v4l2_subdev_call(&isp->isp_aewb.subdev, video,
+					s_stream, mode);
+			v4l2_subdev_call(&isp->isp_af.subdev, video,
+					s_stream, mode);
+			v4l2_subdev_call(&isp->isp_hist.subdev, video,
+					s_stream, mode);
+			pipe->do_propagation = true;
+		}
+	}
+
+	return 0;
+}
+
+static int isp_pipeline_wait_resizer(struct isp_device *isp)
+{
+	return omap3isp_resizer_busy(&isp->isp_res);
+}
+
+static int isp_pipeline_wait_preview(struct isp_device *isp)
+{
+	return omap3isp_preview_busy(&isp->isp_prev);
+}
+
+static int isp_pipeline_wait_ccdc(struct isp_device *isp)
+{
+	return omap3isp_stat_busy(&isp->isp_af)
+	    || omap3isp_stat_busy(&isp->isp_aewb)
+	    || omap3isp_stat_busy(&isp->isp_hist)
+	    || omap3isp_ccdc_busy(&isp->isp_ccdc);
+}
+
+#define ISP_STOP_TIMEOUT	msecs_to_jiffies(1000)
+
+static int isp_pipeline_wait(struct isp_device *isp,
+			     int(*busy)(struct isp_device *isp))
+{
+	unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
+
+	while (!time_after(jiffies, timeout)) {
+		if (!busy(isp))
+			return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * isp_pipeline_disable - Disable streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
+ *
+ * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
+ * can't be stopped (in which case a software reset of the ISP is probably
+ * necessary).
+ */
+static int isp_pipeline_disable(struct isp_pipeline *pipe)
+{
+	struct isp_device *isp = pipe->output->isp;
+	struct media_entity *entity;
+	struct media_pad *pad;
+	struct v4l2_subdev *subdev;
+	int failure = 0;
+	int ret;
+
+	/*
+	 * We need to stop the modules downstream of the CCDC first, or they
+	 * may never stop, since they might not receive a full frame from the
+	 * CCDC.
+	 */
+	entity = &pipe->output->video.entity;
+	while (1) {
+		pad = &entity->pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		pad = media_entity_remote_source(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		entity = pad->entity;
+		subdev = media_entity_to_v4l2_subdev(entity);
+
+		if (subdev == &isp->isp_ccdc.subdev) {
+			v4l2_subdev_call(&isp->isp_aewb.subdev,
+					 video, s_stream, 0);
+			v4l2_subdev_call(&isp->isp_af.subdev,
+					 video, s_stream, 0);
+			v4l2_subdev_call(&isp->isp_hist.subdev,
+					 video, s_stream, 0);
+		}
+
+		v4l2_subdev_call(subdev, video, s_stream, 0);
+
+		if (subdev == &isp->isp_res.subdev)
+			ret = isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
+		else if (subdev == &isp->isp_prev.subdev)
+			ret = isp_pipeline_wait(isp, isp_pipeline_wait_preview);
+		else if (subdev == &isp->isp_ccdc.subdev)
+			ret = isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
+		else
+			ret = 0;
+
+		if (ret) {
+			dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
+			/* If the entity failed to stop, assume it has
+			 * crashed. Mark it as such; the ISP will be reset
+			 * when applications release it.
+			 */
+			isp->crashed |= 1U << subdev->entity.id;
+			failure = -ETIMEDOUT;
+		}
+	}
+
+	return failure;
+}
+
+/*
+ * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: ISP pipeline
+ * @state: Stream state (stopped, single shot or continuous)
+ *
+ * Set the pipeline to the given stream state. Pipelines can be started in
+ * single-shot or continuous mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. The pipeline state is not updated when the operation
+ * fails, except when stopping the pipeline.
+ */
+int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
+				 enum isp_pipeline_stream_state state)
+{
+	int ret;
+
+	if (state == ISP_PIPELINE_STREAM_STOPPED)
+		ret = isp_pipeline_disable(pipe);
+	else
+		ret = isp_pipeline_enable(pipe, state);
+
+	if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
+		pipe->stream_state = state;
+
+	return ret;
+}
+
+/*
+ * isp_pipeline_resume - Resume streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Resume video output and input and re-enable pipeline.
+ */
+static void isp_pipeline_resume(struct isp_pipeline *pipe)
+{
+	int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT;
+
+	omap3isp_video_resume(pipe->output, !singleshot);
+	if (singleshot)
+		omap3isp_video_resume(pipe->input, 0);
+	isp_pipeline_enable(pipe, pipe->stream_state);
+}
+
+/*
+ * isp_pipeline_suspend - Suspend streaming on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Suspend pipeline.
+ */
+static void isp_pipeline_suspend(struct isp_pipeline *pipe)
+{
+	isp_pipeline_disable(pipe);
+}
+
+/*
+ * isp_pipeline_is_last - Verify if entity has an enabled link to the output
+ * 			  video node
+ * @me: ISP module's media entity
+ *
+ * Returns 1 if the entity has an enabled link to the output video node or 0
+ * otherwise. This holds only as long as a pipeline can have no more than one
+ * output node.
+ */
+static int isp_pipeline_is_last(struct media_entity *me)
+{
+	struct isp_pipeline *pipe;
+	struct media_pad *pad;
+
+	if (!me->pipe)
+		return 0;
+	pipe = to_isp_pipeline(me);
+	if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
+		return 0;
+	pad = media_entity_remote_source(&pipe->output->pad);
+	return pad->entity == me;
+}
+
+/*
+ * isp_suspend_module_pipeline - Suspend the pipeline the module belongs to
+ * @me: ISP module's media entity
+ *
+ * Suspend the whole pipeline if the module's entity has an enabled link to
+ * the output video node. This works only as long as a pipeline can have no
+ * more than one output node.
+ */
+static void isp_suspend_module_pipeline(struct media_entity *me)
+{
+	if (isp_pipeline_is_last(me))
+		isp_pipeline_suspend(to_isp_pipeline(me));
+}
+
+/*
+ * isp_resume_module_pipeline - Resume the pipeline the module belongs to
+ * @me: ISP module's media entity
+ *
+ * Resume the whole pipeline if the module's entity has an enabled link to
+ * the output video node. This works only as long as a pipeline can have no
+ * more than one output node.
+ */
+static void isp_resume_module_pipeline(struct media_entity *me)
+{
+	if (isp_pipeline_is_last(me))
+		isp_pipeline_resume(to_isp_pipeline(me));
+}
+
+/*
+ * isp_suspend_modules - Suspend ISP submodules.
+ * @isp: OMAP3 ISP device
+ *
+ * Returns 0 if suspend properly left all the submodules in the idle state,
+ * or 1 if a general reset is required to suspend them.
+ */
+static int isp_suspend_modules(struct isp_device *isp)
+{
+	unsigned long timeout;
+
+	omap3isp_stat_suspend(&isp->isp_aewb);
+	omap3isp_stat_suspend(&isp->isp_af);
+	omap3isp_stat_suspend(&isp->isp_hist);
+	isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
+	isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
+	isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
+	isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
+	isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);
+
+	timeout = jiffies + ISP_STOP_TIMEOUT;
+	while (omap3isp_stat_busy(&isp->isp_af)
+	    || omap3isp_stat_busy(&isp->isp_aewb)
+	    || omap3isp_stat_busy(&isp->isp_hist)
+	    || omap3isp_preview_busy(&isp->isp_prev)
+	    || omap3isp_resizer_busy(&isp->isp_res)
+	    || omap3isp_ccdc_busy(&isp->isp_ccdc)) {
+		if (time_after(jiffies, timeout)) {
+			dev_info(isp->dev, "can't stop modules.\n");
+			return 1;
+		}
+		msleep(1);
+	}
+
+	return 0;
+}
+
+/*
+ * isp_resume_modules - Resume ISP submodules.
+ * @isp: OMAP3 ISP device
+ */
+static void isp_resume_modules(struct isp_device *isp)
+{
+	omap3isp_stat_resume(&isp->isp_aewb);
+	omap3isp_stat_resume(&isp->isp_af);
+	omap3isp_stat_resume(&isp->isp_hist);
+	isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
+	isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
+	isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
+	isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
+	isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
+}
+
+/*
+ * isp_reset - Reset ISP with a timeout wait for idle.
+ * @isp: OMAP3 ISP device
+ */
+static int isp_reset(struct isp_device *isp)
+{
+	unsigned long timeout = 0;
+
+	isp_reg_writel(isp,
+		       isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
+		       | ISP_SYSCONFIG_SOFTRESET,
+		       OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
+	while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
+			       ISP_SYSSTATUS) & 0x1)) {
+		if (timeout++ > 10000) {
+			dev_alert(isp->dev, "cannot reset ISP\n");
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+
+	isp->crashed = 0;
+	return 0;
+}
+
+/*
+ * isp_save_context - Saves the values of the ISP module registers.
+ * @isp: OMAP3 ISP device
+ * @reg_list: Array of register address/value pairs; the current register
+ *            values are read back into it.
+ */
+static void
+isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
+{
+	struct isp_reg *next = reg_list;
+
+	for (; next->reg != ISP_TOK_TERM; next++)
+		next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
+}
+
+/*
+ * isp_restore_context - Restores the values of the ISP module registers.
+ * @isp: OMAP3 ISP device
+ * @reg_list: Structure containing pairs of register address and value to
+ *            modify on OMAP.
+ */
+static void
+isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
+{
+	struct isp_reg *next = reg_list;
+
+	for (; next->reg != ISP_TOK_TERM; next++)
+		isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
+}
+
+/*
+ * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
+ * @isp: OMAP3 ISP device
+ *
+ * Routine for saving the context of each module in the ISP:
+ * CCDC, HIST, H3A, PREV, RESZ and MMU.
+ */
+static void isp_save_ctx(struct isp_device *isp)
+{
+	isp_save_context(isp, isp_reg_list);
+	omap_iommu_save_ctx(isp->dev);
+}
+
+/*
+ * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
+ * @isp: OMAP3 ISP device
+ *
+ * Routine for restoring the context of each module in the ISP:
+ * CCDC, HIST, H3A, PREV, RESZ and MMU.
+ */
+static void isp_restore_ctx(struct isp_device *isp)
+{
+	isp_restore_context(isp, isp_reg_list);
+	omap_iommu_restore_ctx(isp->dev);
+	omap3isp_ccdc_restore_context(isp);
+	omap3isp_preview_restore_context(isp);
+}
+
+/* -----------------------------------------------------------------------------
+ * SBL resources management
+ */
+#define OMAP3_ISP_SBL_READ	(OMAP3_ISP_SBL_CSI1_READ | \
+				 OMAP3_ISP_SBL_CCDC_LSC_READ | \
+				 OMAP3_ISP_SBL_PREVIEW_READ | \
+				 OMAP3_ISP_SBL_RESIZER_READ)
+#define OMAP3_ISP_SBL_WRITE	(OMAP3_ISP_SBL_CSI1_WRITE | \
+				 OMAP3_ISP_SBL_CSI2A_WRITE | \
+				 OMAP3_ISP_SBL_CSI2C_WRITE | \
+				 OMAP3_ISP_SBL_CCDC_WRITE | \
+				 OMAP3_ISP_SBL_PREVIEW_WRITE)
+
+void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
+{
+	u32 sbl = 0;
+
+	isp->sbl_resources |= res;
+
+	if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
+		sbl |= ISPCTRL_SBL_SHARED_RPORTA;
+
+	if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
+		sbl |= ISPCTRL_SBL_SHARED_RPORTB;
+
+	if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
+		sbl |= ISPCTRL_SBL_SHARED_WPORTC;
+
+	if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
+		sbl |= ISPCTRL_SBL_WR0_RAM_EN;
+
+	if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
+		sbl |= ISPCTRL_SBL_WR1_RAM_EN;
+
+	if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
+		sbl |= ISPCTRL_SBL_RD_RAM_EN;
+
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
+}
+
+void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
+{
+	u32 sbl = 0;
+
+	isp->sbl_resources &= ~res;
+
+	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
+		sbl |= ISPCTRL_SBL_SHARED_RPORTA;
+
+	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
+		sbl |= ISPCTRL_SBL_SHARED_RPORTB;
+
+	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
+		sbl |= ISPCTRL_SBL_SHARED_WPORTC;
+
+	if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
+		sbl |= ISPCTRL_SBL_WR0_RAM_EN;
+
+	if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
+		sbl |= ISPCTRL_SBL_WR1_RAM_EN;
+
+	if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
+		sbl |= ISPCTRL_SBL_RD_RAM_EN;
+
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
+}
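+
+/*
+ * Illustrative sketch (not part of the original driver): a submodule that
+ * reads from or writes to memory typically claims its SBL resource before
+ * starting and releases it when done, e.g. for a hypothetical resizer
+ * enable/disable pair:
+ *
+ *	omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
+ *	... start the resizer and stream frames to memory ...
+ *	omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
+ *
+ * The bookkeeping in isp->sbl_resources keeps the shared read/write ports
+ * enabled as long as at least one module still needs them.
+ */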
+
+/*
+ * omap3isp_module_sync_idle - Helper to sync a module with its idle state
+ * @me: ISP submodule's media entity
+ * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag which tells that the module wants to stop
+ *
+ * This function checks whether the ISP submodule needs to wait for the next
+ * interrupt. If so, it makes the caller sleep while waiting for that event.
+ */
+int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+			      atomic_t *stopping)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(me);
+
+	if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
+	    (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
+	     !isp_pipeline_ready(pipe)))
+		return 0;
+
+	/*
+	 * atomic_set() does not imply a memory barrier on ARM SMP platforms,
+	 * so call smp_mb() explicitly here to avoid race conditions.
+	 */
+	atomic_set(stopping, 1);
+	smp_mb();
+
+	/*
+	 * If the module is the last one in the pipeline, it writes to memory.
+	 * In that case it's necessary to check whether the module is already
+	 * paused due to a DMA queue underrun, or whether it has to wait for
+	 * the next interrupt to become idle.
+	 * If it isn't the last one, the function won't sleep, but *stopping
+	 * will still be set so that the next submodule's interrupt handler
+	 * knows that this module wants to become idle.
+	 */
+	if (isp_pipeline_is_last(me)) {
+		struct isp_video *video = pipe->output;
+		unsigned long flags;
+		spin_lock_irqsave(&video->queue->irqlock, flags);
+		if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
+			spin_unlock_irqrestore(&video->queue->irqlock, flags);
+			atomic_set(stopping, 0);
+			smp_mb();
+			return 0;
+		}
+		spin_unlock_irqrestore(&video->queue->irqlock, flags);
+		if (!wait_event_timeout(*wait, !atomic_read(stopping),
+					msecs_to_jiffies(1000))) {
+			atomic_set(stopping, 0);
+			smp_mb();
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * omap3isp_module_sync_is_stopping - Helper to verify if a module was stopping
+ * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag which tells that the module wants to stop
+ *
+ * This function checks whether the ISP submodule was stopping. If so, it
+ * notifies the caller by setting *stopping to 0 and waking up the wait queue.
+ * Returns 1 if the module was stopping, or 0 otherwise.
+ */
+int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
+				     atomic_t *stopping)
+{
+	if (atomic_cmpxchg(stopping, 1, 0)) {
+		wake_up(wait);
+		return 1;
+	}
+
+	return 0;
+}
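+
+/*
+ * Illustrative sketch (not part of the original driver): the two helpers
+ * above are meant to be used in pairs. A submodule's streamoff path calls
+ * omap3isp_module_sync_idle() to wait until the hardware becomes idle, while
+ * its interrupt handler calls omap3isp_module_sync_is_stopping() to wake the
+ * waiter up:
+ *
+ *	// streamoff path
+ *	omap3isp_module_sync_idle(&module->subdev.entity, &module->wait,
+ *				  &module->stopping);
+ *
+ *	// interrupt handler
+ *	if (omap3isp_module_sync_is_stopping(&module->wait, &module->stopping))
+ *		return;
+ *
+ * The "module" pointer and its wait/stopping members stand for a submodule's
+ * private data and are named here for illustration only.
+ */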
+
+/* --------------------------------------------------------------------------
+ * Clock management
+ */
+
+#define ISPCTRL_CLKS_MASK	(ISPCTRL_H3A_CLK_EN | \
+				 ISPCTRL_HIST_CLK_EN | \
+				 ISPCTRL_RSZ_CLK_EN | \
+				 (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \
+				 (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN))
+
+static void __isp_subclk_update(struct isp_device *isp)
+{
+	u32 clk = 0;
+
+	/* AEWB and AF share the same clock. */
+	if (isp->subclk_resources &
+	    (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF))
+		clk |= ISPCTRL_H3A_CLK_EN;
+
+	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST)
+		clk |= ISPCTRL_HIST_CLK_EN;
+
+	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER)
+		clk |= ISPCTRL_RSZ_CLK_EN;
+
+	/* NOTE: For the CCDC & Preview submodules the internal RAM enable
+	 *       bits must be set along with the clock enable bits.
+	 */
+	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC)
+		clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN;
+
+	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW)
+		clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN;
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
+			ISPCTRL_CLKS_MASK, clk);
+}
+
+void omap3isp_subclk_enable(struct isp_device *isp,
+			    enum isp_subclk_resource res)
+{
+	isp->subclk_resources |= res;
+
+	__isp_subclk_update(isp);
+}
+
+void omap3isp_subclk_disable(struct isp_device *isp,
+			     enum isp_subclk_resource res)
+{
+	isp->subclk_resources &= ~res;
+
+	__isp_subclk_update(isp);
+}
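+
+/*
+ * Illustrative sketch (not part of the original driver): submodules bracket
+ * their processing with the subclk helpers, e.g. a hypothetical preview
+ * enable/disable pair:
+ *
+ *	omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
+ *	... process frames ...
+ *	omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
+ *
+ * __isp_subclk_update() then translates the accumulated resource mask into
+ * the ISP_CTRL clock (and internal RAM) enable bits.
+ */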
+
+/*
+ * isp_enable_clocks - Enable ISP clocks
+ * @isp: OMAP3 ISP device
+ *
+ * Return 0 if successful, or the clk_enable() return value if any of them
+ * fails.
+ */
+static int isp_enable_clocks(struct isp_device *isp)
+{
+	int r;
+	unsigned long rate;
+	int divisor;
+
+	/*
+	 * cam_mclk clock chain:
+	 *   dpll4 -> dpll4_m5 -> dpll4_m5x2 -> cam_mclk
+	 *
+	 * On OMAP3630, dpll4_m5x2 != 2 x dpll4_m5, but both are
+	 * set to the same value. Hence the rate set for dpll4_m5
+	 * has to be twice the rate used on OMAP3430 to get the
+	 * required cam_mclk value.
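+	 *
+	 * (For example, to obtain cam_mclk = CM_CAM_MCLK_HZ, the code below
+	 * requests a dpll4_m5 rate of CM_CAM_MCLK_HZ / 2 on OMAP3430
+	 * (divisor = 2) and CM_CAM_MCLK_HZ on OMAP3630 (divisor = 1).)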
+	 */
+	if (cpu_is_omap3630())
+		divisor = 1;
+	else
+		divisor = 2;
+
+	r = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+	if (r) {
+		dev_err(isp->dev, "clk_enable cam_ick failed\n");
+		goto out_clk_enable_ick;
+	}
+	r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
+			 CM_CAM_MCLK_HZ/divisor);
+	if (r) {
+		dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
+		goto out_clk_enable_mclk;
+	}
+	r = clk_enable(isp->clock[ISP_CLK_CAM_MCLK]);
+	if (r) {
+		dev_err(isp->dev, "clk_enable cam_mclk failed\n");
+		goto out_clk_enable_mclk;
+	}
+	rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
+	if (rate != CM_CAM_MCLK_HZ)
+		dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
+				   " expected : %d\n"
+				   " actual   : %ld\n", CM_CAM_MCLK_HZ, rate);
+	r = clk_enable(isp->clock[ISP_CLK_CSI2_FCK]);
+	if (r) {
+		dev_err(isp->dev, "clk_enable csi2_fck failed\n");
+		goto out_clk_enable_csi2_fclk;
+	}
+	return 0;
+
+out_clk_enable_csi2_fclk:
+	clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
+out_clk_enable_mclk:
+	clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+out_clk_enable_ick:
+	return r;
+}
+
+/*
+ * isp_disable_clocks - Disable ISP clocks
+ * @isp: OMAP3 ISP device
+ */
+static void isp_disable_clocks(struct isp_device *isp)
+{
+	clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+	clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
+	clk_disable(isp->clock[ISP_CLK_CSI2_FCK]);
+}
+
+static const char *isp_clocks[] = {
+	"cam_ick",
+	"cam_mclk",
+	"dpll4_m5_ck",
+	"csi2_96m_fck",
+	"l3_ick",
+};
+
+static void isp_put_clocks(struct isp_device *isp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
+		if (isp->clock[i]) {
+			clk_put(isp->clock[i]);
+			isp->clock[i] = NULL;
+		}
+	}
+}
+
+static int isp_get_clocks(struct isp_device *isp)
+{
+	struct clk *clk;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
+		clk = clk_get(isp->dev, isp_clocks[i]);
+		if (IS_ERR(clk)) {
+			dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
+			isp_put_clocks(isp);
+			return PTR_ERR(clk);
+		}
+
+		isp->clock[i] = clk;
+	}
+
+	return 0;
+}
+
+/*
+ * omap3isp_get - Acquire the ISP resource.
+ *
+ * Initializes the clocks for the first acquire.
+ *
+ * Increment the reference count on the ISP. If the first reference is taken,
+ * enable the clocks and power up all submodules.
+ *
+ * Return a pointer to the ISP device structure, or NULL if an error occurred.
+ */
+static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
+{
+	struct isp_device *__isp = isp;
+
+	if (isp == NULL)
+		return NULL;
+
+	mutex_lock(&isp->isp_mutex);
+	if (isp->ref_count > 0)
+		goto out;
+
+	if (isp_enable_clocks(isp) < 0) {
+		__isp = NULL;
+		goto out;
+	}
+
+	/* We don't want to restore context before saving it! */
+	if (isp->has_context)
+		isp_restore_ctx(isp);
+
+	if (irq)
+		isp_enable_interrupts(isp);
+
+out:
+	if (__isp != NULL)
+		isp->ref_count++;
+	mutex_unlock(&isp->isp_mutex);
+
+	return __isp;
+}
+
+struct isp_device *omap3isp_get(struct isp_device *isp)
+{
+	return __omap3isp_get(isp, true);
+}
+
+/*
+ * omap3isp_put - Release the ISP
+ *
+ * Decrement the reference count on the ISP. If the last reference is released,
+ * power down all submodules, disable the clocks and free temporary buffers.
+ */
+void omap3isp_put(struct isp_device *isp)
+{
+	if (isp == NULL)
+		return;
+
+	mutex_lock(&isp->isp_mutex);
+	BUG_ON(isp->ref_count == 0);
+	if (--isp->ref_count == 0) {
+		isp_disable_interrupts(isp);
+		if (isp->domain) {
+			isp_save_ctx(isp);
+			isp->has_context = 1;
+		}
+		/* Reset the ISP if an entity has failed to stop. This is the
+		 * only way to recover from such conditions.
+		 */
+		if (isp->crashed)
+			isp_reset(isp);
+		isp_disable_clocks(isp);
+	}
+	mutex_unlock(&isp->isp_mutex);
+}
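+
+/*
+ * Illustrative sketch (not part of the original driver): users of the ISP
+ * bracket register accesses with the get/put pair so that clocks and context
+ * are only kept alive while at least one user holds a reference, e.g.:
+ *
+ *	if (omap3isp_get(isp) == NULL)
+ *		return -EBUSY;
+ *	... access ISP registers ...
+ *	omap3isp_put(isp);
+ *
+ * The -EBUSY return value is only an example; callers pick whatever error
+ * code fits their context.
+ */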
+
+/* --------------------------------------------------------------------------
+ * Platform device driver
+ */
+
+/*
+ * omap3isp_print_status - Prints the values of the ISP Control Module registers
+ * @isp: OMAP3 ISP device
+ */
+#define ISP_PRINT_REGISTER(isp, name)\
+	dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
+#define SBL_PRINT_REGISTER(isp, name)\
+	dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
+
+void omap3isp_print_status(struct isp_device *isp)
+{
+	dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
+
+	ISP_PRINT_REGISTER(isp, SYSCONFIG);
+	ISP_PRINT_REGISTER(isp, SYSSTATUS);
+	ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
+	ISP_PRINT_REGISTER(isp, IRQ0STATUS);
+	ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
+	ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
+	ISP_PRINT_REGISTER(isp, CTRL);
+	ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
+	ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
+	ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
+	ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
+	ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
+	ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
+	ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
+	ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
+
+	SBL_PRINT_REGISTER(isp, PCR);
+	SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
+
+	dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * Power management support.
+ *
+ * As the ISP can't properly handle an input video stream interruption on a
+ * non-frame boundary, the ISP pipelines need to be stopped before the sensors
+ * get suspended. However, as suspending the sensors can require a running
+ * clock, which can be provided by the ISP, the ISP can't be completely
+ * suspended before the sensors.
+ *
+ * To solve this problem, power management support is split into
+ * prepare/complete and suspend/resume operations. The pipelines are stopped in
+ * prepare() and the ISP clocks are disabled in suspend(). Similarly, the
+ * clocks are re-enabled in resume(), and the pipelines are restarted in
+ * complete().
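+ *
+ * The resulting ordering around a system suspend (the sensors are suspended
+ * and resumed by their own drivers in between) is:
+ *
+ *   prepare()  - stop pipelines, disable interrupts, save context, reset if
+ *                the modules failed to stop
+ *   suspend()  - disable ISP clocks
+ *   resume()   - enable ISP clocks
+ *   complete() - restore context, enable interrupts, restart pipelines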
+ *
+ * TODO: PM dependencies between the ISP and sensors are not modeled explicitly
+ * yet.
+ */
+static int isp_pm_prepare(struct device *dev)
+{
+	struct isp_device *isp = dev_get_drvdata(dev);
+	int reset;
+
+	WARN_ON(mutex_is_locked(&isp->isp_mutex));
+
+	if (isp->ref_count == 0)
+		return 0;
+
+	reset = isp_suspend_modules(isp);
+	isp_disable_interrupts(isp);
+	isp_save_ctx(isp);
+	if (reset)
+		isp_reset(isp);
+
+	return 0;
+}
+
+static int isp_pm_suspend(struct device *dev)
+{
+	struct isp_device *isp = dev_get_drvdata(dev);
+
+	WARN_ON(mutex_is_locked(&isp->isp_mutex));
+
+	if (isp->ref_count)
+		isp_disable_clocks(isp);
+
+	return 0;
+}
+
+static int isp_pm_resume(struct device *dev)
+{
+	struct isp_device *isp = dev_get_drvdata(dev);
+
+	if (isp->ref_count == 0)
+		return 0;
+
+	return isp_enable_clocks(isp);
+}
+
+static void isp_pm_complete(struct device *dev)
+{
+	struct isp_device *isp = dev_get_drvdata(dev);
+
+	if (isp->ref_count == 0)
+		return;
+
+	isp_restore_ctx(isp);
+	isp_enable_interrupts(isp);
+	isp_resume_modules(isp);
+}
+
+#else
+
+#define isp_pm_prepare	NULL
+#define isp_pm_suspend	NULL
+#define isp_pm_resume	NULL
+#define isp_pm_complete	NULL
+
+#endif /* CONFIG_PM */
+
+static void isp_unregister_entities(struct isp_device *isp)
+{
+	omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
+	omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
+	omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
+	omap3isp_preview_unregister_entities(&isp->isp_prev);
+	omap3isp_resizer_unregister_entities(&isp->isp_res);
+	omap3isp_stat_unregister_entities(&isp->isp_aewb);
+	omap3isp_stat_unregister_entities(&isp->isp_af);
+	omap3isp_stat_unregister_entities(&isp->isp_hist);
+
+	v4l2_device_unregister(&isp->v4l2_dev);
+	media_device_unregister(&isp->media_dev);
+}
+
+/*
+ * isp_register_subdev_group - Register a group of subdevices
+ * @isp: OMAP3 ISP device
+ * @board_info: I2C subdevs board information array
+ *
+ * Register all I2C subdevices in the board_info array. The array must be
+ * terminated by a NULL entry, and the first entry must be the sensor.
+ *
+ * Return a pointer to the sensor media entity if it has been successfully
+ * registered, or NULL otherwise.
+ */
+static struct v4l2_subdev *
+isp_register_subdev_group(struct isp_device *isp,
+		     struct isp_subdev_i2c_board_info *board_info)
+{
+	struct v4l2_subdev *sensor = NULL;
+	unsigned int first;
+
+	if (board_info->board_info == NULL)
+		return NULL;
+
+	for (first = 1; board_info->board_info; ++board_info, first = 0) {
+		struct v4l2_subdev *subdev;
+		struct i2c_adapter *adapter;
+
+		adapter = i2c_get_adapter(board_info->i2c_adapter_id);
+		if (adapter == NULL) {
+			printk(KERN_ERR "%s: Unable to get I2C adapter %d for "
+				"device %s\n", __func__,
+				board_info->i2c_adapter_id,
+				board_info->board_info->type);
+			continue;
+		}
+
+		subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
+				board_info->board_info, NULL);
+		if (subdev == NULL) {
+			printk(KERN_ERR "%s: Unable to register subdev %s\n",
+				__func__, board_info->board_info->type);
+			continue;
+		}
+
+		if (first)
+			sensor = subdev;
+	}
+
+	return sensor;
+}
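+
+/*
+ * Illustrative sketch (not part of the original driver): board code passes a
+ * NULL-terminated array whose first entry is the sensor, e.g. with a
+ * hypothetical "mt9p031" sensor at address 0x48 on I2C bus 2:
+ *
+ *	static struct i2c_board_info camera_i2c_devices[] = {
+ *		{ I2C_BOARD_INFO("mt9p031", 0x48), },
+ *	};
+ *
+ *	static struct isp_subdev_i2c_board_info camera_subdevs[] = {
+ *		{ .board_info = &camera_i2c_devices[0], .i2c_adapter_id = 2, },
+ *		{ .board_info = NULL, },
+ *	};
+ *
+ * The device name, address and adapter number above are examples only.
+ */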
+
+static int isp_register_entities(struct isp_device *isp)
+{
+	struct isp_platform_data *pdata = isp->pdata;
+	struct isp_v4l2_subdevs_group *subdevs;
+	int ret;
+
+	isp->media_dev.dev = isp->dev;
+	strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
+		sizeof(isp->media_dev.model));
+	isp->media_dev.hw_revision = isp->revision;
+	isp->media_dev.link_notify = isp_pipeline_link_notify;
+	ret = media_device_register(&isp->media_dev);
+	if (ret < 0) {
+		printk(KERN_ERR "%s: Media device registration failed (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	isp->v4l2_dev.mdev = &isp->media_dev;
+	ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+	if (ret < 0) {
+		printk(KERN_ERR "%s: V4L2 device registration failed (%d)\n",
+			__func__, ret);
+		goto done;
+	}
+
+	/* Register internal entities */
+	ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_preview_register_entities(&isp->isp_prev,
+						 &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	/* Register external entities */
+	for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
+		struct v4l2_subdev *sensor;
+		struct media_entity *input;
+		unsigned int flags;
+		unsigned int pad;
+
+		sensor = isp_register_subdev_group(isp, subdevs->subdevs);
+		if (sensor == NULL)
+			continue;
+
+		sensor->host_priv = subdevs;
+
+		/* Connect the sensor to the correct interface module. Parallel
+		 * sensors are connected directly to the CCDC, while serial
+		 * sensors are connected to the CSI2a, CCP2b or CSI2c receiver
+		 * through CSIPHY1 or CSIPHY2.
+		 */
+		switch (subdevs->interface) {
+		case ISP_INTERFACE_PARALLEL:
+			input = &isp->isp_ccdc.subdev.entity;
+			pad = CCDC_PAD_SINK;
+			flags = 0;
+			break;
+
+		case ISP_INTERFACE_CSI2A_PHY2:
+			input = &isp->isp_csi2a.subdev.entity;
+			pad = CSI2_PAD_SINK;
+			flags = MEDIA_LNK_FL_IMMUTABLE
+			      | MEDIA_LNK_FL_ENABLED;
+			break;
+
+		case ISP_INTERFACE_CCP2B_PHY1:
+		case ISP_INTERFACE_CCP2B_PHY2:
+			input = &isp->isp_ccp2.subdev.entity;
+			pad = CCP2_PAD_SINK;
+			flags = 0;
+			break;
+
+		case ISP_INTERFACE_CSI2C_PHY1:
+			input = &isp->isp_csi2c.subdev.entity;
+			pad = CSI2_PAD_SINK;
+			flags = MEDIA_LNK_FL_IMMUTABLE
+			      | MEDIA_LNK_FL_ENABLED;
+			break;
+
+		default:
+			printk(KERN_ERR "%s: invalid interface type %u\n",
+			       __func__, subdevs->interface);
+			ret = -EINVAL;
+			goto done;
+		}
+
+		ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+					       flags);
+		if (ret < 0)
+			goto done;
+	}
+
+	ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+
+done:
+	if (ret < 0)
+		isp_unregister_entities(isp);
+
+	return ret;
+}
+
+static void isp_cleanup_modules(struct isp_device *isp)
+{
+	omap3isp_h3a_aewb_cleanup(isp);
+	omap3isp_h3a_af_cleanup(isp);
+	omap3isp_hist_cleanup(isp);
+	omap3isp_resizer_cleanup(isp);
+	omap3isp_preview_cleanup(isp);
+	omap3isp_ccdc_cleanup(isp);
+	omap3isp_ccp2_cleanup(isp);
+	omap3isp_csi2_cleanup(isp);
+}
+
+static int isp_initialize_modules(struct isp_device *isp)
+{
+	int ret;
+
+	ret = omap3isp_csiphy_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "CSI PHY initialization failed\n");
+		goto error_csiphy;
+	}
+
+	ret = omap3isp_csi2_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "CSI2 initialization failed\n");
+		goto error_csi2;
+	}
+
+	ret = omap3isp_ccp2_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "CCP2 initialization failed\n");
+		goto error_ccp2;
+	}
+
+	ret = omap3isp_ccdc_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "CCDC initialization failed\n");
+		goto error_ccdc;
+	}
+
+	ret = omap3isp_preview_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "Preview initialization failed\n");
+		goto error_preview;
+	}
+
+	ret = omap3isp_resizer_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "Resizer initialization failed\n");
+		goto error_resizer;
+	}
+
+	ret = omap3isp_hist_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "Histogram initialization failed\n");
+		goto error_hist;
+	}
+
+	ret = omap3isp_h3a_aewb_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "H3A AEWB initialization failed\n");
+		goto error_h3a_aewb;
+	}
+
+	ret = omap3isp_h3a_af_init(isp);
+	if (ret < 0) {
+		dev_err(isp->dev, "H3A AF initialization failed\n");
+		goto error_h3a_af;
+	}
+
+	/* Connect the submodules. */
+	ret = media_entity_create_link(
+			&isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+			&isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
+			&isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
+			&isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+			&isp->isp_aewb.subdev.entity, 0,
+			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+			&isp->isp_af.subdev.entity, 0,
+			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
+			&isp->isp_hist.subdev.entity, 0,
+			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap3isp_h3a_af_cleanup(isp);
+error_h3a_af:
+	omap3isp_h3a_aewb_cleanup(isp);
+error_h3a_aewb:
+	omap3isp_hist_cleanup(isp);
+error_hist:
+	omap3isp_resizer_cleanup(isp);
+error_resizer:
+	omap3isp_preview_cleanup(isp);
+error_preview:
+	omap3isp_ccdc_cleanup(isp);
+error_ccdc:
+	omap3isp_ccp2_cleanup(isp);
+error_ccp2:
+	omap3isp_csi2_cleanup(isp);
+error_csi2:
+error_csiphy:
+	return ret;
+}
+
+/*
+ * isp_remove - Remove ISP platform device
+ * @pdev: Pointer to ISP platform device
+ *
+ * Always returns 0.
+ */
+static int __devexit isp_remove(struct platform_device *pdev)
+{
+	struct isp_device *isp = platform_get_drvdata(pdev);
+	int i;
+
+	isp_unregister_entities(isp);
+	isp_cleanup_modules(isp);
+
+	__omap3isp_get(isp, false);
+	iommu_detach_device(isp->domain, &pdev->dev);
+	iommu_domain_free(isp->domain);
+	isp->domain = NULL;
+	omap3isp_put(isp);
+
+	free_irq(isp->irq_num, isp);
+	isp_put_clocks(isp);
+
+	for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
+		if (isp->mmio_base[i]) {
+			iounmap(isp->mmio_base[i]);
+			isp->mmio_base[i] = NULL;
+		}
+
+		if (isp->mmio_base_phys[i]) {
+			release_mem_region(isp->mmio_base_phys[i],
+					   isp->mmio_size[i]);
+			isp->mmio_base_phys[i] = 0;
+		}
+	}
+
+	regulator_put(isp->isp_csiphy1.vdd);
+	regulator_put(isp->isp_csiphy2.vdd);
+	kfree(isp);
+
+	return 0;
+}
+
+static int isp_map_mem_resource(struct platform_device *pdev,
+				struct isp_device *isp,
+				enum isp_mem_resources res)
+{
+	struct resource *mem;
+
+	/* request the mem region for the camera registers */
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
+	if (!mem) {
+		dev_err(isp->dev, "no mem resource?\n");
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+		dev_err(isp->dev,
+			"cannot reserve camera register I/O region\n");
+		return -ENODEV;
+	}
+	isp->mmio_base_phys[res] = mem->start;
+	isp->mmio_size[res] = resource_size(mem);
+
+	/* map the region */
+	isp->mmio_base[res] = ioremap_nocache(isp->mmio_base_phys[res],
+					      isp->mmio_size[res]);
+	if (!isp->mmio_base[res]) {
+		dev_err(isp->dev, "cannot map camera register I/O region\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * isp_probe - Probe ISP platform device
+ * @pdev: Pointer to ISP platform device
+ *
+ * Returns 0 if successful,
+ *   -ENOMEM if no memory is available,
+ *   -ENODEV if no platform device resources are found
+ *     or there is no space for remapping registers,
+ *   -EINVAL if the ISR can't be installed,
+ *   or the clk_get() error value.
+ */
+static int __devinit isp_probe(struct platform_device *pdev)
+{
+	struct isp_platform_data *pdata = pdev->dev.platform_data;
+	struct isp_device *isp;
+	int ret;
+	int i, m;
+
+	if (pdata == NULL)
+		return -EINVAL;
+
+	isp = kzalloc(sizeof(*isp), GFP_KERNEL);
+	if (!isp) {
+		dev_err(&pdev->dev, "could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	isp->autoidle = autoidle;
+	isp->platform_cb.set_xclk = isp_set_xclk;
+
+	mutex_init(&isp->isp_mutex);
+	spin_lock_init(&isp->stat_lock);
+
+	isp->dev = &pdev->dev;
+	isp->pdata = pdata;
+	isp->ref_count = 0;
+
+	isp->raw_dmamask = DMA_BIT_MASK(32);
+	isp->dev->dma_mask = &isp->raw_dmamask;
+	isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	platform_set_drvdata(pdev, isp);
+
+	/* Regulators */
+	isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
+	isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
+
+	/* Clocks */
+	ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
+	if (ret < 0)
+		goto error;
+
+	ret = isp_get_clocks(isp);
+	if (ret < 0)
+		goto error;
+
+	if (__omap3isp_get(isp, false) == NULL) {
+		/* Enabling the clocks failed; don't return a stale 0. */
+		ret = -ENODEV;
+		goto error;
+	}
+
+	ret = isp_reset(isp);
+	if (ret < 0)
+		goto error_isp;
+
+	/* Memory resources */
+	isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+	dev_info(isp->dev, "Revision %d.%d found\n",
+		 (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
+
+	for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
+		if (isp->revision == isp_res_maps[m].isp_rev)
+			break;
+
+	if (m == ARRAY_SIZE(isp_res_maps)) {
+		dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
+			(isp->revision & 0xf0) >> 4, isp->revision & 0xf);
+		ret = -ENODEV;
+		goto error_isp;
+	}
+
+	for (i = 1; i < OMAP3_ISP_IOMEM_LAST; i++) {
+		if (isp_res_maps[m].map & 1 << i) {
+			ret = isp_map_mem_resource(pdev, isp, i);
+			if (ret)
+				goto error_isp;
+		}
+	}
+
+	isp->domain = iommu_domain_alloc(pdev->dev.bus);
+	if (!isp->domain) {
+		dev_err(isp->dev, "can't alloc iommu domain\n");
+		ret = -ENOMEM;
+		goto error_isp;
+	}
+
+	ret = iommu_attach_device(isp->domain, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
+		goto free_domain;
+	}
+
+	/* Interrupt */
+	isp->irq_num = platform_get_irq(pdev, 0);
+	if (isp->irq_num <= 0) {
+		dev_err(isp->dev, "No IRQ resource\n");
+		ret = -ENODEV;
+		goto detach_dev;
+	}
+
+	if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
+		dev_err(isp->dev, "Unable to request IRQ\n");
+		ret = -EINVAL;
+		goto detach_dev;
+	}
+
+	/* Entities */
+	ret = isp_initialize_modules(isp);
+	if (ret < 0)
+		goto error_irq;
+
+	ret = isp_register_entities(isp);
+	if (ret < 0)
+		goto error_modules;
+
+	isp_core_init(isp, 1);
+	omap3isp_put(isp);
+
+	return 0;
+
+error_modules:
+	isp_cleanup_modules(isp);
+error_irq:
+	free_irq(isp->irq_num, isp);
+detach_dev:
+	iommu_detach_device(isp->domain, &pdev->dev);
+free_domain:
+	iommu_domain_free(isp->domain);
+error_isp:
+	omap3isp_put(isp);
+error:
+	isp_put_clocks(isp);
+
+	for (i = 0; i < OMAP3_ISP_IOMEM_LAST; i++) {
+		if (isp->mmio_base[i]) {
+			iounmap(isp->mmio_base[i]);
+			isp->mmio_base[i] = NULL;
+		}
+
+		if (isp->mmio_base_phys[i]) {
+			release_mem_region(isp->mmio_base_phys[i],
+					   isp->mmio_size[i]);
+			isp->mmio_base_phys[i] = 0;
+		}
+	}
+	regulator_put(isp->isp_csiphy2.vdd);
+	regulator_put(isp->isp_csiphy1.vdd);
+	platform_set_drvdata(pdev, NULL);
+
+	mutex_destroy(&isp->isp_mutex);
+	kfree(isp);
+
+	return ret;
+}
+
+static const struct dev_pm_ops omap3isp_pm_ops = {
+	.prepare = isp_pm_prepare,
+	.suspend = isp_pm_suspend,
+	.resume = isp_pm_resume,
+	.complete = isp_pm_complete,
+};
+
+static struct platform_device_id omap3isp_id_table[] = {
+	{ "omap3isp", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
+
+static struct platform_driver omap3isp_driver = {
+	.probe = isp_probe,
+	.remove = __devexit_p(isp_remove),
+	.id_table = omap3isp_id_table,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "omap3isp",
+		.pm	= &omap3isp_pm_ops,
+	},
+};
+
+module_platform_driver(omap3isp_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("TI OMAP3 ISP driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
new file mode 100644
index 0000000..8be7487
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -0,0 +1,352 @@
+/*
+ * isp.h
+ *
+ * TI OMAP3 ISP - Core
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CORE_H
+#define OMAP3_ISP_CORE_H
+
+#include <media/omap3isp.h>
+#include <media/v4l2-device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/iommu.h>
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
+
+#include "ispstat.h"
+#include "ispccdc.h"
+#include "ispreg.h"
+#include "ispresizer.h"
+#include "isppreview.h"
+#include "ispcsiphy.h"
+#include "ispcsi2.h"
+#include "ispccp2.h"
+
+#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
+
+#define ISP_TOK_TERM		0xFFFFFFFF	/*
+						 * terminating token for ISP
+						 * modules reg list
+						 */
+#define to_isp_device(ptr_module)				\
+	container_of(ptr_module, struct isp_device, isp_##ptr_module)
+#define to_device(ptr_module)						\
+	(to_isp_device(ptr_module)->dev)
+
+enum isp_mem_resources {
+	OMAP3_ISP_IOMEM_MAIN,
+	OMAP3_ISP_IOMEM_CCP2,
+	OMAP3_ISP_IOMEM_CCDC,
+	OMAP3_ISP_IOMEM_HIST,
+	OMAP3_ISP_IOMEM_H3A,
+	OMAP3_ISP_IOMEM_PREV,
+	OMAP3_ISP_IOMEM_RESZ,
+	OMAP3_ISP_IOMEM_SBL,
+	OMAP3_ISP_IOMEM_CSI2A_REGS1,
+	OMAP3_ISP_IOMEM_CSIPHY2,
+	OMAP3_ISP_IOMEM_CSI2A_REGS2,
+	OMAP3_ISP_IOMEM_CSI2C_REGS1,
+	OMAP3_ISP_IOMEM_CSIPHY1,
+	OMAP3_ISP_IOMEM_CSI2C_REGS2,
+	OMAP3_ISP_IOMEM_LAST
+};
+
+enum isp_sbl_resource {
+	OMAP3_ISP_SBL_CSI1_READ		= 0x1,
+	OMAP3_ISP_SBL_CSI1_WRITE	= 0x2,
+	OMAP3_ISP_SBL_CSI2A_WRITE	= 0x4,
+	OMAP3_ISP_SBL_CSI2C_WRITE	= 0x8,
+	OMAP3_ISP_SBL_CCDC_LSC_READ	= 0x10,
+	OMAP3_ISP_SBL_CCDC_WRITE	= 0x20,
+	OMAP3_ISP_SBL_PREVIEW_READ	= 0x40,
+	OMAP3_ISP_SBL_PREVIEW_WRITE	= 0x80,
+	OMAP3_ISP_SBL_RESIZER_READ	= 0x100,
+	OMAP3_ISP_SBL_RESIZER_WRITE	= 0x200,
+};
+
+enum isp_subclk_resource {
+	OMAP3_ISP_SUBCLK_CCDC		= (1 << 0),
+	OMAP3_ISP_SUBCLK_AEWB		= (1 << 1),
+	OMAP3_ISP_SUBCLK_AF		= (1 << 2),
+	OMAP3_ISP_SUBCLK_HIST		= (1 << 3),
+	OMAP3_ISP_SUBCLK_PREVIEW	= (1 << 4),
+	OMAP3_ISP_SUBCLK_RESIZER	= (1 << 5),
+};
+
+/* ISP: OMAP 34xx ES 1.0 */
+#define ISP_REVISION_1_0		0x10
+/* ISP2: OMAP 34xx ES 2.0, 2.1 and 3.0 */
+#define ISP_REVISION_2_0		0x20
+/* ISP2P: OMAP 36xx */
+#define ISP_REVISION_15_0		0xF0
+
+/*
+ * struct isp_res_mapping - Map ISP I/O resources to an ISP revision.
+ * @isp_rev: ISP_REVISION_x_x
+ * @map: bitmap for enum isp_mem_resources
+ */
+struct isp_res_mapping {
+	u32 isp_rev;
+	u32 map;
+};
+
+/*
+ * struct isp_reg - Structure for ISP register values.
+ * @mmio_range: ISP memory range the register belongs to.
+ * @reg: 32-bit Register address.
+ * @val: 32-bit Register value.
+ */
+struct isp_reg {
+	enum isp_mem_resources mmio_range;
+	u32 reg;
+	u32 val;
+};
+
+struct isp_platform_callback {
+	u32 (*set_xclk)(struct isp_device *isp, u32 xclk, u8 xclksel);
+	int (*csiphy_config)(struct isp_csiphy *phy,
+			     struct isp_csiphy_dphy_cfg *dphy,
+			     struct isp_csiphy_lanes_cfg *lanes);
+};
+
+/*
+ * struct isp_device - ISP device structure.
+ * @dev: Device pointer specific to the OMAP3 ISP.
+ * @revision: Stores current ISP module revision.
+ * @irq_num: Currently used IRQ number.
+ * @mmio_base: Array with kernel base addresses for ioremapped ISP register
+ *             regions.
+ * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
+ *                  regions.
+ * @mmio_size: Array with ISP register regions size in bytes.
+ * @raw_dmamask: Raw DMA mask
+ * @stat_lock: Spinlock for handling statistics
+ * @isp_mutex: Mutex for serializing requests to ISP.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
+ * @has_context: Context has been saved at least once and can be restored.
+ * @ref_count: Reference count for handling multiple ISP requests.
+ * @clock: Array of ISP clocks: cam_ick, cam_mclk, dpll4_m5_ck, csi2_96m_fck
+ *         and l3_ick.
+ * @isp_af: ISP AutoFocus (AF) statistics module.
+ * @isp_aewb: ISP Auto Exposure and White Balance (AEWB) statistics module.
+ * @isp_hist: ISP Histogram statistics module.
+ * @isp_res: ISP Resizer module.
+ * @isp_prev: ISP Preview module.
+ * @isp_ccdc: ISP CCDC module.
+ * @domain: IOMMU domain attached to the ISP.
+ * @platform_cb: ISP driver callback function pointers for platform code
+ *
+ * This structure is used to store the OMAP ISP Information.
+ */
+struct isp_device {
+	struct v4l2_device v4l2_dev;
+	struct media_device media_dev;
+	struct device *dev;
+	u32 revision;
+
+	/* platform HW resources */
+	struct isp_platform_data *pdata;
+	unsigned int irq_num;
+
+	void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
+	unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
+	resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
+
+	u64 raw_dmamask;
+
+	/* ISP Obj */
+	spinlock_t stat_lock;	/* common lock for statistic drivers */
+	struct mutex isp_mutex;	/* For handling ref_count field */
+	u32 crashed;
+	int has_context;
+	int ref_count;
+	unsigned int autoidle;
+	u32 xclk_divisor[2];	/* Two clocks, a and b. */
+#define ISP_CLK_CAM_ICK		0
+#define ISP_CLK_CAM_MCLK	1
+#define ISP_CLK_DPLL4_M5_CK	2
+#define ISP_CLK_CSI2_FCK	3
+#define ISP_CLK_L3_ICK		4
+	struct clk *clock[5];
+
+	/* ISP modules */
+	struct ispstat isp_af;
+	struct ispstat isp_aewb;
+	struct ispstat isp_hist;
+	struct isp_res_device isp_res;
+	struct isp_prev_device isp_prev;
+	struct isp_ccdc_device isp_ccdc;
+	struct isp_csi2_device isp_csi2a;
+	struct isp_csi2_device isp_csi2c;
+	struct isp_ccp2_device isp_ccp2;
+	struct isp_csiphy isp_csiphy1;
+	struct isp_csiphy isp_csiphy2;
+
+	unsigned int sbl_resources;
+	unsigned int subclk_resources;
+
+	struct iommu_domain *domain;
+
+	struct isp_platform_callback platform_cb;
+};
+
+#define v4l2_dev_to_isp_device(dev) \
+	container_of(dev, struct isp_device, v4l2_dev)
+
+void omap3isp_hist_dma_done(struct isp_device *isp);
+
+void omap3isp_flush(struct isp_device *isp);
+
+int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+			      atomic_t *stopping);
+
+int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
+				     atomic_t *stopping);
+
+int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
+				 enum isp_pipeline_stream_state state);
+void omap3isp_configure_bridge(struct isp_device *isp,
+			       enum ccdc_input_entity input,
+			       const struct isp_parallel_platform_data *pdata,
+			       unsigned int shift, unsigned int bridge);
+
+struct isp_device *omap3isp_get(struct isp_device *isp);
+void omap3isp_put(struct isp_device *isp);
+
+void omap3isp_print_status(struct isp_device *isp);
+
+void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res);
+void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res);
+
+void omap3isp_subclk_enable(struct isp_device *isp,
+			    enum isp_subclk_resource res);
+void omap3isp_subclk_disable(struct isp_device *isp,
+			     enum isp_subclk_resource res);
+
+int omap3isp_pipeline_pm_use(struct media_entity *entity, int use);
+
+int omap3isp_register_entities(struct platform_device *pdev,
+			       struct v4l2_device *v4l2_dev);
+void omap3isp_unregister_entities(struct platform_device *pdev);
+
+/*
+ * isp_reg_readl - Read value of an OMAP3 ISP register
+ * @isp: OMAP3 ISP device
+ * @isp_mmio_range: Range to which the register offset refers.
+ * @reg_offset: Register offset to read from.
+ *
+ * Returns an unsigned 32 bit value with the required register contents.
+ */
+static inline
+u32 isp_reg_readl(struct isp_device *isp, enum isp_mem_resources isp_mmio_range,
+		  u32 reg_offset)
+{
+	return __raw_readl(isp->mmio_base[isp_mmio_range] + reg_offset);
+}
+
+/*
+ * isp_reg_writel - Write value to an OMAP3 ISP register
+ * @isp: OMAP3 ISP device
+ * @reg_value: 32 bit value to write to the register.
+ * @isp_mmio_range: Range to which the register offset refers.
+ * @reg_offset: Register offset to write into.
+ */
+static inline
+void isp_reg_writel(struct isp_device *isp, u32 reg_value,
+		    enum isp_mem_resources isp_mmio_range, u32 reg_offset)
+{
+	__raw_writel(reg_value, isp->mmio_base[isp_mmio_range] + reg_offset);
+}
+
+/*
+ * isp_reg_clr - Clear individual bits in an OMAP3 ISP register
+ * @isp: OMAP3 ISP device
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @clr_bits: 32 bit value of the bits to be cleared in the register.
+ */
+static inline
+void isp_reg_clr(struct isp_device *isp, enum isp_mem_resources mmio_range,
+		 u32 reg, u32 clr_bits)
+{
+	u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+	isp_reg_writel(isp, v & ~clr_bits, mmio_range, reg);
+}
+
+/*
+ * isp_reg_set - Set individual bits in an OMAP3 ISP register
+ * @isp: OMAP3 ISP device
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @set_bits: 32 bit value of the bits to be set in the register.
+ */
+static inline
+void isp_reg_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
+		 u32 reg, u32 set_bits)
+{
+	u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+	isp_reg_writel(isp, v | set_bits, mmio_range, reg);
+}
+
+/*
+ * isp_reg_clr_set - Clear and set individual bits in an OMAP3 ISP register
+ * @isp: OMAP3 ISP device
+ * @mmio_range: Range to which the register offset refers.
+ * @reg: Register offset to work on.
+ * @clr_bits: 32 bit value of the bits to be cleared in the register.
+ * @set_bits: 32 bit value of the bits to be set in the register.
+ *
+ * The clear operation is done first, and then the set operation.
+ */
+static inline
+void isp_reg_clr_set(struct isp_device *isp, enum isp_mem_resources mmio_range,
+		     u32 reg, u32 clr_bits, u32 set_bits)
+{
+	u32 v = isp_reg_readl(isp, mmio_range, reg);
+
+	isp_reg_writel(isp, (v & ~clr_bits) | set_bits, mmio_range, reg);
+}
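+
+/*
+ * Illustrative sketch (not part of the original driver): the accessors above
+ * are typically used to update a bitfield without touching the rest of the
+ * register, as __isp_subclk_update() in isp.c does when updating the clock
+ * enable bits in ISP_CTRL:
+ *
+ *	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
+ *			ISPCTRL_CLKS_MASK, clk);
+ *
+ * This reads ISP_CTRL, clears the bits in ISPCTRL_CLKS_MASK, ORs in the new
+ * value and writes the result back (a non-atomic read-modify-write).
+ */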
+
+static inline enum v4l2_buf_type
+isp_pad_buffer_type(const struct v4l2_subdev *subdev, int pad)
+{
+	if (pad >= subdev->entity.num_pads)
+		return 0;
+
+	if (subdev->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)
+		return V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	else
+		return V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+#endif	/* OMAP3_ISP_CORE_H */
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
new file mode 100644
index 0000000..aa9df9d
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -0,0 +1,2583 @@
+/*
+ * ispccdc.c
+ *
+ * TI OMAP3 ISP - CCDC module
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccdc.h"
+
+#define CCDC_MIN_WIDTH		32
+#define CCDC_MIN_HEIGHT		32
+
+static struct v4l2_mbus_framefmt *
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+		  unsigned int pad, enum v4l2_subdev_format_whence which);
+
+static const unsigned int ccdc_fmts[] = {
+	V4L2_MBUS_FMT_Y8_1X8,
+	V4L2_MBUS_FMT_Y10_1X10,
+	V4L2_MBUS_FMT_Y12_1X12,
+	V4L2_MBUS_FMT_SGRBG8_1X8,
+	V4L2_MBUS_FMT_SRGGB8_1X8,
+	V4L2_MBUS_FMT_SBGGR8_1X8,
+	V4L2_MBUS_FMT_SGBRG8_1X8,
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SRGGB10_1X10,
+	V4L2_MBUS_FMT_SBGGR10_1X10,
+	V4L2_MBUS_FMT_SGBRG10_1X10,
+	V4L2_MBUS_FMT_SGRBG12_1X12,
+	V4L2_MBUS_FMT_SRGGB12_1X12,
+	V4L2_MBUS_FMT_SBGGR12_1X12,
+	V4L2_MBUS_FMT_SGBRG12_1X12,
+	V4L2_MBUS_FMT_YUYV8_2X8,
+	V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+/*
+ * ccdc_print_status - Print current CCDC Module register values.
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Also prints other debug information stored in the CCDC module.
+ */
+#define CCDC_PRINT_REGISTER(isp, name)\
+	dev_dbg(isp->dev, "###CCDC " #name "=0x%08x\n", \
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_##name))
+
+static void ccdc_print_status(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	dev_dbg(isp->dev, "-------------CCDC Register dump-------------\n");
+
+	CCDC_PRINT_REGISTER(isp, PCR);
+	CCDC_PRINT_REGISTER(isp, SYN_MODE);
+	CCDC_PRINT_REGISTER(isp, HD_VD_WID);
+	CCDC_PRINT_REGISTER(isp, PIX_LINES);
+	CCDC_PRINT_REGISTER(isp, HORZ_INFO);
+	CCDC_PRINT_REGISTER(isp, VERT_START);
+	CCDC_PRINT_REGISTER(isp, VERT_LINES);
+	CCDC_PRINT_REGISTER(isp, CULLING);
+	CCDC_PRINT_REGISTER(isp, HSIZE_OFF);
+	CCDC_PRINT_REGISTER(isp, SDOFST);
+	CCDC_PRINT_REGISTER(isp, SDR_ADDR);
+	CCDC_PRINT_REGISTER(isp, CLAMP);
+	CCDC_PRINT_REGISTER(isp, DCSUB);
+	CCDC_PRINT_REGISTER(isp, COLPTN);
+	CCDC_PRINT_REGISTER(isp, BLKCMP);
+	CCDC_PRINT_REGISTER(isp, FPC);
+	CCDC_PRINT_REGISTER(isp, FPC_ADDR);
+	CCDC_PRINT_REGISTER(isp, VDINT);
+	CCDC_PRINT_REGISTER(isp, ALAW);
+	CCDC_PRINT_REGISTER(isp, REC656IF);
+	CCDC_PRINT_REGISTER(isp, CFG);
+	CCDC_PRINT_REGISTER(isp, FMTCFG);
+	CCDC_PRINT_REGISTER(isp, FMT_HORZ);
+	CCDC_PRINT_REGISTER(isp, FMT_VERT);
+	CCDC_PRINT_REGISTER(isp, PRGEVEN0);
+	CCDC_PRINT_REGISTER(isp, PRGEVEN1);
+	CCDC_PRINT_REGISTER(isp, PRGODD0);
+	CCDC_PRINT_REGISTER(isp, PRGODD1);
+	CCDC_PRINT_REGISTER(isp, VP_OUT);
+	CCDC_PRINT_REGISTER(isp, LSC_CONFIG);
+	CCDC_PRINT_REGISTER(isp, LSC_INITIAL);
+	CCDC_PRINT_REGISTER(isp, LSC_TABLE_BASE);
+	CCDC_PRINT_REGISTER(isp, LSC_TABLE_OFFSET);
+
+	dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * omap3isp_ccdc_busy - Get busy state of the CCDC.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+int omap3isp_ccdc_busy(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) &
+		ISPCCDC_PCR_BUSY;
+}
+
+/* -----------------------------------------------------------------------------
+ * Lens Shading Compensation
+ */
+
+/*
+ * ccdc_lsc_validate_config - Check that LSC configuration is valid.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @lsc_cfg: the LSC configuration to check.
+ *
+ * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid.
+ */
+static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
+				    struct omap3isp_ccdc_lsc_config *lsc_cfg)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	struct v4l2_mbus_framefmt *format;
+	unsigned int paxel_width, paxel_height;
+	unsigned int paxel_shift_x, paxel_shift_y;
+	unsigned int min_width, min_height, min_size;
+	unsigned int input_width, input_height;
+
+	paxel_shift_x = lsc_cfg->gain_mode_m;
+	paxel_shift_y = lsc_cfg->gain_mode_n;
+
+	if ((paxel_shift_x < 2) || (paxel_shift_x > 6) ||
+	    (paxel_shift_y < 2) || (paxel_shift_y > 6)) {
+		dev_dbg(isp->dev, "CCDC: LSC: Invalid paxel size\n");
+		return -EINVAL;
+	}
+
+	if (lsc_cfg->offset & 3) {
+		dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of "
+			"4\n");
+		return -EINVAL;
+	}
+
+	if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) {
+		dev_dbg(isp->dev, "CCDC: LSC: initial_x and y must be even\n");
+		return -EINVAL;
+	}
+
+	format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
+				   V4L2_SUBDEV_FORMAT_ACTIVE);
+	input_width = format->width;
+	input_height = format->height;
+
+	/* Calculate minimum bytesize for validation */
+	paxel_width = 1 << paxel_shift_x;
+	min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1)
+		     >> paxel_shift_x) + 1;
+
+	paxel_height = 1 << paxel_shift_y;
+	min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1)
+		     >> paxel_shift_y) + 1;
+
+	min_size = 4 * min_width * min_height;
+	if (min_size > lsc_cfg->size) {
+		dev_dbg(isp->dev, "CCDC: LSC: too small table\n");
+		return -EINVAL;
+	}
+	if (lsc_cfg->offset < (min_width * 4)) {
+		dev_dbg(isp->dev, "CCDC: LSC: Offset is too small\n");
+		return -EINVAL;
+	}
+	if ((lsc_cfg->size / lsc_cfg->offset) < min_height) {
+		dev_dbg(isp->dev, "CCDC: LSC: Wrong size/offset combination\n");
+		return -EINVAL;
+	}
+	return 0;
+}
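+
+/*
+ * Worked example (illustrative numbers only): for a 2592x1944 input with
+ * gain_mode_m = gain_mode_n = 4 (16x16 paxels) and initial_x = initial_y = 0,
+ * the checks above require
+ *
+ *	min_width  = ((2592 + 0 + 16 - 1) >> 4) + 1 = 163
+ *	min_height = ((1944 + 0 + 16 - 1) >> 4) + 1 = 123
+ *	min_size   = 4 * 163 * 123 = 80196 bytes
+ *
+ * so the supplied table must be at least 80196 bytes long with an offset of
+ * at least 652 bytes (163 * 4) per paxel line.
+ */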
+
+/*
+ * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr)
+{
+	isp_reg_writel(to_isp_device(ccdc), addr,
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
+}
+
+/*
+ * ccdc_lsc_setup_regs - Configures the lens shading compensation module
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_lsc_setup_regs(struct isp_ccdc_device *ccdc,
+				struct omap3isp_ccdc_lsc_config *cfg)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	int reg;
+
+	isp_reg_writel(isp, cfg->offset, OMAP3_ISP_IOMEM_CCDC,
+		       ISPCCDC_LSC_TABLE_OFFSET);
+
+	reg = 0;
+	reg |= cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT;
+	reg |= cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT;
+	reg |= cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT;
+	isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG);
+
+	reg = 0;
+	reg &= ~ISPCCDC_LSC_INITIAL_X_MASK;
+	reg |= cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT;
+	reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK;
+	reg |= cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT;
+	isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC,
+		       ISPCCDC_LSC_INITIAL);
+}
+
+static int ccdc_lsc_wait_prefetch(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	unsigned int wait;
+
+	isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
+		       OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+
+	/* timeout 1 ms */
+	for (wait = 0; wait < 1000; wait++) {
+		if (isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS) &
+				  IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ) {
+			isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
+				       OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
+			return 0;
+		}
+
+		rmb();
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * __ccdc_lsc_enable - Enables/Disables the Lens Shading Compensation module.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @enable: 0 Disables LSC, 1 Enables LSC.
+ */
+static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	const struct v4l2_mbus_framefmt *format =
+		__ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
+				  V4L2_SUBDEV_FORMAT_ACTIVE);
+
+	if ((format->code != V4L2_MBUS_FMT_SGRBG10_1X10) &&
+	    (format->code != V4L2_MBUS_FMT_SRGGB10_1X10) &&
+	    (format->code != V4L2_MBUS_FMT_SBGGR10_1X10) &&
+	    (format->code != V4L2_MBUS_FMT_SGBRG10_1X10))
+		return -EINVAL;
+
+	if (enable)
+		omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_LSC_READ);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
+			ISPCCDC_LSC_ENABLE, enable ? ISPCCDC_LSC_ENABLE : 0);
+
+	if (enable) {
+		if (ccdc_lsc_wait_prefetch(ccdc) < 0) {
+			isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC,
+				    ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE);
+			ccdc->lsc.state = LSC_STATE_STOPPED;
+			dev_warn(to_device(ccdc), "LSC prefetch timeout\n");
+			return -ETIMEDOUT;
+		}
+		ccdc->lsc.state = LSC_STATE_RUNNING;
+	} else {
+		ccdc->lsc.state = LSC_STATE_STOPPING;
+	}
+
+	return 0;
+}
+
+static int ccdc_lsc_busy(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG) &
+			     ISPCCDC_LSC_BUSY;
+}
+
+/* __ccdc_lsc_configure - Apply a new configuration to the LSC engine
+ * @ccdc: Pointer to ISP CCDC device
+ * @req: New configuration request
+ *
+ * context: in_interrupt()
+ */
+static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
+				struct ispccdc_lsc_config_req *req)
+{
+	if (!req->enable)
+		return -EINVAL;
+
+	if (ccdc_lsc_validate_config(ccdc, &req->config) < 0) {
+		dev_dbg(to_device(ccdc), "Discard LSC configuration\n");
+		return -EINVAL;
+	}
+
+	if (ccdc_lsc_busy(ccdc))
+		return -EBUSY;
+
+	ccdc_lsc_setup_regs(ccdc, &req->config);
+	ccdc_lsc_program_table(ccdc, req->table);
+	return 0;
+}
+
+/*
+ * ccdc_lsc_error_handler - Handle LSC prefetch error scenario.
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Disables LSC, and defers enablement to shadow registers update time.
+ */
+static void ccdc_lsc_error_handler(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	/*
+	 * From OMAP3 TRM: When this event is pending, the module
+	 * goes into transparent mode (output =input). Normal
+	 * operation can be resumed at the start of the next frame
+	 * after:
+	 *  1) Clearing this event
+	 *  2) Disabling the LSC module
+	 *  3) Enabling it
+	 */
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
+		    ISPCCDC_LSC_ENABLE);
+	ccdc->lsc.state = LSC_STATE_STOPPED;
+}
+
+static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
+				  struct ispccdc_lsc_config_req *req)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	if (req == NULL)
+		return;
+
+	if (req->iovm)
+		dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
+			     req->iovm->sgt->nents, DMA_TO_DEVICE);
+	if (req->table)
+		omap_iommu_vfree(isp->domain, isp->dev, req->table);
+	kfree(req);
+}
+
+static void ccdc_lsc_free_queue(struct isp_ccdc_device *ccdc,
+				struct list_head *queue)
+{
+	struct ispccdc_lsc_config_req *req, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+	list_for_each_entry_safe(req, n, queue, list) {
+		list_del(&req->list);
+		spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+		ccdc_lsc_free_request(ccdc, req);
+		spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+	}
+	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+static void ccdc_lsc_free_table_work(struct work_struct *work)
+{
+	struct isp_ccdc_device *ccdc;
+	struct ispccdc_lsc *lsc;
+
+	lsc = container_of(work, struct ispccdc_lsc, table_work);
+	ccdc = container_of(lsc, struct isp_ccdc_device, lsc);
+
+	ccdc_lsc_free_queue(ccdc, &lsc->free_queue);
+}
+
+/*
+ * ccdc_lsc_config - Configure the LSC module from a userspace request
+ *
+ * Store the requested LSC configuration in the LSC engine request pointer. The
+ * configuration will be applied to the hardware when the CCDC is enabled, or
+ * at the next LSC interrupt if the CCDC is already running.
+ */
+static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
+			   struct omap3isp_ccdc_update_config *config)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	struct ispccdc_lsc_config_req *req;
+	unsigned long flags;
+	void *table;
+	u16 update;
+	int ret;
+
+	update = config->update &
+		 (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC);
+	if (!update)
+		return 0;
+
+	if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
+		dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table "
+			"need to be supplied\n", __func__);
+		return -EINVAL;
+	}
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (req == NULL)
+		return -ENOMEM;
+
+	if (config->flag & OMAP3ISP_CCDC_CONFIG_LSC) {
+		if (copy_from_user(&req->config, config->lsc_cfg,
+				   sizeof(req->config))) {
+			ret = -EFAULT;
+			goto done;
+		}
+
+		req->enable = 1;
+
+		req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
+					req->config.size, IOMMU_FLAG);
+		if (IS_ERR_VALUE(req->table)) {
+			req->table = 0;
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		req->iovm = omap_find_iovm_area(isp->dev, req->table);
+		if (req->iovm == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		if (!dma_map_sg(isp->dev, req->iovm->sgt->sgl,
+				req->iovm->sgt->nents, DMA_TO_DEVICE)) {
+			ret = -ENOMEM;
+			req->iovm = NULL;
+			goto done;
+		}
+
+		dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
+				    req->iovm->sgt->nents, DMA_TO_DEVICE);
+
+		table = omap_da_to_va(isp->dev, req->table);
+		if (copy_from_user(table, config->lsc, req->config.size)) {
+			ret = -EFAULT;
+			goto done;
+		}
+
+		dma_sync_sg_for_device(isp->dev, req->iovm->sgt->sgl,
+				       req->iovm->sgt->nents, DMA_TO_DEVICE);
+	}
+
+	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+	if (ccdc->lsc.request) {
+		list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
+		schedule_work(&ccdc->lsc.table_work);
+	}
+	ccdc->lsc.request = req;
+	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+	ret = 0;
+
+done:
+	if (ret < 0)
+		ccdc_lsc_free_request(ccdc, req);
+
+	return ret;
+}
+
+static inline int ccdc_lsc_is_configured(struct isp_ccdc_device *ccdc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+	if (ccdc->lsc.active) {
+		spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+	return 0;
+}
+
+static int ccdc_lsc_enable(struct isp_ccdc_device *ccdc)
+{
+	struct ispccdc_lsc *lsc = &ccdc->lsc;
+
+	if (lsc->state != LSC_STATE_STOPPED)
+		return -EINVAL;
+
+	if (lsc->active) {
+		list_add_tail(&lsc->active->list, &lsc->free_queue);
+		lsc->active = NULL;
+	}
+
+	if (__ccdc_lsc_configure(ccdc, lsc->request) < 0) {
+		omap3isp_sbl_disable(to_isp_device(ccdc),
+				OMAP3_ISP_SBL_CCDC_LSC_READ);
+		list_add_tail(&lsc->request->list, &lsc->free_queue);
+		lsc->request = NULL;
+		goto done;
+	}
+
+	lsc->active = lsc->request;
+	lsc->request = NULL;
+	__ccdc_lsc_enable(ccdc, 1);
+
+done:
+	if (!list_empty(&lsc->free_queue))
+		schedule_work(&lsc->table_work);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Parameters configuration
+ */
+
+/*
+ * ccdc_configure_clamp - Configure optical-black or digital clamping
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * The CCDC performs either optical-black or digital clamp. Configure and enable
+ * the selected clamp method.
+ */
+static void ccdc_configure_clamp(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	u32 clamp;
+
+	if (ccdc->obclamp) {
+		clamp  = ccdc->clamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT;
+		clamp |= ccdc->clamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT;
+		clamp |= ccdc->clamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT;
+		clamp |= ccdc->clamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT;
+		isp_reg_writel(isp, clamp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP);
+	} else {
+		isp_reg_writel(isp, ccdc->clamp.dcsubval,
+			       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB);
+	}
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP,
+			ISPCCDC_CLAMP_CLAMPEN,
+			ccdc->obclamp ? ISPCCDC_CLAMP_CLAMPEN : 0);
+}
+
+/*
+ * ccdc_configure_fpc - Configure Faulty Pixel Correction
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, ISPCCDC_FPC_FPCEN);
+
+	if (!ccdc->fpc_en)
+		return;
+
+	isp_reg_writel(isp, ccdc->fpc.fpcaddr, OMAP3_ISP_IOMEM_CCDC,
+		       ISPCCDC_FPC_ADDR);
+	/* The FPNUM field must be set before enabling FPC. */
+	isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
+	isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT) |
+		       ISPCCDC_FPC_FPCEN, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
+}
+
+/*
+ * ccdc_configure_black_comp - Configure Black Level Compensation.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_black_comp(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	u32 blcomp;
+
+	blcomp  = ccdc->blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT;
+	blcomp |= ccdc->blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT;
+	blcomp |= ccdc->blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT;
+	blcomp |= ccdc->blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT;
+
+	isp_reg_writel(isp, blcomp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP);
+}
+
+/*
+ * ccdc_configure_lpf - Configure Low-Pass Filter (LPF).
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_lpf(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE,
+			ISPCCDC_SYN_MODE_LPF,
+			ccdc->lpf ? ISPCCDC_SYN_MODE_LPF : 0);
+}
+
+/*
+ * ccdc_configure_alaw - Configure A-law compression.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_configure_alaw(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	const struct isp_format_info *info;
+	u32 alaw = 0;
+
+	info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
+
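+	/*
+	 * Select the A-law input width from the sink format bit depth. 8-bit
+	 * input needs no A-law configuration.
+	 */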
+	switch (info->width) {
+	case 8:
+		return;
+
+	case 10:
+		alaw = ISPCCDC_ALAW_GWDI_9_0;
+		break;
+	case 11:
+		alaw = ISPCCDC_ALAW_GWDI_10_1;
+		break;
+	case 12:
+		alaw = ISPCCDC_ALAW_GWDI_11_2;
+		break;
+	case 13:
+		alaw = ISPCCDC_ALAW_GWDI_12_3;
+		break;
+	}
+
+	if (ccdc->alaw)
+		alaw |= ISPCCDC_ALAW_CCDTBL;
+
+	isp_reg_writel(isp, alaw, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW);
+}
+
+/*
+ * ccdc_config_imgattr - Configure sensor image specific attributes.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @colptn: Color pattern of the sensor.
+ */
+static void ccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_writel(isp, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN);
+}
+
+/*
+ * ccdc_config - Set CCDC configuration from userspace
+ * @ccdc: Pointer to ISP CCDC device.
+ * @ccdc_struct: Structure containing CCDC configuration sent from userspace.
+ *
+ * Returns 0 if successful, -EFAULT if copying data from user space fails,
+ * -EBUSY if the FPC table is updated while streaming, -ENOMEM if a table
+ * can't be allocated, or -EINVAL if the LSC configuration is incomplete.
+ */
+static int ccdc_config(struct isp_ccdc_device *ccdc,
+		       struct omap3isp_ccdc_update_config *ccdc_struct)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	unsigned long flags;
+
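+	/*
+	 * Set shadow_update under the lock so the VD0 interrupt handler
+	 * doesn't apply the shadowed controls while they are being updated.
+	 */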
+	spin_lock_irqsave(&ccdc->lock, flags);
+	ccdc->shadow_update = 1;
+	spin_unlock_irqrestore(&ccdc->lock, flags);
+
+	if (OMAP3ISP_CCDC_ALAW & ccdc_struct->update) {
+		ccdc->alaw = !!(OMAP3ISP_CCDC_ALAW & ccdc_struct->flag);
+		ccdc->update |= OMAP3ISP_CCDC_ALAW;
+	}
+
+	if (OMAP3ISP_CCDC_LPF & ccdc_struct->update) {
+		ccdc->lpf = !!(OMAP3ISP_CCDC_LPF & ccdc_struct->flag);
+		ccdc->update |= OMAP3ISP_CCDC_LPF;
+	}
+
+	if (OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->update) {
+		if (copy_from_user(&ccdc->clamp, ccdc_struct->bclamp,
+				   sizeof(ccdc->clamp))) {
+			ccdc->shadow_update = 0;
+			return -EFAULT;
+		}
+
+		ccdc->obclamp = !!(OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->flag);
+		ccdc->update |= OMAP3ISP_CCDC_BLCLAMP;
+	}
+
+	if (OMAP3ISP_CCDC_BCOMP & ccdc_struct->update) {
+		if (copy_from_user(&ccdc->blcomp, ccdc_struct->blcomp,
+				   sizeof(ccdc->blcomp))) {
+			ccdc->shadow_update = 0;
+			return -EFAULT;
+		}
+
+		ccdc->update |= OMAP3ISP_CCDC_BCOMP;
+	}
+
+	ccdc->shadow_update = 0;
+
+	if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
+		u32 table_old = 0;
+		u32 table_new;
+		u32 size;
+
+		if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+			return -EBUSY;
+
+		ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
+
+		if (ccdc->fpc_en) {
+			if (copy_from_user(&ccdc->fpc, ccdc_struct->fpc,
+					   sizeof(ccdc->fpc)))
+				return -EFAULT;
+
+			/*
+			 * table_new must be 64-byte aligned; this is
+			 * already guaranteed by omap_iommu_vmalloc().
+			 */
+			size = ccdc->fpc.fpnum * 4;
+			table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
+							0, size, IOMMU_FLAG);
+			if (IS_ERR_VALUE(table_new))
+				return -ENOMEM;
+
+			if (copy_from_user(omap_da_to_va(isp->dev, table_new),
+					   (__force void __user *)
+					   ccdc->fpc.fpcaddr, size)) {
+				omap_iommu_vfree(isp->domain, isp->dev,
+								table_new);
+				return -EFAULT;
+			}
+
+			table_old = ccdc->fpc.fpcaddr;
+			ccdc->fpc.fpcaddr = table_new;
+		}
+
+		ccdc_configure_fpc(ccdc);
+		if (table_old != 0)
+			omap_iommu_vfree(isp->domain, isp->dev, table_old);
+	}
+
+	return ccdc_lsc_config(ccdc, ccdc_struct);
+}
+
+static void ccdc_apply_controls(struct isp_ccdc_device *ccdc)
+{
+	if (ccdc->update & OMAP3ISP_CCDC_ALAW) {
+		ccdc_configure_alaw(ccdc);
+		ccdc->update &= ~OMAP3ISP_CCDC_ALAW;
+	}
+
+	if (ccdc->update & OMAP3ISP_CCDC_LPF) {
+		ccdc_configure_lpf(ccdc);
+		ccdc->update &= ~OMAP3ISP_CCDC_LPF;
+	}
+
+	if (ccdc->update & OMAP3ISP_CCDC_BLCLAMP) {
+		ccdc_configure_clamp(ccdc);
+		ccdc->update &= ~OMAP3ISP_CCDC_BLCLAMP;
+	}
+
+	if (ccdc->update & OMAP3ISP_CCDC_BCOMP) {
+		ccdc_configure_black_comp(ccdc);
+		ccdc->update &= ~OMAP3ISP_CCDC_BCOMP;
+	}
+}
+
+/*
+ * omap3isp_ccdc_restore_context - Restore values of the CCDC module registers
+ * @dev: Pointer to ISP device
+ */
+void omap3isp_ccdc_restore_context(struct isp_device *isp)
+{
+	struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ISPCCDC_CFG_VDLC);
+
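+	/*
+	 * Mark all shadowed controls as dirty so they are reprogrammed, then
+	 * restore the FPC configuration as well.
+	 */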
+	ccdc->update = OMAP3ISP_CCDC_ALAW | OMAP3ISP_CCDC_LPF
+		     | OMAP3ISP_CCDC_BLCLAMP | OMAP3ISP_CCDC_BCOMP;
+	ccdc_apply_controls(ccdc);
+	ccdc_configure_fpc(ccdc);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * ccdc_config_vp - Configure the Video Port.
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+	struct isp_device *isp = to_isp_device(ccdc);
+	const struct isp_format_info *info;
+	unsigned long l3_ick = pipe->l3_ick;
+	unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
+	unsigned int div = 0;
+	u32 fmtcfg_vp;
+
+	fmtcfg_vp = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG)
+		  & ~(ISPCCDC_FMTCFG_VPIN_MASK | ISPCCDC_FMTCFG_VPIF_FRQ_MASK);
+
+	info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
+
+	switch (info->width) {
+	case 8:
+	case 10:
+		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0;
+		break;
+	case 11:
+		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1;
+		break;
+	case 12:
+		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2;
+		break;
+	case 13:
+		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3;
+		break;
+	}
+
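+	/*
+	 * Derive the video port clock divisor from the L3 clock and the
+	 * pipeline data rate, clamp it to the range supported by this ISP
+	 * revision, and encode it as divisor minus two in the FRQ field.
+	 */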
+	if (pipe->input)
+		div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
+	else if (pipe->external_rate)
+		div = l3_ick / pipe->external_rate;
+
+	div = clamp(div, 2U, max_div);
+	fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
+
+	isp_reg_writel(isp, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
+}
+
+/*
+ * ccdc_enable_vp - Enable Video Port.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @enable: 0 Disables VP, 1 Enables VP
+ *
+ * This is needed to output the image to the Preview, H3A and HIST ISP
+ * submodules.
+ */
+static void ccdc_enable_vp(struct isp_ccdc_device *ccdc, u8 enable)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG,
+			ISPCCDC_FMTCFG_VPEN, enable ? ISPCCDC_FMTCFG_VPEN : 0);
+}
+
+/*
+ * ccdc_config_outlineoffset - Configure memory saving output line offset
+ * @ccdc: Pointer to ISP CCDC device.
+ * @offset: Address offset to start a new line. Must be twice the
+ *          output width and aligned on a 32 byte boundary
+ * @oddeven: Specifies the odd/even line pattern to be chosen to store the
+ *           output.
+ * @numlines: Set to 0-3 for a +1 to +4 line offset, or 4-7 for a -1 to -4
+ *            line offset.
+ *
+ * - Configures the output line offset when stored in memory
+ * - Sets the odd/even line pattern to store the output
+ *    (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4))
+ * - Configures the number of even and odd line fields when the lines are
+ *   rearranged.
+ */
+static void ccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
+					u32 offset, u8 oddeven, u8 numlines)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_writel(isp, offset & 0xffff,
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF);
+
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+		    ISPCCDC_SDOFST_FINV);
+
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+		    ISPCCDC_SDOFST_FOFST_4L);
+
+	switch (oddeven) {
+	case EVENEVEN:
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT);
+		break;
+	case ODDEVEN:
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT);
+		break;
+	case EVENODD:
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT);
+		break;
+	case ODDODD:
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
+			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * ccdc_set_outaddr - Set memory address to save output image
+ * @ccdc: Pointer to ISP CCDC device.
+ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void ccdc_set_outaddr(struct isp_ccdc_device *ccdc, u32 addr)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR);
+}
+
+/*
+ * omap3isp_ccdc_max_rate - Calculate maximum input data rate based on the input
+ * @ccdc: Pointer to ISP CCDC device.
+ * @max_rate: Maximum calculated data rate.
+ *
+ * Returns in *max_rate the lesser of the calculated maximum rate and the
+ * value passed in.
+ */
+void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
+			    unsigned int *max_rate)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+	unsigned int rate;
+
+	if (pipe == NULL)
+		return;
+
+	/*
+	 * TRM says that for parallel sensors the maximum data rate
+	 * should be 90% of the L3/2 clock, otherwise just L3/2.
+	 */
+	if (ccdc->input == CCDC_INPUT_PARALLEL)
+		rate = pipe->l3_ick / 2 * 9 / 10;
+	else
+		rate = pipe->l3_ick / 2;
+
+	*max_rate = min(*max_rate, rate);
+}
+
+/*
+ * ccdc_config_sync_if - Set CCDC sync interface configuration
+ * @ccdc: Pointer to ISP CCDC device.
+ * @pdata: Parallel interface platform data (may be NULL)
+ * @data_size: Data size
+ */
+static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
+				struct isp_parallel_platform_data *pdata,
+				unsigned int data_size)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	const struct v4l2_mbus_framefmt *format;
+	u32 syn_mode = ISPCCDC_SYN_MODE_VDHDEN;
+
+	format = &ccdc->formats[CCDC_PAD_SINK];
+
+	if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+	    format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+		/* The bridge is enabled for YUV8 formats. Configure the input
+		 * mode accordingly.
+		 */
+		syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
+	}
+
+	switch (data_size) {
+	case 8:
+		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8;
+		break;
+	case 10:
+		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10;
+		break;
+	case 11:
+		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11;
+		break;
+	case 12:
+		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12;
+		break;
+	}
+
+	if (pdata && pdata->data_pol)
+		syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;
+
+	if (pdata && pdata->hs_pol)
+		syn_mode |= ISPCCDC_SYN_MODE_HDPOL;
+
+	if (pdata && pdata->vs_pol)
+		syn_mode |= ISPCCDC_SYN_MODE_VDPOL;
+
+	isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+	/* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
+	 * hardware seems to ignore it in all other input modes.
+	 */
+	if (format->code == V4L2_MBUS_FMT_UYVY8_2X8)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+			    ISPCCDC_CFG_Y8POS);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+			    ISPCCDC_CFG_Y8POS);
+
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
+		    ISPCCDC_REC656IF_R656ON);
+}
+
+/* CCDC formats descriptions */
+static const u32 ccdc_sgrbg_pattern =
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_srggb_pattern =
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_sbggr_pattern =
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static const u32 ccdc_sgbrg_pattern =
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
+	ISPCCDC_COLPTN_Gb_G  << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
+	ISPCCDC_COLPTN_B_Mg  << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
+	ISPCCDC_COLPTN_R_Ye  << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
+	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC3_SHIFT;
+
+static void ccdc_configure(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+	struct isp_parallel_platform_data *pdata = NULL;
+	struct v4l2_subdev *sensor;
+	struct v4l2_mbus_framefmt *format;
+	const struct v4l2_rect *crop;
+	const struct isp_format_info *fmt_info;
+	struct v4l2_subdev_format fmt_src;
+	unsigned int depth_out;
+	unsigned int depth_in = 0;
+	struct media_pad *pad;
+	unsigned long flags;
+	unsigned int bridge;
+	unsigned int shift;
+	u32 syn_mode;
+	u32 ccdc_pattern;
+
+	pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]);
+	sensor = media_entity_to_v4l2_subdev(pad->entity);
+	if (ccdc->input == CCDC_INPUT_PARALLEL)
+		pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
+			->bus.parallel;
+
+	/* Compute the lane shifter shift value and enable the bridge when the
+	 * input format is YUV.
+	 */
+	fmt_src.pad = pad->index;
+	fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) {
+		fmt_info = omap3isp_video_format_info(fmt_src.format.code);
+		depth_in = fmt_info->width;
+	}
+
+	fmt_info = omap3isp_video_format_info
+		(isp->isp_ccdc.formats[CCDC_PAD_SINK].code);
+	depth_out = fmt_info->width;
+	shift = depth_in - depth_out;
+
+	if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8)
+		bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
+	else if (fmt_info->code == V4L2_MBUS_FMT_UYVY8_2X8)
+		bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
+	else
+		bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
+
+	omap3isp_configure_bridge(isp, ccdc->input, pdata, shift, bridge);
+
+	ccdc_config_sync_if(ccdc, pdata, depth_out);
+
+	syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+	/* Use the raw, unprocessed data when writing to memory. The H3A and
+	 * histogram modules are still fed with lens shading corrected data.
+	 */
+	syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR;
+
+	if (ccdc->output & CCDC_OUTPUT_MEMORY)
+		syn_mode |= ISPCCDC_SYN_MODE_WEN;
+	else
+		syn_mode &= ~ISPCCDC_SYN_MODE_WEN;
+
+	if (ccdc->output & CCDC_OUTPUT_RESIZER)
+		syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ;
+	else
+		syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;
+
+	/* CCDC_PAD_SINK */
+	format = &ccdc->formats[CCDC_PAD_SINK];
+
+	/* Mosaic filter */
+	switch (format->code) {
+	case V4L2_MBUS_FMT_SRGGB10_1X10:
+	case V4L2_MBUS_FMT_SRGGB12_1X12:
+		ccdc_pattern = ccdc_srggb_pattern;
+		break;
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+	case V4L2_MBUS_FMT_SBGGR12_1X12:
+		ccdc_pattern = ccdc_sbggr_pattern;
+		break;
+	case V4L2_MBUS_FMT_SGBRG10_1X10:
+	case V4L2_MBUS_FMT_SGBRG12_1X12:
+		ccdc_pattern = ccdc_sgbrg_pattern;
+		break;
+	default:
+		/* Use GRBG */
+		ccdc_pattern = ccdc_sgrbg_pattern;
+		break;
+	}
+	ccdc_config_imgattr(ccdc, ccdc_pattern);
+
+	/* Generate VD0 on the last line of the image and VD1 on the
+	 * 2/3 height line.
+	 */
+	isp_reg_writel(isp, ((format->height - 2) << ISPCCDC_VDINT_0_SHIFT) |
+		       ((format->height * 2 / 3) << ISPCCDC_VDINT_1_SHIFT),
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);
+
+	/* CCDC_PAD_SOURCE_OF */
+	format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
+	crop = &ccdc->crop;
+
+	isp_reg_writel(isp, (crop->left << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
+		       ((crop->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
+	isp_reg_writel(isp, crop->top << ISPCCDC_VERT_START_SLV0_SHIFT,
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
+	isp_reg_writel(isp, (crop->height - 1)
+			<< ISPCCDC_VERT_LINES_NLV_SHIFT,
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
+
+	ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value, 0, 0);
+
+	/* The CCDC outputs data in UYVY order by default. Swap bytes to get
+	 * YUYV.
+	 */
+	if (format->code == V4L2_MBUS_FMT_YUYV8_1X16)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+			    ISPCCDC_CFG_BSWD);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+			    ISPCCDC_CFG_BSWD);
+
+	/* Use PACK8 mode for 1byte per pixel formats. */
+	if (omap3isp_video_format_info(format->code)->width <= 8)
+		syn_mode |= ISPCCDC_SYN_MODE_PACK8;
+	else
+		syn_mode &= ~ISPCCDC_SYN_MODE_PACK8;
+
+	isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
+
+	/* CCDC_PAD_SOURCE_VP */
+	format = &ccdc->formats[CCDC_PAD_SOURCE_VP];
+
+	isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
+		       (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
+	isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
+		       ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);
+
+	isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
+		       (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
+		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);
+
+	/* Lens shading correction. */
+	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+	if (ccdc->lsc.request == NULL)
+		goto unlock;
+
+	WARN_ON(ccdc->lsc.active);
+
+	/* Get last good LSC configuration. If it is not supported for
+	 * the current active resolution, discard it.
+	 */
+	if (ccdc->lsc.active == NULL &&
+	    __ccdc_lsc_configure(ccdc, ccdc->lsc.request) == 0) {
+		ccdc->lsc.active = ccdc->lsc.request;
+	} else {
+		list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
+		schedule_work(&ccdc->lsc.table_work);
+	}
+
+	ccdc->lsc.request = NULL;
+
+unlock:
+	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+
+	ccdc_apply_controls(ccdc);
+}
+
+static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
+			ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);
+}
+
+static int ccdc_disable(struct isp_ccdc_device *ccdc)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ccdc->lock, flags);
+	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS)
+		ccdc->stopping = CCDC_STOP_REQUEST;
+	spin_unlock_irqrestore(&ccdc->lock, flags);
+
+	ret = wait_event_timeout(ccdc->wait,
+				 ccdc->stopping == CCDC_STOP_FINISHED,
+				 msecs_to_jiffies(2000));
+	if (ret == 0) {
+		ret = -ETIMEDOUT;
+		dev_warn(to_device(ccdc), "CCDC stop timeout!\n");
+	}
+
+	omap3isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ);
+
+	mutex_lock(&ccdc->ioctl_lock);
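+	/*
+	 * Free the pending LSC request and keep the active table as the new
+	 * pending request, so it gets reprogrammed when streaming restarts.
+	 */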
+	ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
+	ccdc->lsc.request = ccdc->lsc.active;
+	ccdc->lsc.active = NULL;
+	cancel_work_sync(&ccdc->lsc.table_work);
+	ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
+	mutex_unlock(&ccdc->ioctl_lock);
+
+	ccdc->stopping = CCDC_STOP_NOT_REQUESTED;
+
+	return ret > 0 ? 0 : ret;
+}
+
+static void ccdc_enable(struct isp_ccdc_device *ccdc)
+{
+	if (ccdc_lsc_is_configured(ccdc))
+		__ccdc_lsc_enable(ccdc, 1);
+	__ccdc_enable(ccdc, 1);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * ccdc_sbl_busy - Poll idle state of CCDC and related SBL memory write bits
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Returns zero if the CCDC is idle and the image has been written to
+ * memory, too.
+ */
+static int ccdc_sbl_busy(struct isp_ccdc_device *ccdc)
+{
+	struct isp_device *isp = to_isp_device(ccdc);
+
+	return omap3isp_ccdc_busy(ccdc)
+		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) &
+		   ISPSBL_CCDC_WR_0_DATA_READY)
+		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) &
+		   ISPSBL_CCDC_WR_0_DATA_READY)
+		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) &
+		   ISPSBL_CCDC_WR_0_DATA_READY)
+		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) &
+		   ISPSBL_CCDC_WR_0_DATA_READY);
+}
+
+/*
+ * ccdc_sbl_wait_idle - Wait until the CCDC and related SBL are idle
+ * @ccdc: Pointer to ISP CCDC device.
+ * @max_wait: Maximum retry count, in microseconds, to wait for the idle
+ *            transition.
+ */
+static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
+			      unsigned int max_wait)
+{
+	unsigned int wait = 0;
+
+	if (max_wait == 0)
+		max_wait = 10000; /* 10 ms */
+
+	for (wait = 0; wait <= max_wait; wait++) {
+		if (!ccdc_sbl_busy(ccdc))
+			return 0;
+
+		rmb();
+		udelay(1);
+	}
+
+	return -EBUSY;
+}
+
+/* __ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
+ * @ccdc: Pointer to ISP CCDC device.
+ * @event: Event that triggered the handler
+ *
+ * Return 1 when the event and stopping request combination is satisfied,
+ * zero otherwise.
+ */
+static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
+{
+	int rval = 0;
+
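+	/*
+	 * Combine the low bits of the stopping state with the triggering
+	 * event so a single switch can select the state transition.
+	 */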
+	switch ((ccdc->stopping & 3) | event) {
+	case CCDC_STOP_REQUEST | CCDC_EVENT_VD1:
+		if (ccdc->lsc.state != LSC_STATE_STOPPED)
+			__ccdc_lsc_enable(ccdc, 0);
+		__ccdc_enable(ccdc, 0);
+		ccdc->stopping = CCDC_STOP_EXECUTED;
+		return 1;
+
+	case CCDC_STOP_EXECUTED | CCDC_EVENT_VD0:
+		ccdc->stopping |= CCDC_STOP_CCDC_FINISHED;
+		if (ccdc->lsc.state == LSC_STATE_STOPPED)
+			ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
+		rval = 1;
+		break;
+
+	case CCDC_STOP_EXECUTED | CCDC_EVENT_LSC_DONE:
+		ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
+		rval = 1;
+		break;
+
+	case CCDC_STOP_EXECUTED | CCDC_EVENT_VD1:
+		return 1;
+	}
+
+	if (ccdc->stopping == CCDC_STOP_FINISHED) {
+		wake_up(&ccdc->wait);
+		rval = 1;
+	}
+
+	return rval;
+}
+
+static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+	struct video_device *vdev = ccdc->subdev.devnode;
+	struct v4l2_event event;
+
+	/* Frame number propagation */
+	atomic_inc(&pipe->frame_number);
+
+	memset(&event, 0, sizeof(event));
+	event.type = V4L2_EVENT_FRAME_SYNC;
+	event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);
+
+	v4l2_event_queue(vdev, &event);
+}
+
+/*
+ * ccdc_lsc_isr - Handle LSC events
+ * @ccdc: Pointer to ISP CCDC device.
+ * @events: LSC events
+ */
+static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
+{
+	unsigned long flags;
+
+	if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
+		struct isp_pipeline *pipe =
+			to_isp_pipeline(&ccdc->subdev.entity);
+
+		ccdc_lsc_error_handler(ccdc);
+		pipe->error = true;
+		dev_dbg(to_device(ccdc), "lsc prefetch error\n");
+	}
+
+	if (!(events & IRQ0STATUS_CCDC_LSC_DONE_IRQ))
+		return;
+
+	/* The LSC_DONE interrupt has occurred. There are two cases:
+	 * 1. stopping for reconfiguration
+	 * 2. stopping because of a STREAM OFF command
+	 */
+	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+
+	if (ccdc->lsc.state == LSC_STATE_STOPPING)
+		ccdc->lsc.state = LSC_STATE_STOPPED;
+
+	if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
+		goto done;
+
+	if (ccdc->lsc.state != LSC_STATE_RECONFIG)
+		goto done;
+
+	/* The LSC engine stopped for reconfiguration, mark it as stopped */
+	ccdc->lsc.state = LSC_STATE_STOPPED;
+
+	/* This is an exception. The start of frame and LSC_DONE interrupts
+	 * have been received at the same time. Skip this event and wait for
+	 * the next one.
+	 */
+	if (events & IRQ0STATUS_HS_VS_IRQ)
+		goto done;
+
+	/* The LSC engine is stopped at this point. Enable it if there's a
+	 * pending request.
+	 */
+	if (ccdc->lsc.request == NULL)
+		goto done;
+
+	ccdc_lsc_enable(ccdc);
+
+done:
+	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+	struct isp_device *isp = to_isp_device(ccdc);
+	struct isp_buffer *buffer;
+	int restart = 0;
+
+	/* The CCDC generates VD0 interrupts even when disabled (the datasheet
+	 * doesn't explicitly state if that's supposed to happen or not, so it
+	 * can be considered as a hardware bug or as a feature, but we have to
+	 * deal with it anyway). Disabling the CCDC when no buffer is available
+	 * would thus not be enough, we need to handle the situation explicitly.
+	 */
+	if (list_empty(&ccdc->video_out.dmaqueue))
+		goto done;
+
+	/* We're in continuous mode, and memory writes were disabled due to a
+	 * buffer underrun. Reenable them now that we have a buffer. The buffer
+	 * address has been set in ccdc_video_queue.
+	 */
+	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
+		restart = 1;
+		ccdc->underrun = 0;
+		goto done;
+	}
+
+	if (ccdc_sbl_wait_idle(ccdc, 1000)) {
+		dev_info(isp->dev, "CCDC won't become idle!\n");
+		goto done;
+	}
+
+	buffer = omap3isp_video_buffer_next(&ccdc->video_out);
+	if (buffer != NULL) {
+		ccdc_set_outaddr(ccdc, buffer->isp_addr);
+		restart = 1;
+	}
+
+	pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+
+	if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT &&
+	    isp_pipeline_ready(pipe))
+		omap3isp_pipeline_set_stream(pipe,
+					ISP_PIPELINE_STREAM_SINGLESHOT);
+
+done:
+	return restart;
+}
+
+/*
+ * ccdc_vd0_isr - Handle VD0 event
+ * @ccdc: Pointer to ISP CCDC device.
+ *
+ * Executes LSC deferred enablement before next frame starts.
+ */
+static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
+{
+	unsigned long flags;
+	int restart = 0;
+
+	if (ccdc->output & CCDC_OUTPUT_MEMORY)
+		restart = ccdc_isr_buffer(ccdc);
+
+	spin_lock_irqsave(&ccdc->lock, flags);
+	if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
+		spin_unlock_irqrestore(&ccdc->lock, flags);
+		return;
+	}
+
+	if (!ccdc->shadow_update)
+		ccdc_apply_controls(ccdc);
+	spin_unlock_irqrestore(&ccdc->lock, flags);
+
+	if (restart)
+		ccdc_enable(ccdc);
+}
+
+/*
+ * ccdc_vd1_isr - Handle VD1 event
+ * @ccdc: Pointer to ISP CCDC device.
+ */
+static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
+
+	/*
+	 * Depending on the CCDC pipeline state, CCDC stopping should be
+	 * handled differently. In SINGLESHOT we emulate an internal CCDC
+	 * stop because the CCDC hardware works only in continuous mode.
+	 * When the CONTINUOUS pipeline state is used and the CCDC writes
+	 * its data to memory, the CCDC and LSC are stopped immediately but
+	 * without going through the CCDC stopping state machine. The state
+	 * machine should only be used when a stop request is received from
+	 * the user (SINGLESHOT is an exception).
+	 */
+	switch (ccdc->state) {
+	case ISP_PIPELINE_STREAM_SINGLESHOT:
+		ccdc->stopping = CCDC_STOP_REQUEST;
+		break;
+
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		if (ccdc->output & CCDC_OUTPUT_MEMORY) {
+			if (ccdc->lsc.state != LSC_STATE_STOPPED)
+				__ccdc_lsc_enable(ccdc, 0);
+			__ccdc_enable(ccdc, 0);
+		}
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+		break;
+	}
+
+	if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
+		goto done;
+
+	if (ccdc->lsc.request == NULL)
+		goto done;
+
+	/*
+	 * The LSC needs to be reconfigured. Stop it here; the appropriate
+	 * register changes will be made on the next LSC_DONE IRQ.
+	 */
+	if (ccdc->lsc.state == LSC_STATE_RUNNING) {
+		__ccdc_lsc_enable(ccdc, 0);
+		ccdc->lsc.state = LSC_STATE_RECONFIG;
+		goto done;
+	}
+
+	/* The LSC engine is in the STOPPED state, enable it */
+	if (ccdc->lsc.state == LSC_STATE_STOPPED)
+		ccdc_lsc_enable(ccdc);
+
+done:
+	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
+}
+
+/*
+ * omap3isp_ccdc_isr - Configure CCDC during interframe time.
+ * @ccdc: Pointer to ISP CCDC device.
+ * @events: CCDC events
+ */
+int omap3isp_ccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
+{
+	if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED)
+		return 0;
+
+	if (events & IRQ0STATUS_CCDC_VD1_IRQ)
+		ccdc_vd1_isr(ccdc);
+
+	ccdc_lsc_isr(ccdc, events);
+
+	if (events & IRQ0STATUS_CCDC_VD0_IRQ)
+		ccdc_vd0_isr(ccdc);
+
+	if (events & IRQ0STATUS_HS_VS_IRQ)
+		ccdc_hs_vs_isr(ccdc);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+	struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;
+
+	if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
+		return -ENODEV;
+
+	ccdc_set_outaddr(ccdc, buffer->isp_addr);
+
+	/* We now have a buffer queued on the output, restart the pipeline
+	 * on the next CCDC interrupt if running in continuous mode (or when
+	 * starting the stream).
+	 */
+	ccdc->underrun = 1;
+
+	return 0;
+}
+
+static const struct isp_video_operations ccdc_video_ops = {
+	.queue = ccdc_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * ccdc_ioctl - CCDC module private ioctl's
+ * @sd: ISP CCDC V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	int ret;
+
+	switch (cmd) {
+	case VIDIOC_OMAP3ISP_CCDC_CFG:
+		mutex_lock(&ccdc->ioctl_lock);
+		ret = ccdc_config(ccdc, arg);
+		mutex_unlock(&ccdc->ioctl_lock);
+		break;
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+				struct v4l2_event_subscription *sub)
+{
+	if (sub->type != V4L2_EVENT_FRAME_SYNC)
+		return -EINVAL;
+
+	/* line number is zero at frame start */
+	if (sub->id != 0)
+		return -EINVAL;
+
+	return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
+}
+
+static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+				  struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+/*
+ * ccdc_set_stream - Enable/Disable streaming on the CCDC module
+ * @sd: ISP CCDC V4L2 subdevice
+ * @enable: Enable/disable stream
+ *
+ * When writing to memory, the CCDC hardware can't be enabled without a memory
+ * buffer to write to. As the s_stream operation is called in response to a
+ * STREAMON call without any buffer queued yet, just update the enabled field
+ * and return immediately. The CCDC will be enabled in ccdc_isr_buffer().
+ *
+ * When not writing to memory enable the CCDC immediately.
+ */
+static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct isp_device *isp = to_isp_device(ccdc);
+	int ret = 0;
+
+	if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISP_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_CCDC);
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
+			    ISPCCDC_CFG_VDLC);
+
+		ccdc_configure(ccdc);
+
+		/* TODO: Don't configure the video port if all of its output
+		 * links are inactive.
+		 */
+		ccdc_config_vp(ccdc);
+		ccdc_enable_vp(ccdc, 1);
+		ccdc_print_status(ccdc);
+	}
+
+	switch (enable) {
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		if (ccdc->output & CCDC_OUTPUT_MEMORY)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+
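+		/*
+		 * Enable the CCDC right away when not writing to memory, or
+		 * when a buffer has already been queued (the queue handler
+		 * sets the underrun flag in that case).
+		 */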
+		if (ccdc->underrun || !(ccdc->output & CCDC_OUTPUT_MEMORY))
+			ccdc_enable(ccdc);
+
+		ccdc->underrun = 0;
+		break;
+
+	case ISP_PIPELINE_STREAM_SINGLESHOT:
+		if (ccdc->output & CCDC_OUTPUT_MEMORY &&
+		    ccdc->state != ISP_PIPELINE_STREAM_SINGLESHOT)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+
+		ccdc_enable(ccdc);
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+		ret = ccdc_disable(ccdc);
+		if (ccdc->output & CCDC_OUTPUT_MEMORY)
+			omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
+		omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_CCDC);
+		ccdc->underrun = 0;
+		break;
+	}
+
+	ccdc->state = enable;
+	return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+		  unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &ccdc->formats[pad];
+}
+
+static struct v4l2_rect *
+__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+		enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_crop(fh, CCDC_PAD_SOURCE_OF);
+	else
+		return &ccdc->crop;
+}
+
+/*
+ * ccdc_try_format - Try video format on a pad
+ * @ccdc: ISP CCDC device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+		unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+		enum v4l2_subdev_format_whence which)
+{
+	const struct isp_format_info *info;
+	enum v4l2_mbus_pixelcode pixelcode;
+	unsigned int width = fmt->width;
+	unsigned int height = fmt->height;
+	struct v4l2_rect *crop;
+	unsigned int i;
+
+	switch (pad) {
+	case CCDC_PAD_SINK:
+		for (i = 0; i < ARRAY_SIZE(ccdc_fmts); i++) {
+			if (fmt->code == ccdc_fmts[i])
+				break;
+		}
+
+		/* If not found, use SGRBG10 as default */
+		if (i >= ARRAY_SIZE(ccdc_fmts))
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+		/* Clamp the input size. */
+		fmt->width = clamp_t(u32, width, 32, 4096);
+		fmt->height = clamp_t(u32, height, 32, 4096);
+		break;
+
+	case CCDC_PAD_SOURCE_OF:
+		pixelcode = fmt->code;
+		*fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
+
+		/* YUV formats are converted from 2X8 to 1X16 by the bridge and
+		 * can be byte-swapped.
+		 */
+		if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+		    fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+			/* Use the user requested format if YUV. */
+			if (pixelcode == V4L2_MBUS_FMT_YUYV8_2X8 ||
+			    pixelcode == V4L2_MBUS_FMT_UYVY8_2X8 ||
+			    pixelcode == V4L2_MBUS_FMT_YUYV8_1X16 ||
+			    pixelcode == V4L2_MBUS_FMT_UYVY8_1X16)
+				fmt->code = pixelcode;
+
+			if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8)
+				fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
+			else if (fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
+				fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+		}
+
+		/* Hardcode the output size to the crop rectangle size. */
+		crop = __ccdc_get_crop(ccdc, fh, which);
+		fmt->width = crop->width;
+		fmt->height = crop->height;
+		break;
+
+	case CCDC_PAD_SOURCE_VP:
+		*fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
+
+		/* The video port interface truncates the data to 10 bits. */
+		info = omap3isp_video_format_info(fmt->code);
+		fmt->code = info->truncated;
+
+		/* YUV formats are not supported by the video port. */
+		if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+		    fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
+			fmt->code = 0;
+
+		/* The number of lines that can be clocked out from the video
+		 * port output must be at least one line less than the number
+		 * of input lines.
+		 */
+		fmt->width = clamp_t(u32, width, 32, fmt->width);
+		fmt->height = clamp_t(u32, height, 32, fmt->height - 1);
+		break;
+	}
+
+	/* Data is written to memory unpacked; each 10-bit or 12-bit pixel is
+	 * stored in 2 bytes.
+	 */
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ccdc_try_crop - Validate a crop rectangle
+ * @ccdc: ISP CCDC device
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ */
+static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
+			  const struct v4l2_mbus_framefmt *sink,
+			  struct v4l2_rect *crop)
+{
+	const struct isp_format_info *info;
+	unsigned int max_width;
+
+	/* For Bayer formats, restrict left/top and width/height to even values
+	 * to keep the Bayer pattern.
+	 */
+	info = omap3isp_video_format_info(sink->code);
+	if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+		crop->left &= ~1;
+		crop->top &= ~1;
+	}
+
+	crop->left = clamp_t(u32, crop->left, 0, sink->width - CCDC_MIN_WIDTH);
+	crop->top = clamp_t(u32, crop->top, 0, sink->height - CCDC_MIN_HEIGHT);
+
+	/* The data formatter truncates the number of horizontal output pixels
+	 * to a multiple of 16. To avoid clipping data, allow callers to request
+	 * an output size bigger than the input size up to the nearest multiple
+	 * of 16.
+	 */
+	max_width = (sink->width - crop->left + 15) & ~15;
+	crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
+		    & ~15;
+	crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
+			       sink->height - crop->top);
+
+	/* Odd width/height values don't make sense for Bayer formats. */
+	if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+		crop->width &= ~1;
+		crop->height &= ~1;
+	}
+}
+
+/*
+ * ccdc_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	switch (code->pad) {
+	case CCDC_PAD_SINK:
+		if (code->index >= ARRAY_SIZE(ccdc_fmts))
+			return -EINVAL;
+
+		code->code = ccdc_fmts[code->index];
+		break;
+
+	case CCDC_PAD_SOURCE_OF:
+		format = __ccdc_get_format(ccdc, fh, code->pad,
+					   V4L2_SUBDEV_FORMAT_TRY);
+
+		if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+		    format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+			/* In YUV mode the CCDC can swap bytes. */
+			if (code->index == 0)
+				code->code = V4L2_MBUS_FMT_YUYV8_1X16;
+			else if (code->index == 1)
+				code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+			else
+				return -EINVAL;
+		} else {
+			/* In raw mode, no configurable format conversion is
+			 * available.
+			 */
+			if (code->index == 0)
+				code->code = format->code;
+			else
+				return -EINVAL;
+		}
+		break;
+
+	case CCDC_PAD_SOURCE_VP:
+		/* The CCDC supports no configurable format conversion
+		 * compatible with the video port. Enumerate a single output
+		 * format code.
+		 */
+		if (code->index != 0)
+			return -EINVAL;
+
+		format = __ccdc_get_format(ccdc, fh, code->pad,
+					   V4L2_SUBDEV_FORMAT_TRY);
+
+		/* A pixel code equal to 0 means that the video port doesn't
+		 * support the input format. Don't enumerate any pixel code.
+		 */
+		if (format->code == 0)
+			return -EINVAL;
+
+		code->code = format->code;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
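+	/*
+	 * Probe the minimum size by trying a 1x1 format; the maximum is
+	 * probed below with the largest possible values, which
+	 * ccdc_try_format() clamps to the supported limits.
+	 */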
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * ccdc_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the output formatter
+ * source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_selection *sel)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	if (sel->pad != CCDC_PAD_SOURCE_OF)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = INT_MAX;
+		sel->r.height = INT_MAX;
+
+		format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+		ccdc_try_crop(ccdc, format, &sel->r);
+		break;
+
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * ccdc_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the output
+ * formatter source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_selection *sel)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	if (sel->target != V4L2_SEL_TGT_CROP ||
+	    sel->pad != CCDC_PAD_SOURCE_OF)
+		return -EINVAL;
+
+	/* The crop rectangle can't be changed while streaming. */
+	if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+		return -EBUSY;
+
+	/* Modifying the crop rectangle always changes the format on the source
+	 * pad. If the KEEP_CONFIG flag is set, just return the current crop
+	 * rectangle.
+	 */
+	if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+		sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+		return 0;
+	}
+
+	format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+	ccdc_try_crop(ccdc, format, &sel->r);
+	*__ccdc_get_crop(ccdc, fh, sel->which) = sel->r;
+
+	/* Update the source format. */
+	format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, sel->which);
+	ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, sel->which);
+
+	return 0;
+}
+
+/*
+ * ccdc_get_format - Retrieve the video format on a pad
+ * @sd : ISP CCDC V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * ccdc_set_format - Set the video format on a pad
+ * @sd : ISP CCDC V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect *crop;
+
+	format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	ccdc_try_format(ccdc, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == CCDC_PAD_SINK) {
+		/* Reset the crop rectangle. */
+		crop = __ccdc_get_crop(ccdc, fh, fmt->which);
+		crop->left = 0;
+		crop->top = 0;
+		crop->width = fmt->format.width;
+		crop->height = fmt->format.height;
+
+		ccdc_try_crop(ccdc, &fmt->format, crop);
+
+		/* Update the source formats. */
+		format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF,
+					   fmt->which);
+		*format = fmt->format;
+		ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format,
+				fmt->which);
+
+		format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_VP,
+					   fmt->which);
+		*format = fmt->format;
+		ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_VP, format,
+				fmt->which);
+	}
+
+	return 0;
+}
+
+/*
+ * Decide whether the desired output pixel code can be obtained with
+ * the lane shifter by shifting the input pixel code.
+ * @in: input pixelcode to shifter
+ * @out: output pixelcode from shifter
+ * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
+ *
+ * return true if the combination is possible
+ * return false otherwise
+ */
+static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in,
+			      enum v4l2_mbus_pixelcode out,
+			      unsigned int additional_shift)
+{
+	const struct isp_format_info *in_info, *out_info;
+
+	if (in == out)
+		return true;
+
+	in_info = omap3isp_video_format_info(in);
+	out_info = omap3isp_video_format_info(out);
+
+	if ((in_info->flavor == 0) || (out_info->flavor == 0))
+		return false;
+
+	if (in_info->flavor != out_info->flavor)
+		return false;
+
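+	/*
+	 * The total shift, bit depth difference plus the extra bus lane
+	 * offset, must fit within the lane shifter's 6-bit maximum.
+	 */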
+	return in_info->width - out_info->width + additional_shift <= 6;
+}
+
+static int ccdc_link_validate(struct v4l2_subdev *sd,
+			      struct media_link *link,
+			      struct v4l2_subdev_format *source_fmt,
+			      struct v4l2_subdev_format *sink_fmt)
+{
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	unsigned long parallel_shift;
+
+	/* Check if the two ends match */
+	if (source_fmt->format.width != sink_fmt->format.width ||
+	    source_fmt->format.height != sink_fmt->format.height)
+		return -EPIPE;
+
+	/* We've got a parallel sensor here. */
+	if (ccdc->input == CCDC_INPUT_PARALLEL) {
+		struct isp_parallel_platform_data *pdata =
+			&((struct isp_v4l2_subdevs_group *)
+			  media_entity_to_v4l2_subdev(link->source->entity)
+			  ->host_priv)->bus.parallel;
+		parallel_shift = pdata->data_lane_shift * 2;
+	} else {
+		parallel_shift = 0;
+	}
+
+	/* Lane shifter may be used to drop bits on CCDC sink pad */
+	if (!ccdc_is_shiftable(source_fmt->format.code,
+			       sink_fmt->format.code, parallel_shift))
+		return -EPIPE;
+
+	return 0;
+}
+
+/*
+ * ccdc_init_formats - Initialize formats on all pads
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = CCDC_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	ccdc_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = {
+	.ioctl = ccdc_ioctl,
+	.subscribe_event = ccdc_subscribe_event,
+	.unsubscribe_event = ccdc_unsubscribe_event,
+};
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ccdc_v4l2_video_ops = {
+	.s_stream = ccdc_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = {
+	.enum_mbus_code = ccdc_enum_mbus_code,
+	.enum_frame_size = ccdc_enum_frame_size,
+	.get_fmt = ccdc_get_format,
+	.set_fmt = ccdc_set_format,
+	.get_selection = ccdc_get_selection,
+	.set_selection = ccdc_set_selection,
+	.link_validate = ccdc_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ccdc_v4l2_ops = {
+	.core = &ccdc_v4l2_core_ops,
+	.video = &ccdc_v4l2_video_ops,
+	.pad = &ccdc_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ccdc_v4l2_internal_ops = {
+	.open = ccdc_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ccdc_link_setup - Setup CCDC connections
+ * @entity: CCDC media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ccdc_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+	struct isp_device *isp = to_isp_device(ccdc);
+
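+	/*
+	 * Combine the local pad index with the remote entity type so a
+	 * single switch can dispatch on both.
+	 */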
+	switch (local->index | media_entity_type(remote->entity)) {
+	case CCDC_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Read from the sensor (parallel interface), CCP2, CSI2a or
+		 * CSI2c.
+		 */
+		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+			ccdc->input = CCDC_INPUT_NONE;
+			break;
+		}
+
+		if (ccdc->input != CCDC_INPUT_NONE)
+			return -EBUSY;
+
+		if (remote->entity == &isp->isp_ccp2.subdev.entity)
+			ccdc->input = CCDC_INPUT_CCP2B;
+		else if (remote->entity == &isp->isp_csi2a.subdev.entity)
+			ccdc->input = CCDC_INPUT_CSI2A;
+		else if (remote->entity == &isp->isp_csi2c.subdev.entity)
+			ccdc->input = CCDC_INPUT_CSI2C;
+		else
+			ccdc->input = CCDC_INPUT_PARALLEL;
+
+		break;
+
+	/*
+	 * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this when it is implemented, and return -EBUSY for now.
+	 */
+
+	case CCDC_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Write to preview engine, histogram and H3A. When none of
+		 * those links are active, the video port can be disabled.
+		 */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ccdc->output & ~CCDC_OUTPUT_PREVIEW)
+				return -EBUSY;
+			ccdc->output |= CCDC_OUTPUT_PREVIEW;
+		} else {
+			ccdc->output &= ~CCDC_OUTPUT_PREVIEW;
+		}
+		break;
+
+	case CCDC_PAD_SOURCE_OF | MEDIA_ENT_T_DEVNODE:
+		/* Write to memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ccdc->output & ~CCDC_OUTPUT_MEMORY)
+				return -EBUSY;
+			ccdc->output |= CCDC_OUTPUT_MEMORY;
+		} else {
+			ccdc->output &= ~CCDC_OUTPUT_MEMORY;
+		}
+		break;
+
+	case CCDC_PAD_SOURCE_OF | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Write to resizer */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ccdc->output & ~CCDC_OUTPUT_RESIZER)
+				return -EBUSY;
+			ccdc->output |= CCDC_OUTPUT_RESIZER;
+		} else {
+			ccdc->output &= ~CCDC_OUTPUT_RESIZER;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ccdc_media_ops = {
+	.link_setup = ccdc_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
+{
+	v4l2_device_unregister_subdev(&ccdc->subdev);
+	omap3isp_video_unregister(&ccdc->video_out);
+}
+
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+	struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video node. */
+	ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&ccdc->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap3isp_ccdc_unregister_entities(ccdc);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CCDC initialisation and cleanup
+ */
+
+/*
+ * ccdc_init_entities - Initialize V4L2 subdev and media entity
+ * @ccdc: ISP CCDC module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
+{
+	struct v4l2_subdev *sd = &ccdc->subdev;
+	struct media_pad *pads = ccdc->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	ccdc->input = CCDC_INPUT_NONE;
+
+	v4l2_subdev_init(sd, &ccdc_v4l2_ops);
+	sd->internal_ops = &ccdc_v4l2_internal_ops;
+	strlcpy(sd->name, "OMAP3 ISP CCDC", sizeof(sd->name));
+	sd->grp_id = 1 << 16;	/* group ID for isp subdevs */
+	v4l2_set_subdevdata(sd, ccdc);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+	pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &ccdc_media_ops;
+	ret = media_entity_init(me, CCDC_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	ccdc_init_formats(sd, NULL);
+
+	ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	ccdc->video_out.ops = &ccdc_video_ops;
+	ccdc->video_out.isp = to_isp_device(ccdc);
+	ccdc->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+	ccdc->video_out.bpl_alignment = 32;
+
+	ret = omap3isp_video_init(&ccdc->video_out, "CCDC");
+	if (ret < 0)
+		goto error_video;
+
+	/* Connect the CCDC subdev to the video node. */
+	ret = media_entity_create_link(&ccdc->subdev.entity, CCDC_PAD_SOURCE_OF,
+			&ccdc->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap3isp_video_cleanup(&ccdc->video_out);
+error_video:
+	media_entity_cleanup(me);
+	return ret;
+}
+
+/*
+ * omap3isp_ccdc_init - CCDC module initialization.
+ * @isp: Pointer to the OMAP3 ISP device.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap3isp_ccdc_init(struct isp_device *isp)
+{
+	struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+	int ret;
+
+	spin_lock_init(&ccdc->lock);
+	init_waitqueue_head(&ccdc->wait);
+	mutex_init(&ccdc->ioctl_lock);
+
+	ccdc->stopping = CCDC_STOP_NOT_REQUESTED;
+
+	INIT_WORK(&ccdc->lsc.table_work, ccdc_lsc_free_table_work);
+	ccdc->lsc.state = LSC_STATE_STOPPED;
+	INIT_LIST_HEAD(&ccdc->lsc.free_queue);
+	spin_lock_init(&ccdc->lsc.req_lock);
+
+	ccdc->clamp.oblen = 0;
+	ccdc->clamp.dcsubval = 0;
+
+	ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
+	ccdc_apply_controls(ccdc);
+
+	ret = ccdc_init_entities(ccdc);
+	if (ret < 0) {
+		mutex_destroy(&ccdc->ioctl_lock);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * omap3isp_ccdc_cleanup - CCDC module cleanup.
+ * @isp: Pointer to the OMAP3 ISP device.
+ */
+void omap3isp_ccdc_cleanup(struct isp_device *isp)
+{
+	struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
+
+	omap3isp_video_cleanup(&ccdc->video_out);
+	media_entity_cleanup(&ccdc->subdev.entity);
+
+	/* Free LSC requests. As the CCDC is stopped there's no active request,
+	 * so only the pending request and the free queue need to be handled.
+	 */
+	ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
+	cancel_work_sync(&ccdc->lsc.table_work);
+	ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
+
+	if (ccdc->fpc.fpcaddr != 0)
+		omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
+
+	mutex_destroy(&ccdc->ioctl_lock);
+}
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
new file mode 100644
index 0000000..a5da9e1
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -0,0 +1,172 @@
+/*
+ * ispccdc.h
+ *
+ * TI OMAP3 ISP - CCDC module
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CCDC_H
+#define OMAP3_ISP_CCDC_H
+
+#include <linux/omap3isp.h>
+#include <linux/workqueue.h>
+
+#include "ispvideo.h"
+
+enum ccdc_input_entity {
+	CCDC_INPUT_NONE,
+	CCDC_INPUT_PARALLEL,
+	CCDC_INPUT_CSI2A,
+	CCDC_INPUT_CCP2B,
+	CCDC_INPUT_CSI2C
+};
+
+#define CCDC_OUTPUT_MEMORY	(1 << 0)
+#define CCDC_OUTPUT_PREVIEW	(1 << 1)
+#define CCDC_OUTPUT_RESIZER	(1 << 2)
+
+#define	OMAP3ISP_CCDC_NEVENTS	16
+
+enum ispccdc_lsc_state {
+	LSC_STATE_STOPPED = 0,
+	LSC_STATE_STOPPING = 1,
+	LSC_STATE_RUNNING = 2,
+	LSC_STATE_RECONFIG = 3,
+};
+
+struct ispccdc_lsc_config_req {
+	struct list_head list;
+	struct omap3isp_ccdc_lsc_config config;
+	unsigned char enable;
+	u32 table;
+	struct iovm_struct *iovm;
+};
+
+/*
+ * ispccdc_lsc - CCDC LSC parameters
+ * @state: LSC engine state
+ * @table_work: Work used to free queued LSC tables
+ * @req_lock: Protects the LSC request and free queues
+ * @request: LSC configuration requested by userspace
+ * @active: LSC configuration currently programmed in the hardware
+ * @free_queue: LSC configurations queued for freeing
+ */
+struct ispccdc_lsc {
+	enum ispccdc_lsc_state state;
+	struct work_struct table_work;
+
+	/* LSC queue of configurations */
+	spinlock_t req_lock;
+	struct ispccdc_lsc_config_req *request;	/* requested configuration */
+	struct ispccdc_lsc_config_req *active;	/* active configuration */
+	struct list_head free_queue;	/* configurations for freeing */
+};
+
+#define CCDC_STOP_NOT_REQUESTED		0x00
+#define CCDC_STOP_REQUEST		0x01
+#define CCDC_STOP_EXECUTED		(0x02 | CCDC_STOP_REQUEST)
+#define CCDC_STOP_CCDC_FINISHED		0x04
+#define CCDC_STOP_LSC_FINISHED		0x08
+#define CCDC_STOP_FINISHED		\
+	(CCDC_STOP_EXECUTED | CCDC_STOP_CCDC_FINISHED | CCDC_STOP_LSC_FINISHED)
+
+#define CCDC_EVENT_VD1			0x10
+#define CCDC_EVENT_VD0			0x20
+#define CCDC_EVENT_LSC_DONE		0x40
+
+/* Sink and source CCDC pads */
+#define CCDC_PAD_SINK			0
+#define CCDC_PAD_SOURCE_OF		1
+#define CCDC_PAD_SOURCE_VP		2
+#define CCDC_PADS_NUM			3
+
+/*
+ * struct isp_ccdc_device - Structure for the CCDC module to store its own
+ *			    information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @crop: Active crop rectangle on the OF source pad
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @alaw: A-law compression enabled (1) or disabled (0)
+ * @lpf: Low pass filter enabled (1) or disabled (0)
+ * @obclamp: Optical-black clamp enabled (1) or disabled (0)
+ * @fpc_en: Faulty pixels correction enabled (1) or disabled (0)
+ * @blcomp: Black level compensation configuration
+ * @clamp: Optical-black or digital clamp configuration
+ * @fpc: Faulty pixels correction configuration
+ * @lsc: Lens shading compensation configuration
+ * @update: Bitmask of controls to update during the next interrupt
+ * @shadow_update: Controls update in progress by userspace
+ * @underrun: A buffer underrun occurred and a new buffer has been queued
+ * @state: Streaming state
+ * @lock: Serializes shadow_update with interrupt handler
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ * @ioctl_lock: Serializes ioctl calls and LSC requests freeing
+ */
+struct isp_ccdc_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[CCDC_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[CCDC_PADS_NUM];
+	struct v4l2_rect crop;
+
+	enum ccdc_input_entity input;
+	unsigned int output;
+	struct isp_video video_out;
+
+	unsigned int alaw:1,
+		     lpf:1,
+		     obclamp:1,
+		     fpc_en:1;
+	struct omap3isp_ccdc_blcomp blcomp;
+	struct omap3isp_ccdc_bclamp clamp;
+	struct omap3isp_ccdc_fpc fpc;
+	struct ispccdc_lsc lsc;
+	unsigned int update;
+	unsigned int shadow_update;
+
+	unsigned int underrun:1;
+	enum isp_pipeline_stream_state state;
+	spinlock_t lock;
+	wait_queue_head_t wait;
+	unsigned int stopping;
+	struct mutex ioctl_lock;
+};
+
+struct isp_device;
+
+int omap3isp_ccdc_init(struct isp_device *isp);
+void omap3isp_ccdc_cleanup(struct isp_device *isp);
+int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+	struct v4l2_device *vdev);
+void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc);
+
+int omap3isp_ccdc_busy(struct isp_ccdc_device *isp_ccdc);
+int omap3isp_ccdc_isr(struct isp_ccdc_device *isp_ccdc, u32 events);
+void omap3isp_ccdc_restore_context(struct isp_device *isp);
+void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
+	unsigned int *max_rate);
+
+#endif	/* OMAP3_ISP_CCDC_H */
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
new file mode 100644
index 0000000..85f0de8
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -0,0 +1,1171 @@
+/*
+ * ispccp2.c
+ *
+ * TI OMAP3 ISP - CCP2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispccp2.h"
+
+/* Number of LCX channels */
+#define CCP2_LCx_CHANS_NUM			3
+/* Max/Min size for CCP2 video port */
+#define ISPCCP2_DAT_START_MIN			0
+#define ISPCCP2_DAT_START_MAX			4095
+#define ISPCCP2_DAT_SIZE_MIN			0
+#define ISPCCP2_DAT_SIZE_MAX			4095
+#define ISPCCP2_VPCLK_FRACDIV			65536
+#define ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP	0x12
+#define ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP	0x16
+/* Max/Min size for CCP2 memory channel */
+#define ISPCCP2_LCM_HSIZE_COUNT_MIN		16
+#define ISPCCP2_LCM_HSIZE_COUNT_MAX		8191
+#define ISPCCP2_LCM_HSIZE_SKIP_MIN		0
+#define ISPCCP2_LCM_HSIZE_SKIP_MAX		8191
+#define ISPCCP2_LCM_VSIZE_MIN			1
+#define ISPCCP2_LCM_VSIZE_MAX			8191
+#define ISPCCP2_LCM_HWORDS_MIN			1
+#define ISPCCP2_LCM_HWORDS_MAX			4095
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_32X		5
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_FULL	0
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10	2
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8	2
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10	3
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10	3
+#define ISPCCP2_LCM_CTRL_DST_PORT_VP		0
+#define ISPCCP2_LCM_CTRL_DST_PORT_MEM		1
+
+/* Set only the required bits */
+#define BIT_SET(var, shift, mask, val)			\
+	do {						\
+		var = ((var) & ~((mask) << (shift)))	\
+			| ((val) << (shift));		\
+	} while (0)
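+
+/*
+ * For example, BIT_SET(val, 4, 0x3, 2) clears bits 5:4 of val and writes the
+ * value 2 into that two-bit field, leaving all other bits untouched.
+ */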
+
+/*
+ * ccp2_print_status - Print current CCP2 module register values.
+ */
+#define CCP2_PRINT_REGISTER(isp, name)\
+	dev_dbg(isp->dev, "###CCP2 " #name "=0x%08x\n", \
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_##name))
+
+static void ccp2_print_status(struct isp_ccp2_device *ccp2)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+
+	dev_dbg(isp->dev, "-------------CCP2 Register dump-------------\n");
+
+	CCP2_PRINT_REGISTER(isp, SYSCONFIG);
+	CCP2_PRINT_REGISTER(isp, SYSSTATUS);
+	CCP2_PRINT_REGISTER(isp, LC01_IRQENABLE);
+	CCP2_PRINT_REGISTER(isp, LC01_IRQSTATUS);
+	CCP2_PRINT_REGISTER(isp, LC23_IRQENABLE);
+	CCP2_PRINT_REGISTER(isp, LC23_IRQSTATUS);
+	CCP2_PRINT_REGISTER(isp, LCM_IRQENABLE);
+	CCP2_PRINT_REGISTER(isp, LCM_IRQSTATUS);
+	CCP2_PRINT_REGISTER(isp, CTRL);
+	CCP2_PRINT_REGISTER(isp, LCx_CTRL(0));
+	CCP2_PRINT_REGISTER(isp, LCx_CODE(0));
+	CCP2_PRINT_REGISTER(isp, LCx_STAT_START(0));
+	CCP2_PRINT_REGISTER(isp, LCx_STAT_SIZE(0));
+	CCP2_PRINT_REGISTER(isp, LCx_SOF_ADDR(0));
+	CCP2_PRINT_REGISTER(isp, LCx_EOF_ADDR(0));
+	CCP2_PRINT_REGISTER(isp, LCx_DAT_START(0));
+	CCP2_PRINT_REGISTER(isp, LCx_DAT_SIZE(0));
+	CCP2_PRINT_REGISTER(isp, LCx_DAT_PING_ADDR(0));
+	CCP2_PRINT_REGISTER(isp, LCx_DAT_PONG_ADDR(0));
+	CCP2_PRINT_REGISTER(isp, LCx_DAT_OFST(0));
+	CCP2_PRINT_REGISTER(isp, LCM_CTRL);
+	CCP2_PRINT_REGISTER(isp, LCM_VSIZE);
+	CCP2_PRINT_REGISTER(isp, LCM_HSIZE);
+	CCP2_PRINT_REGISTER(isp, LCM_PREFETCH);
+	CCP2_PRINT_REGISTER(isp, LCM_SRC_ADDR);
+	CCP2_PRINT_REGISTER(isp, LCM_SRC_OFST);
+	CCP2_PRINT_REGISTER(isp, LCM_DST_ADDR);
+	CCP2_PRINT_REGISTER(isp, LCM_DST_OFST);
+
+	dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * ccp2_reset - Reset the CCP2
+ * @ccp2: pointer to ISP CCP2 device
+ */
+static void ccp2_reset(struct isp_ccp2_device *ccp2)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+	int i = 0;
+
+	/* Reset the CSI1/CCP2B and wait for reset to complete */
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG,
+		    ISPCCP2_SYSCONFIG_SOFT_RESET);
+	while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSSTATUS) &
+		 ISPCCP2_SYSSTATUS_RESET_DONE)) {
+		udelay(10);
+		if (i++ > 10) {  /* try reading 10 times */
+			dev_warn(isp->dev,
+				"omap3_isp: timeout waiting for ccp2 reset\n");
+			break;
+		}
+	}
+}
+
+/*
+ * ccp2_pwr_cfg - Configure the power mode settings
+ * @ccp2: pointer to ISP CCP2 device
+ */
+static void ccp2_pwr_cfg(struct isp_ccp2_device *ccp2)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+
+	isp_reg_writel(isp, ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART |
+			((isp->revision == ISP_REVISION_15_0 && isp->autoidle) ?
+			  ISPCCP2_SYSCONFIG_AUTO_IDLE : 0),
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG);
+}
+
+/*
+ * ccp2_if_enable - Enable CCP2 interface.
+ * @ccp2: pointer to ISP CCP2 device
+ * @enable: enable/disable flag
+ */
+static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+	int i;
+
+	if (enable && ccp2->vdds_csib)
+		regulator_enable(ccp2->vdds_csib);
+
+	/* Enable/Disable all the LCx channels */
+	for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
+		isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
+				ISPCCP2_LCx_CTRL_CHAN_EN,
+				enable ? ISPCCP2_LCx_CTRL_CHAN_EN : 0);
+
+	/* Enable/Disable ccp2 interface in ccp2 mode */
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+			ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN,
+			enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);
+
+	if (!enable && ccp2->vdds_csib)
+		regulator_disable(ccp2->vdds_csib);
+}
+
+/*
+ * ccp2_mem_enable - Enable CCP2 memory interface.
+ * @ccp2: pointer to ISP CCP2 device
+ * @enable: enable/disable flag
+ */
+static void ccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+
+	if (enable)
+		ccp2_if_enable(ccp2, 0);
+
+	/* Enable/Disable ccp2 interface in ccp2 mode */
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+			ISPCCP2_CTRL_MODE, enable ? ISPCCP2_CTRL_MODE : 0);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL,
+			ISPCCP2_LCM_CTRL_CHAN_EN,
+			enable ? ISPCCP2_LCM_CTRL_CHAN_EN : 0);
+}
+
+/*
+ * ccp2_phyif_config - Initialize CCP2 phy interface config
+ * @ccp2: Pointer to ISP CCP2 device
+ * @pdata: CCP2 platform data
+ *
+ * Configure the CCP2 physical interface module from platform data.
+ *
+ * Returns -EIO if strobe is chosen in CSI1 mode, or 0 on success.
+ */
+static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
+			     const struct isp_ccp2_platform_data *pdata)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+	u32 val;
+
+	/* CCP2B mode */
+	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) |
+			    ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE;
+	/* Data/strobe physical layer */
+	BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
+		pdata->phy_layer);
+	BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
+		pdata->strobe_clk_pol);
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+
+	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+	if (!(val & ISPCCP2_CTRL_MODE)) {
+		if (pdata->ccp2_mode == ISP_CCP2_MODE_CCP2)
+			dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
+		if (pdata->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
+			/* Strobe mode requires CCP2 */
+			return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * ccp2_vp_config - Initialize CCP2 video port interface.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vpclk_div: Video port divisor
+ *
+ * Configure the CCP2 video port with the given clock divisor. The valid divisor
+ * values depend on the ISP revision:
+ *
+ * - revision 1.0 and 2.0	1 to 4
+ * - revision 15.0		1 to 65536
+ *
+ * The exact divisor value used might differ from the requested value, as ISP
+ * revision 15.0 represents the divisor as 65536 divided by an integer.
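+ *
+ * For instance, on revision 15.0 a requested divisor of 3 is programmed as
+ * 65536 / 3 = 21845, which corresponds to an effective divisor of
+ * 65536 / 21845, i.e. slightly more than 3.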
+ */
+static void ccp2_vp_config(struct isp_ccp2_device *ccp2,
+			   unsigned int vpclk_div)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+	u32 val;
+
+	/* ISPCCP2_CTRL Video port */
+	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+	val |= ISPCCP2_CTRL_VP_ONLY_EN;	/* Disable the memory write port */
+
+	if (isp->revision == ISP_REVISION_15_0) {
+		vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 65536);
+		vpclk_div = min(ISPCCP2_VPCLK_FRACDIV / vpclk_div, 65535U);
+		BIT_SET(val, ISPCCP2_CTRL_VPCLK_DIV_SHIFT,
+			ISPCCP2_CTRL_VPCLK_DIV_MASK, vpclk_div);
+	} else {
+		vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 4);
+		BIT_SET(val, ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT,
+			ISPCCP2_CTRL_VP_OUT_CTRL_MASK, vpclk_div - 1);
+	}
+
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
+}
+
+/*
+ * ccp2_lcx_config - Initialize CCP2 logical channel interface.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @config: Pointer to ISP LCx config structure.
+ *
+ * Analyze the parameters passed in the interface configuration and program
+ * the CSI1/CCP2 logical channel accordingly.
+ */
+static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
+			    struct isp_interface_lcx_config *config)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+	u32 val, format;
+
+	switch (config->format) {
+	case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+		format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
+		break;
+	case V4L2_MBUS_FMT_SGRBG10_1X10:
+	default:
+		format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP;	/* RAW10+VP */
+		break;
+	}
+	/* ISPCCP2_LCx_CTRL logical channel #0 */
+	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0))
+			    | (ISPCCP2_LCx_CTRL_REGION_EN); /* Region */
+
+	if (isp->revision == ISP_REVISION_15_0) {
+		/* CRC */
+		BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0,
+			ISPCCP2_LCx_CTRL_CRC_MASK,
+			config->crc);
+		/* Format = RAW10+VP or RAW8+DPCM10+VP*/
+		BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0,
+			ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0, format);
+	} else {
+		BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT,
+			ISPCCP2_LCx_CTRL_CRC_MASK,
+			config->crc);
+
+		BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT,
+			ISPCCP2_LCx_CTRL_FORMAT_MASK, format);
+	}
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0));
+
+	/* ISPCCP2_DAT_START for logical channel #0 */
+	isp_reg_writel(isp, config->data_start << ISPCCP2_LCx_DAT_SHIFT,
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_START(0));
+
+	/* ISPCCP2_DAT_SIZE for logical channel #0 */
+	isp_reg_writel(isp, config->data_size << ISPCCP2_LCx_DAT_SHIFT,
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_SIZE(0));
+
+	/* Enable error IRQs for logical channel #0 */
+	val = ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
+	      ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
+	      ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
+	      ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
+	      ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
+	      ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
+
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQSTATUS);
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQENABLE, val);
+}
+
+/*
+ * ccp2_if_configure - Configure ccp2 with data from sensor
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * Return 0 on success or a negative error code
+ */
+static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
+{
+	const struct isp_v4l2_subdevs_group *pdata;
+	struct v4l2_mbus_framefmt *format;
+	struct media_pad *pad;
+	struct v4l2_subdev *sensor;
+	u32 lines = 0;
+	int ret;
+
+	ccp2_pwr_cfg(ccp2);
+
+	pad = media_entity_remote_source(&ccp2->pads[CCP2_PAD_SINK]);
+	sensor = media_entity_to_v4l2_subdev(pad->entity);
+	pdata = sensor->host_priv;
+
+	ret = ccp2_phyif_config(ccp2, &pdata->bus.ccp2);
+	if (ret < 0)
+		return ret;
+
+	ccp2_vp_config(ccp2, pdata->bus.ccp2.vpclk_div + 1);
+
+	v4l2_subdev_call(sensor, sensor, g_skip_top_lines, &lines);
+
+	format = &ccp2->formats[CCP2_PAD_SINK];
+
+	ccp2->if_cfg.data_start = lines;
+	ccp2->if_cfg.crc = pdata->bus.ccp2.crc;
+	ccp2->if_cfg.format = format->code;
+	ccp2->if_cfg.data_size = format->height;
+
+	ccp2_lcx_config(ccp2, &ccp2->if_cfg);
+
+	return 0;
+}
+
+static int ccp2_adjust_bandwidth(struct isp_ccp2_device *ccp2)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+	struct isp_device *isp = to_isp_device(ccp2);
+	const struct v4l2_mbus_framefmt *ofmt = &ccp2->formats[CCP2_PAD_SOURCE];
+	unsigned long l3_ick = pipe->l3_ick;
+	struct v4l2_fract *timeperframe;
+	unsigned int vpclk_div = 2;
+	unsigned int value;
+	u64 bound;
+	u64 area;
+
+	/* Compute the minimum clock divisor, based on the pipeline maximum
+	 * data rate. This is an absolute lower bound if we don't want SBL
+	 * overflows, so round the value up.
+	 */
+	vpclk_div = max_t(unsigned int, DIV_ROUND_UP(l3_ick, pipe->max_rate),
+			  vpclk_div);
+
+	/* Compute the maximum clock divisor, based on the requested frame rate.
+	 * This is a soft lower bound to achieve a frame rate equal or higher
+	 * than the requested value, so round the value down.
+	 */
+	timeperframe = &pipe->max_timeperframe;
+
+	if (timeperframe->numerator) {
+		area = ofmt->width * ofmt->height;
+		bound = div_u64(area * timeperframe->denominator,
+				timeperframe->numerator);
+		value = min_t(u64, bound, l3_ick);
+		vpclk_div = max_t(unsigned int, l3_ick / value, vpclk_div);
+	}
+
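+	/*
+	 * Example with made-up numbers: l3_ick = 200 MHz and pipe->max_rate =
+	 * 50M give a hard bound of DIV_ROUND_UP(200M, 50M) = 4, while a
+	 * 2592x1944 frame at 15 fps corresponds to roughly 75.6M pixels per
+	 * second, for which l3_ick / value = 2; the larger bound (4) wins.
+	 */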
+	dev_dbg(isp->dev, "%s: minimum clock divisor = %u\n", __func__,
+		vpclk_div);
+
+	return vpclk_div;
+}
+
+/*
+ * ccp2_mem_configure - Initialize CCP2 memory input/output interface
+ * @ccp2: Pointer to ISP CCP2 device
+ * @config: Pointer to ISP mem interface config structure
+ *
+ * This will analyze the parameters passed by the interface config
+ * structure, and configure the respective registers for proper
+ * CSI1/CCP2 memory input.
+ */
+static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
+			       struct isp_interface_mem_config *config)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+	u32 sink_pixcode = ccp2->formats[CCP2_PAD_SINK].code;
+	u32 source_pixcode = ccp2->formats[CCP2_PAD_SOURCE].code;
+	unsigned int dpcm_decompress = 0;
+	u32 val, hwords;
+
+	if (sink_pixcode != source_pixcode &&
+	    sink_pixcode == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
+		dpcm_decompress = 1;
+
+	ccp2_pwr_cfg(ccp2);
+
+	/* Hsize, Skip */
+	isp_reg_writel(isp, ISPCCP2_LCM_HSIZE_SKIP_MIN |
+		       (config->hsize_count << ISPCCP2_LCM_HSIZE_SHIFT),
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_HSIZE);
+
+	/* Vsize, no. of lines */
+	isp_reg_writel(isp, config->vsize_count << ISPCCP2_LCM_VSIZE_SHIFT,
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_VSIZE);
+
+	if (ccp2->video_in.bpl_padding == 0)
+		config->src_ofst = 0;
+	else
+		config->src_ofst = ccp2->video_in.bpl_value;
+
+	isp_reg_writel(isp, config->src_ofst, OMAP3_ISP_IOMEM_CCP2,
+		       ISPCCP2_LCM_SRC_OFST);
+
+	/* Source and Destination formats */
+	val = ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 <<
+	      ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT;
+
+	if (dpcm_decompress) {
+		/* source format is RAW8 */
+		val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 <<
+		       ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
+
+		/* RAW8 + DPCM10 - simple predictor */
+		val |= ISPCCP2_LCM_CTRL_SRC_DPCM_PRED;
+
+		/* enable source DPCM decompression */
+		val |= ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 <<
+		       ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT;
+	} else {
+		/* source format is RAW10 */
+		val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 <<
+		       ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
+	}
+
+	/* Burst size to 32x64 */
+	val |= ISPCCP2_LCM_CTRL_BURST_SIZE_32X <<
+	       ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT;
+
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL);
+
+	/* Prefetch setup */
+	if (dpcm_decompress)
+		hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
+			  config->hsize_count) >> 3;
+	else
+		hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
+			  config->hsize_count) >> 2;
+
+	isp_reg_writel(isp, hwords << ISPCCP2_LCM_PREFETCH_SHIFT,
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_PREFETCH);
+
+	/* Video port */
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
+		    ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE);
+	ccp2_vp_config(ccp2, ccp2_adjust_bandwidth(ccp2));
+
+	/* Clear LCM interrupts */
+	isp_reg_writel(isp, ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ |
+		       ISPCCP2_LCM_IRQSTATUS_EOF_IRQ,
+		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQSTATUS);
+
+	/* Enable LCM interrupts */
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQENABLE,
+		    ISPCCP2_LCM_IRQSTATUS_EOF_IRQ |
+		    ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ);
+}
+
+/*
+ * ccp2_set_inaddr - Sets memory address of input frame.
+ * @ccp2: Pointer to ISP CCP2 device
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Configures the memory address from which the input frame is to be read.
+ */
+static void ccp2_set_inaddr(struct isp_ccp2_device *ccp2, u32 addr)
+{
+	struct isp_device *isp = to_isp_device(ccp2);
+
+	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_ADDR);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+	struct isp_buffer *buffer;
+
+	buffer = omap3isp_video_buffer_next(&ccp2->video_in);
+	if (buffer != NULL)
+		ccp2_set_inaddr(ccp2, buffer->isp_addr);
+
+	pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+
+	if (ccp2->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
+		if (isp_pipeline_ready(pipe))
+			omap3isp_pipeline_set_stream(pipe,
+						ISP_PIPELINE_STREAM_SINGLESHOT);
+	}
+}
+
+/*
+ * omap3isp_ccp2_isr - Handle ISP CCP2 interrupts
+ * @ccp2: Pointer to ISP CCP2 device
+ *
+ * This will handle the CCP2 interrupts
+ */
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
+	struct isp_device *isp = to_isp_device(ccp2);
+	static const u32 ISPCCP2_LC01_ERROR =
+		ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
+		ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
+		ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
+		ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
+		ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
+		ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
+	u32 lcx_irqstatus, lcm_irqstatus;
+
+	/* First clear the interrupts */
+	lcx_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
+				      ISPCCP2_LC01_IRQSTATUS);
+	isp_reg_writel(isp, lcx_irqstatus, OMAP3_ISP_IOMEM_CCP2,
+		       ISPCCP2_LC01_IRQSTATUS);
+
+	lcm_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
+				      ISPCCP2_LCM_IRQSTATUS);
+	isp_reg_writel(isp, lcm_irqstatus, OMAP3_ISP_IOMEM_CCP2,
+		       ISPCCP2_LCM_IRQSTATUS);
+	/* Errors */
+	if (lcx_irqstatus & ISPCCP2_LC01_ERROR) {
+		pipe->error = true;
+		dev_dbg(isp->dev, "CCP2 err:%x\n", lcx_irqstatus);
+		return;
+	}
+
+	if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ) {
+		pipe->error = true;
+		dev_dbg(isp->dev, "CCP2 OCP err:%x\n", lcm_irqstatus);
+	}
+
+	if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
+		return;
+
+	/* Handle queued buffers on frame end interrupts */
+	if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
+		ccp2_isr_buffer(ccp2);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static const unsigned int ccp2_fmts[] = {
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+};
+
+/*
+ * __ccp2_get_format - helper function for getting ccp2 format
+ * @ccp2  : Pointer to ISP CCP2 device
+ * @fh    : V4L2 subdev file handle
+ * @pad   : pad number
+ * @which : wanted subdev format
+ * return format structure or NULL on error
+ */
+static struct v4l2_mbus_framefmt *
+__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_fh *fh,
+		     unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &ccp2->formats[pad];
+}
+
+/*
+ * ccp2_try_format - Handle try format by pad subdev method
+ * @ccp2  : Pointer to ISP CCP2 device
+ * @fh    : V4L2 subdev file handle
+ * @pad   : pad num
+ * @fmt   : pointer to v4l2 mbus format structure
+ * @which : wanted subdev format
+ */
+static void ccp2_try_format(struct isp_ccp2_device *ccp2,
+			       struct v4l2_subdev_fh *fh, unsigned int pad,
+			       struct v4l2_mbus_framefmt *fmt,
+			       enum v4l2_subdev_format_whence which)
+{
+	struct v4l2_mbus_framefmt *format;
+
+	switch (pad) {
+	case CCP2_PAD_SINK:
+		if (fmt->code != V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+		if (ccp2->input == CCP2_INPUT_SENSOR) {
+			fmt->width = clamp_t(u32, fmt->width,
+					     ISPCCP2_DAT_START_MIN,
+					     ISPCCP2_DAT_START_MAX);
+			fmt->height = clamp_t(u32, fmt->height,
+					      ISPCCP2_DAT_SIZE_MIN,
+					      ISPCCP2_DAT_SIZE_MAX);
+		} else if (ccp2->input == CCP2_INPUT_MEMORY) {
+			fmt->width = clamp_t(u32, fmt->width,
+					     ISPCCP2_LCM_HSIZE_COUNT_MIN,
+					     ISPCCP2_LCM_HSIZE_COUNT_MAX);
+			fmt->height = clamp_t(u32, fmt->height,
+					      ISPCCP2_LCM_VSIZE_MIN,
+					      ISPCCP2_LCM_VSIZE_MAX);
+		}
+		break;
+
+	case CCP2_PAD_SOURCE:
+		/* Source format - copy sink format and change pixel code
+		 * to SGRBG10_1X10 as we don't support CCP2 write to memory.
+		 * When the CCP2 write-to-memory feature is added this
+		 * should be updated accordingly.
+		 */
+		format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which);
+		memcpy(fmt, format, sizeof(*fmt));
+		fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+		break;
+	}
+
+	fmt->field = V4L2_FIELD_NONE;
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/*
+ * ccp2_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh     : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	if (code->pad == CCP2_PAD_SINK) {
+		if (code->index >= ARRAY_SIZE(ccp2_fmts))
+			return -EINVAL;
+
+		code->code = ccp2_fmts[code->index];
+	} else {
+		if (code->index != 0)
+			return -EINVAL;
+
+		format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK,
+					      V4L2_SUBDEV_FORMAT_TRY);
+		code->code = format->code;
+	}
+
+	return 0;
+}
+
+static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_fh *fh,
+				   struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	ccp2_try_format(ccp2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	ccp2_try_format(ccp2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * ccp2_get_format - Handle get format by pads subdev method
+ * @sd    : pointer to v4l2 subdev structure
+ * @fh    : V4L2 subdev file handle
+ * @fmt   : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ccp2_get_format(ccp2, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * ccp2_set_format - Handle set format by pads subdev method
+ * @sd    : pointer to v4l2 subdev structure
+ * @fh    : V4L2 subdev file handle
+ * @fmt   : pointer to v4l2 subdev format structure
+ * returns zero
+ */
+static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ccp2_get_format(ccp2, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	ccp2_try_format(ccp2, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == CCP2_PAD_SINK) {
+		format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SOURCE,
+					   fmt->which);
+		*format = fmt->format;
+		ccp2_try_format(ccp2, fh, CCP2_PAD_SOURCE, format, fmt->which);
+	}
+
+	return 0;
+}
+
+/*
+ * ccp2_init_formats - Initialize formats on all pads
+ * @sd: ISP CCP2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = CCP2_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	ccp2_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/*
+ * ccp2_s_stream - Enable/Disable streaming on ccp2 subdev
+ * @sd    : pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ * return zero
+ */
+static int ccp2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+	struct isp_device *isp = to_isp_device(ccp2);
+	struct device *dev = to_device(ccp2);
+	int ret;
+
+	if (ccp2->state == ISP_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISP_PIPELINE_STREAM_STOPPED)
+			return 0;
+		atomic_set(&ccp2->stopping, 0);
+	}
+
+	switch (enable) {
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		if (ccp2->phy) {
+			ret = omap3isp_csiphy_acquire(ccp2->phy);
+			if (ret < 0)
+				return ret;
+		}
+
+		ccp2_if_configure(ccp2);
+		ccp2_print_status(ccp2);
+
+		/* Enable CSI1/CCP2 interface */
+		ccp2_if_enable(ccp2, 1);
+		break;
+
+	case ISP_PIPELINE_STREAM_SINGLESHOT:
+		if (ccp2->state != ISP_PIPELINE_STREAM_SINGLESHOT) {
+			struct v4l2_mbus_framefmt *format;
+
+			format = &ccp2->formats[CCP2_PAD_SINK];
+
+			ccp2->mem_cfg.hsize_count = format->width;
+			ccp2->mem_cfg.vsize_count = format->height;
+			ccp2->mem_cfg.src_ofst = 0;
+
+			ccp2_mem_configure(ccp2, &ccp2->mem_cfg);
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI1_READ);
+			ccp2_print_status(ccp2);
+		}
+		ccp2_mem_enable(ccp2, 1);
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+		if (omap3isp_module_sync_idle(&sd->entity, &ccp2->wait,
+					      &ccp2->stopping))
+			dev_dbg(dev, "%s: module stop timeout.\n", sd->name);
+		if (ccp2->input == CCP2_INPUT_MEMORY) {
+			ccp2_mem_enable(ccp2, 0);
+			omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI1_READ);
+		} else if (ccp2->input == CCP2_INPUT_SENSOR) {
+			/* Disable CSI1/CCP2 interface */
+			ccp2_if_enable(ccp2, 0);
+			if (ccp2->phy)
+				omap3isp_csiphy_release(ccp2->phy);
+		}
+		break;
+	}
+
+	ccp2->state = enable;
+	return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops ccp2_sd_video_ops = {
+	.s_stream = ccp2_s_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops ccp2_sd_pad_ops = {
+	.enum_mbus_code = ccp2_enum_mbus_code,
+	.enum_frame_size = ccp2_enum_frame_size,
+	.get_fmt = ccp2_get_format,
+	.set_fmt = ccp2_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops ccp2_sd_ops = {
+	.video = &ccp2_sd_video_ops,
+	.pad = &ccp2_sd_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops ccp2_sd_internal_ops = {
+	.open = ccp2_init_formats,
+};
+
+/* --------------------------------------------------------------------------
+ * ISP ccp2 video device node
+ */
+
+/*
+ * ccp2_video_queue - Queue video buffer.
+ * @video : Pointer to isp video structure
+ * @buffer: Pointer to isp_buffer structure
+ * return -EIO or zero on success
+ */
+static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+	struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
+
+	ccp2_set_inaddr(ccp2, buffer->isp_addr);
+	return 0;
+}
+
+static const struct isp_video_operations ccp2_video_ops = {
+	.queue = ccp2_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ccp2_link_setup - Setup ccp2 connections.
+ * @entity : Pointer to media entity structure
+ * @local  : Pointer to local pad
+ * @remote : Pointer to remote pad
+ * @flags  : Link flags
+ * return -EINVAL on error or zero on success
+ */
+static int ccp2_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
+
+	switch (local->index | media_entity_type(remote->entity)) {
+	case CCP2_PAD_SINK | MEDIA_ENT_T_DEVNODE:
+		/* read from memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ccp2->input == CCP2_INPUT_SENSOR)
+				return -EBUSY;
+			ccp2->input = CCP2_INPUT_MEMORY;
+		} else {
+			if (ccp2->input == CCP2_INPUT_MEMORY)
+				ccp2->input = CCP2_INPUT_NONE;
+		}
+		break;
+
+	case CCP2_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* read from sensor/phy */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ccp2->input == CCP2_INPUT_MEMORY)
+				return -EBUSY;
+			ccp2->input = CCP2_INPUT_SENSOR;
+		} else {
+			if (ccp2->input == CCP2_INPUT_SENSOR)
+				ccp2->input = CCP2_INPUT_NONE;
+		}
+		break;
+
+	case CCP2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* write to video port/ccdc */
+		if (flags & MEDIA_LNK_FL_ENABLED)
+			ccp2->output = CCP2_OUTPUT_CCDC;
+		else
+			ccp2->output = CCP2_OUTPUT_NONE;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ccp2_media_ops = {
+	.link_setup = ccp2_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * omap3isp_ccp2_unregister_entities - Unregister media entities: subdev
+ * @ccp2: Pointer to ISP CCP2 device
+ */
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2)
+{
+	v4l2_device_unregister_subdev(&ccp2->subdev);
+	omap3isp_video_unregister(&ccp2->video_in);
+}
+
+/*
+ * omap3isp_ccp2_register_entities - Register the subdev media entity
+ * @ccp2: Pointer to ISP CCP2 device
+ * @vdev: Pointer to v4l device
+ * return negative error code or zero on success
+ */
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+				    struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video nodes. */
+	ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&ccp2->video_in, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap3isp_ccp2_unregister_entities(ccp2);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP ccp2 initialisation and cleanup
+ */
+
+/*
+ * ccp2_init_entities - Initialize ccp2 subdev and media entity.
+ * @ccp2: Pointer to ISP CCP2 device
+ * return negative error code or zero on success
+ */
+static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
+{
+	struct v4l2_subdev *sd = &ccp2->subdev;
+	struct media_pad *pads = ccp2->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	ccp2->input = CCP2_INPUT_NONE;
+	ccp2->output = CCP2_OUTPUT_NONE;
+
+	v4l2_subdev_init(sd, &ccp2_sd_ops);
+	sd->internal_ops = &ccp2_sd_internal_ops;
+	strlcpy(sd->name, "OMAP3 ISP CCP2", sizeof(sd->name));
+	sd->grp_id = 1 << 16;   /* group ID for isp subdevs */
+	v4l2_set_subdevdata(sd, ccp2);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &ccp2_media_ops;
+	ret = media_entity_init(me, CCP2_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	ccp2_init_formats(sd, NULL);
+
+	/*
+	 * The CCP2 has weird line alignment requirements, possibly caused by
+	 * DPCM8 decompression. Line length for data read from memory must be a
+	 * multiple of 128 bits (16 bytes) in continuous mode (when no padding
+	 * is present at end of lines). Additionally, if padding is used, the
+	 * padded line length must be a multiple of 32 bytes. To simplify the
+	 * implementation we use a fixed 32 bytes alignment regardless of the
+	 * input format and width. If strict 128-bit alignment support is
+	 * required, ispvideo will need to be made aware of these special dual
+	 * alignment requirements.
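+	 *
+	 * As an illustration, a RAW10 line stored as 16 bits per pixel and
+	 * 1021 pixels wide occupies 2042 bytes, which the fixed 32 bytes
+	 * alignment rounds up to a 2048 bytes line length.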
+	 */
+	ccp2->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	ccp2->video_in.bpl_alignment = 32;
+	ccp2->video_in.bpl_max = 0xffffffe0;
+	ccp2->video_in.isp = to_isp_device(ccp2);
+	ccp2->video_in.ops = &ccp2_video_ops;
+	ccp2->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+	ret = omap3isp_video_init(&ccp2->video_in, "CCP2");
+	if (ret < 0)
+		goto error_video;
+
+	/* Connect the video node to the ccp2 subdev. */
+	ret = media_entity_create_link(&ccp2->video_in.video.entity, 0,
+				       &ccp2->subdev.entity, CCP2_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap3isp_video_cleanup(&ccp2->video_in);
+error_video:
+	media_entity_cleanup(&ccp2->subdev.entity);
+	return ret;
+}
+
+/*
+ * omap3isp_ccp2_init - CCP2 initialization.
+ * @isp : Pointer to ISP device
+ * return negative error code or zero on success
+ */
+int omap3isp_ccp2_init(struct isp_device *isp)
+{
+	struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+	int ret;
+
+	init_waitqueue_head(&ccp2->wait);
+
+	/*
+	 * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO
+	 * complex, which is powered by vdds_csib power rail. Hence the
+	 * request for the regulator.
+	 *
+	 * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
+	 * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
+	 * configured.
+	 *
+	 * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
+	 */
+	if (isp->revision == ISP_REVISION_2_0) {
+		ccp2->vdds_csib = regulator_get(isp->dev, "vdds_csib");
+		if (IS_ERR(ccp2->vdds_csib)) {
+			dev_dbg(isp->dev,
+				"Could not get regulator vdds_csib\n");
+			ccp2->vdds_csib = NULL;
+		}
+	} else if (isp->revision == ISP_REVISION_15_0) {
+		ccp2->phy = &isp->isp_csiphy1;
+	}
+
+	ret = ccp2_init_entities(ccp2);
+	if (ret < 0) {
+		regulator_put(ccp2->vdds_csib);
+		return ret;
+	}
+
+	ccp2_reset(ccp2);
+	return 0;
+}
+
+/*
+ * omap3isp_ccp2_cleanup - CCP2 un-initialization
+ * @isp : Pointer to ISP device
+ */
+void omap3isp_ccp2_cleanup(struct isp_device *isp)
+{
+	struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+
+	omap3isp_video_cleanup(&ccp2->video_in);
+	media_entity_cleanup(&ccp2->subdev.entity);
+
+	regulator_put(ccp2->vdds_csib);
+}
diff --git a/drivers/media/platform/omap3isp/ispccp2.h b/drivers/media/platform/omap3isp/ispccp2.h
new file mode 100644
index 0000000..76d65f4
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispccp2.h
@@ -0,0 +1,98 @@
+/*
+ * ispccp2.h
+ *
+ * TI OMAP3 ISP - CCP2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CCP2_H
+#define OMAP3_ISP_CCP2_H
+
+#include <linux/videodev2.h>
+
+struct isp_device;
+struct isp_csiphy;
+
+/* Sink and source ccp2 pads */
+#define CCP2_PAD_SINK			0
+#define CCP2_PAD_SOURCE			1
+#define CCP2_PADS_NUM			2
+
+/* CCP2 input media entity */
+enum ccp2_input_entity {
+	CCP2_INPUT_NONE,
+	CCP2_INPUT_SENSOR,
+	CCP2_INPUT_MEMORY,
+};
+
+/* CCP2 output media entity */
+enum ccp2_output_entity {
+	CCP2_OUTPUT_NONE,
+	CCP2_OUTPUT_CCDC,
+	CCP2_OUTPUT_MEMORY,
+};
+
+/* Logical channel configuration */
+struct isp_interface_lcx_config {
+	int crc;
+	u32 data_start;
+	u32 data_size;
+	u32 format;
+};
+
+/* Memory channel configuration */
+struct isp_interface_mem_config {
+	u32 dst_port;
+	u32 vsize_count;
+	u32 hsize_count;
+	u32 src_ofst;
+	u32 dst_ofst;
+};
+
+/* CCP2 device */
+struct isp_ccp2_device {
+	struct v4l2_subdev subdev;
+	struct v4l2_mbus_framefmt formats[CCP2_PADS_NUM];
+	struct media_pad pads[CCP2_PADS_NUM];
+
+	enum ccp2_input_entity input;
+	enum ccp2_output_entity output;
+	struct isp_interface_lcx_config if_cfg;
+	struct isp_interface_mem_config mem_cfg;
+	struct isp_video video_in;
+	struct isp_csiphy *phy;
+	struct regulator *vdds_csib;
+	enum isp_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+/* Function declarations */
+int omap3isp_ccp2_init(struct isp_device *isp);
+void omap3isp_ccp2_cleanup(struct isp_device *isp);
+int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+			struct v4l2_device *vdev);
+void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2);
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2);
+
+#endif	/* OMAP3_ISP_CCP2_H */
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
new file mode 100644
index 0000000..6a3ff79
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -0,0 +1,1328 @@
+/*
+ * ispcsi2.c
+ *
+ * TI OMAP3 ISP - CSI2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/mm.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispcsi2.h"
+
+/*
+ * csi2_if_enable - Enable CSI2 Receiver interface.
+ * @enable: enable flag
+ *
+ */
+static void csi2_if_enable(struct isp_device *isp,
+			   struct isp_csi2_device *csi2, u8 enable)
+{
+	struct isp_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
+
+	isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_CTRL, ISPCSI2_CTRL_IF_EN,
+			enable ? ISPCSI2_CTRL_IF_EN : 0);
+
+	currctrl->if_enable = enable;
+}
+
+/*
+ * csi2_recv_config - CSI2 receiver module configuration.
+ * @currctrl: isp_csi2_ctrl_cfg structure
+ *
+ */
+static void csi2_recv_config(struct isp_device *isp,
+			     struct isp_csi2_device *csi2,
+			     struct isp_csi2_ctrl_cfg *currctrl)
+{
+	u32 reg;
+
+	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTRL);
+
+	if (currctrl->frame_mode)
+		reg |= ISPCSI2_CTRL_FRAME;
+	else
+		reg &= ~ISPCSI2_CTRL_FRAME;
+
+	if (currctrl->vp_clk_enable)
+		reg |= ISPCSI2_CTRL_VP_CLK_EN;
+	else
+		reg &= ~ISPCSI2_CTRL_VP_CLK_EN;
+
+	if (currctrl->vp_only_enable)
+		reg |= ISPCSI2_CTRL_VP_ONLY_EN;
+	else
+		reg &= ~ISPCSI2_CTRL_VP_ONLY_EN;
+
+	reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK;
+	reg |= currctrl->vp_out_ctrl << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT;
+
+	if (currctrl->ecc_enable)
+		reg |= ISPCSI2_CTRL_ECC_EN;
+	else
+		reg &= ~ISPCSI2_CTRL_ECC_EN;
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTRL);
+}
+
+static const unsigned int csi2_input_fmts[] = {
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SRGGB10_1X10,
+	V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SBGGR10_1X10,
+	V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SGBRG10_1X10,
+	V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+	V4L2_MBUS_FMT_YUYV8_2X8,
+};
+
+/* Setting the format on the CSI2 requires a mapping function that takes
+ * the following inputs:
+ * - 3 different formats (at this time)
+ * - 2 destinations (mem, vp+mem) (vp only handled separately)
+ * - 2 decompression options (on, off)
+ * - 2 isp revisions (certain formats must be handled differently on OMAP3630)
+ * The output is the CSI2 frame format code.
+ * Array indices as follows: [format][dest][decompr][is_3630]
+ * Not all combinations are valid. 0 means invalid.
+ */
+static const u16 __csi2_fmt_map[3][2][2][2] = {
+	/* RAW10 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			{ CSI2_PIX_FMT_RAW10_EXP16, CSI2_PIX_FMT_RAW10_EXP16 },
+			/* DPCM decompression */
+			{ 0, 0 },
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			{ CSI2_PIX_FMT_RAW10_EXP16_VP,
+			  CSI2_PIX_FMT_RAW10_EXP16_VP },
+			/* DPCM decompression */
+			{ 0, 0 },
+		},
+	},
+	/* RAW10 DPCM8 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			{ CSI2_PIX_FMT_RAW8, CSI2_USERDEF_8BIT_DATA1 },
+			/* DPCM decompression */
+			{ CSI2_PIX_FMT_RAW8_DPCM10_EXP16,
+			  CSI2_USERDEF_8BIT_DATA1_DPCM10 },
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			{ CSI2_PIX_FMT_RAW8_VP,
+			  CSI2_PIX_FMT_RAW8_VP },
+			/* DPCM decompression */
+			{ CSI2_PIX_FMT_RAW8_DPCM10_VP,
+			  CSI2_USERDEF_8BIT_DATA1_DPCM10_VP },
+		},
+	},
+	/* YUYV8 2X8 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			{ CSI2_PIX_FMT_YUV422_8BIT,
+			  CSI2_PIX_FMT_YUV422_8BIT },
+			/* DPCM decompression */
+			{ 0, 0 },
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			{ CSI2_PIX_FMT_YUV422_8BIT_VP,
+			  CSI2_PIX_FMT_YUV422_8BIT_VP },
+			/* DPCM decompression */
+			{ 0, 0 },
+		},
+	},
+};
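+
+/*
+ * For example, __csi2_fmt_map[0][1][0][is_3630] is the RAW10 entry for output
+ * to both memory and the video port without DPCM decompression, i.e.
+ * CSI2_PIX_FMT_RAW10_EXP16_VP on both ISP revisions.
+ */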
+
+/*
+ * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
+ * @csi2: ISP CSI2 device
+ *
+ * Returns CSI2 physical format id
+ */
+static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
+{
+	const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
+	int fmtidx, destidx, is_3630;
+
+	switch (fmt->code) {
+	case V4L2_MBUS_FMT_SGRBG10_1X10:
+	case V4L2_MBUS_FMT_SRGGB10_1X10:
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+	case V4L2_MBUS_FMT_SGBRG10_1X10:
+		fmtidx = 0;
+		break;
+	case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+	case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
+	case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
+	case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+		fmtidx = 1;
+		break;
+	case V4L2_MBUS_FMT_YUYV8_2X8:
+		fmtidx = 2;
+		break;
+	default:
+		WARN(1, "CSI2: pixel format %08x unsupported!\n",
+		     fmt->code);
+		return 0;
+	}
+
+	if (!(csi2->output & CSI2_OUTPUT_CCDC) &&
+	    !(csi2->output & CSI2_OUTPUT_MEMORY)) {
+		/* Neither output enabled is a valid combination */
+		return CSI2_PIX_FMT_OTHERS;
+	}
+
+	/* If we need to skip frames at the beginning of the stream disable the
+	 * video port to avoid sending the skipped frames to the CCDC.
+	 */
+	destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_CCDC);
+	is_3630 = csi2->isp->revision == ISP_REVISION_15_0;
+
+	return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress][is_3630];
+}
+
+/*
+ * csi2_set_outaddr - Set memory address to save output image
+ * @csi2: Pointer to ISP CSI2a device.
+ * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void csi2_set_outaddr(struct isp_csi2_device *csi2, u32 addr)
+{
+	struct isp_device *isp = csi2->isp;
+	struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[0];
+
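+	/*
+	 * The same buffer address is programmed into both the ping and pong
+	 * registers below, so every frame is written to @addr regardless of
+	 * which half of the ping-pong scheme the hardware uses.
+	 */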
+	ctx->ping_addr = addr;
+	ctx->pong_addr = addr;
+	isp_reg_writel(isp, ctx->ping_addr,
+		       csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
+	isp_reg_writel(isp, ctx->pong_addr,
+		       csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
+}
+
+/*
+ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
+ *			be enabled by CSI2.
+ * @format_id: mapped format id
+ *
+ */
+static inline int is_usr_def_mapping(u32 format_id)
+{
+	return (format_id & 0x40) ? 1 : 0;
+}
+
+/*
+ * csi2_ctx_enable - Enable specified CSI2 context
+ * @ctxnum: Context number, valid values are 0 to 7.
+ * @enable: enable
+ *
+ */
+static void csi2_ctx_enable(struct isp_device *isp,
+			    struct isp_csi2_device *csi2, u8 ctxnum, u8 enable)
+{
+	struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
+	unsigned int skip = 0;
+	u32 reg;
+
+	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
+
+	if (enable) {
+		if (csi2->frame_skip)
+			skip = csi2->frame_skip;
+		else if (csi2->output & CSI2_OUTPUT_MEMORY)
+			skip = 1;
+
+		reg &= ~ISPCSI2_CTX_CTRL1_COUNT_MASK;
+		reg |= ISPCSI2_CTX_CTRL1_COUNT_UNLOCK
+		    |  (skip << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
+		    |  ISPCSI2_CTX_CTRL1_CTX_EN;
+	} else {
+		reg &= ~ISPCSI2_CTX_CTRL1_CTX_EN;
+	}
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
+	ctx->enabled = enable;
+}
+
+/*
+ * csi2_ctx_config - CSI2 context configuration.
+ * @ctx: context configuration
+ *
+ */
+static void csi2_ctx_config(struct isp_device *isp,
+			    struct isp_csi2_device *csi2,
+			    struct isp_csi2_ctx_cfg *ctx)
+{
+	u32 reg;
+
+	/* Set up CSI2_CTx_CTRL1 */
+	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
+
+	if (ctx->eof_enabled)
+		reg |= ISPCSI2_CTX_CTRL1_EOF_EN;
+	else
+		reg &= ~ISPCSI2_CTX_CTRL1_EOF_EN;
+
+	if (ctx->eol_enabled)
+		reg |= ISPCSI2_CTX_CTRL1_EOL_EN;
+	else
+		reg &= ~ISPCSI2_CTX_CTRL1_EOL_EN;
+
+	if (ctx->checksum_enabled)
+		reg |= ISPCSI2_CTX_CTRL1_CS_EN;
+	else
+		reg &= ~ISPCSI2_CTX_CTRL1_CS_EN;
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));
+
+	/* Set up CSI2_CTx_CTRL2 */
+	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
+
+	reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK);
+	reg |= ctx->virtual_id << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
+
+	reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK);
+	reg |= ctx->format_id << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT;
+
+	if (ctx->dpcm_decompress) {
+		if (ctx->dpcm_predictor)
+			reg |= ISPCSI2_CTX_CTRL2_DPCM_PRED;
+		else
+			reg &= ~ISPCSI2_CTX_CTRL2_DPCM_PRED;
+	}
+
+	if (is_usr_def_mapping(ctx->format_id)) {
+		reg &= ~ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK;
+		reg |= 2 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
+	}
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));
+
+	/* Set up CSI2_CTx_CTRL3 */
+	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
+	reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK);
+	reg |= (ctx->alpha << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT);
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
+
+	/* Set up CSI2_CTx_DAT_OFST */
+	reg = isp_reg_readl(isp, csi2->regs1,
+			    ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
+	reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK;
+	reg |= ctx->data_offset << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT;
+	isp_reg_writel(isp, reg, csi2->regs1,
+		       ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
+
+	isp_reg_writel(isp, ctx->ping_addr,
+		       csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
+
+	isp_reg_writel(isp, ctx->pong_addr,
+		       csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
+}
+
+/*
+ * csi2_timing_config - CSI2 timing configuration.
+ * @timing: csi2_timing_cfg structure
+ */
+static void csi2_timing_config(struct isp_device *isp,
+			       struct isp_csi2_device *csi2,
+			       struct isp_csi2_timing_cfg *timing)
+{
+	u32 reg;
+
+	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_TIMING);
+
+	if (timing->force_rx_mode)
+		reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
+	else
+		reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
+
+	if (timing->stop_state_16x)
+		reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
+	else
+		reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
+
+	if (timing->stop_state_4x)
+		reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
+	else
+		reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
+
+	reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(timing->ionum);
+	reg |= timing->stop_state_counter <<
+	       ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(timing->ionum);
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_TIMING);
+}
+
+/*
+ * csi2_irq_ctx_set - Enable or disable CSI2 context IRQs.
+ * @enable: Enable/disable CSI2 Context interrupts
+ */
+static void csi2_irq_ctx_set(struct isp_device *isp,
+			     struct isp_csi2_device *csi2, int enable)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
+			       ISPCSI2_CTX_IRQSTATUS(i));
+		if (enable)
+			isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
+				    ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
+		else
+			isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
+				    ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
+	}
+}
+
+/*
+ * csi2_irq_complexio1_set - Enable or disable CSI2 ComplexIO #1 IRQs.
+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
+ */
+static void csi2_irq_complexio1_set(struct isp_device *isp,
+				    struct isp_csi2_device *csi2, int enable)
+{
+	u32 reg;
+	reg = ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT |
+		ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER |
+		ISPCSI2_PHY_IRQENABLE_STATEULPM5 |
+		ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 |
+		ISPCSI2_PHY_IRQENABLE_ERRESC5 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 |
+		ISPCSI2_PHY_IRQENABLE_STATEULPM4 |
+		ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 |
+		ISPCSI2_PHY_IRQENABLE_ERRESC4 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 |
+		ISPCSI2_PHY_IRQENABLE_STATEULPM3 |
+		ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 |
+		ISPCSI2_PHY_IRQENABLE_ERRESC3 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 |
+		ISPCSI2_PHY_IRQENABLE_STATEULPM2 |
+		ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 |
+		ISPCSI2_PHY_IRQENABLE_ERRESC2 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 |
+		ISPCSI2_PHY_IRQENABLE_STATEULPM1 |
+		ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 |
+		ISPCSI2_PHY_IRQENABLE_ERRESC1 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 |
+		ISPCSI2_PHY_IRQENABLE_ERRSOTHS1;
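+	/* Writing the mask to the status register clears any pending events
+	 * before the enable mask is updated.
+	 */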
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
+	if (enable)
+		reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
+	else
+		reg = 0;
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
+}
+
+/*
+ * csi2_irq_status_set - Enable or disable CSI2 status IRQs.
+ * @enable: Enable/disable CSI2 Status interrupts
+ */
+static void csi2_irq_status_set(struct isp_device *isp,
+				struct isp_csi2_device *csi2, int enable)
+{
+	u32 reg;
+	reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
+		ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
+		ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ |
+		ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
+		ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
+		ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ |
+		ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ |
+		ISPCSI2_IRQSTATUS_CONTEXT(0);
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQSTATUS);
+	if (enable)
+		reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQENABLE);
+	else
+		reg = 0;
+
+	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQENABLE);
+}
+
+/*
+ * omap3isp_csi2_reset - Resets the CSI2 module.
+ *
+ * Must be called with the phy lock held.
+ *
+ * Returns 0 if successful, -ENODEV if the receiver isn't available, or -EBUSY
+ * if the PHY is in use or the reset doesn't complete in time.
+ */
+int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
+{
+	struct isp_device *isp = csi2->isp;
+	u8 soft_reset_retries = 0;
+	u32 reg;
+	int i;
+
+	if (!csi2->available)
+		return -ENODEV;
+
+	if (csi2->phy->phy_in_use)
+		return -EBUSY;
+
+	isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+		    ISPCSI2_SYSCONFIG_SOFT_RESET);
+
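+	/* Poll RESET_DONE, retrying up to five times with a 100 us delay. */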
+	do {
+		reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSSTATUS) &
+				    ISPCSI2_SYSSTATUS_RESET_DONE;
+		if (reg == ISPCSI2_SYSSTATUS_RESET_DONE)
+			break;
+		soft_reset_retries++;
+		if (soft_reset_retries < 5)
+			udelay(100);
+	} while (soft_reset_retries < 5);
+
+	if (soft_reset_retries == 5) {
+		printk(KERN_ERR "CSI2: Soft reset try count exceeded!\n");
+		return -EBUSY;
+	}
+
+	if (isp->revision == ISP_REVISION_15_0)
+		isp_reg_set(isp, csi2->regs1, ISPCSI2_PHY_CFG,
+			    ISPCSI2_PHY_CFG_RESET_CTRL);
+
+	i = 100;
+	do {
+		reg = isp_reg_readl(isp, csi2->phy->phy_regs, ISPCSIPHY_REG1)
+		    & ISPCSIPHY_REG1_RESET_DONE_CTRLCLK;
+		if (reg == ISPCSIPHY_REG1_RESET_DONE_CTRLCLK)
+			break;
+		udelay(100);
+	} while (--i > 0);
+
+	if (i == 0) {
+		printk(KERN_ERR
+		       "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+		return -EBUSY;
+	}
+
+	if (isp->autoidle)
+		isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+				ISPCSI2_SYSCONFIG_AUTO_IDLE,
+				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART |
+				((isp->revision == ISP_REVISION_15_0) ?
+				 ISPCSI2_SYSCONFIG_AUTO_IDLE : 0));
+	else
+		isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
+				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+				ISPCSI2_SYSCONFIG_AUTO_IDLE,
+				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO);
+
+	return 0;
+}
+
+static int csi2_configure(struct isp_csi2_device *csi2)
+{
+	const struct isp_v4l2_subdevs_group *pdata;
+	struct isp_device *isp = csi2->isp;
+	struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
+	struct v4l2_subdev *sensor;
+	struct media_pad *pad;
+
+	/*
+	 * CSI2 fields that could be updated while the context or the
+	 * interface is enabled are currently not updated dynamically,
+	 * so reconfiguration is not allowed while either is enabled.
+	 */
+	if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
+		return -EBUSY;
+
+	pad = media_entity_remote_source(&csi2->pads[CSI2_PAD_SINK]);
+	sensor = media_entity_to_v4l2_subdev(pad->entity);
+	pdata = sensor->host_priv;
+
+	csi2->frame_skip = 0;
+	v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
+
+	csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
+	csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
+	csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
+
+	timing->ionum = 1;
+	timing->force_rx_mode = 1;
+	timing->stop_state_16x = 1;
+	timing->stop_state_4x = 1;
+	timing->stop_state_counter = 0x1FF;
+
+	/*
+	 * The CSI2 receiver can't do any format conversion except DPCM
+	 * decompression, so every set_format call configures both pads
+	 * and enables DPCM decompression as a special case.
+	 */
+	if (csi2->formats[CSI2_PAD_SINK].code !=
+	    csi2->formats[CSI2_PAD_SOURCE].code)
+		csi2->dpcm_decompress = true;
+	else
+		csi2->dpcm_decompress = false;
+
+	csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
+
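+	/*
+	 * Program the line offset only when bytes-per-line padding is used;
+	 * in that case the full bytes-per-line value is used as the offset.
+	 */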
+	if (csi2->video_out.bpl_padding == 0)
+		csi2->contexts[0].data_offset = 0;
+	else
+		csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
+
+	/*
+	 * Enable end of frame and end of line signals generation for
+	 * context 0. These signals are generated from CSI2 receiver to
+	 * qualify the last pixel of a frame and the last pixel of a line.
+	 * Without enabling the signals CSI2 receiver writes data to memory
+	 * beyond buffer size and/or data line offset is not handled correctly.
+	 */
+	csi2->contexts[0].eof_enabled = 1;
+	csi2->contexts[0].eol_enabled = 1;
+
+	csi2_irq_complexio1_set(isp, csi2, 1);
+	csi2_irq_ctx_set(isp, csi2, 1);
+	csi2_irq_status_set(isp, csi2, 1);
+
+	/* Set configuration (timings, format and links) */
+	csi2_timing_config(isp, csi2, timing);
+	csi2_recv_config(isp, csi2, &csi2->ctrl);
+	csi2_ctx_config(isp, csi2, &csi2->contexts[0]);
+
+	return 0;
+}
+
+/*
+ * csi2_print_status - Prints CSI2 debug information.
+ */
+#define CSI2_PRINT_REGISTER(isp, regs, name)\
+	dev_dbg(isp->dev, "###CSI2 " #name "=0x%08x\n", \
+		isp_reg_readl(isp, regs, ISPCSI2_##name))
+
+static void csi2_print_status(struct isp_csi2_device *csi2)
+{
+	struct isp_device *isp = csi2->isp;
+
+	if (!csi2->available)
+		return;
+
+	dev_dbg(isp->dev, "-------------CSI2 Register dump-------------\n");
+
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSCONFIG);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSSTATUS);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQENABLE);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQSTATUS);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTRL);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_H);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, GNQ);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_CFG);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQSTATUS);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, SHORT_PACKET);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQENABLE);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_P);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, TIMING);
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL1(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL2(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_OFST(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PING_ADDR(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PONG_ADDR(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQENABLE(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQSTATUS(0));
+	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL3(0));
+
+	dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * csi2_isr_buffer - Does buffer handling at end-of-frame
+ * when writing to memory.
+ */
+static void csi2_isr_buffer(struct isp_csi2_device *csi2)
+{
+	struct isp_device *isp = csi2->isp;
+	struct isp_buffer *buffer;
+
+	csi2_ctx_enable(isp, csi2, 0, 0);
+
+	buffer = omap3isp_video_buffer_next(&csi2->video_out);
+
+	/*
+	 * Let video queue operation restart engine if there is an underrun
+	 * condition.
+	 */
+	if (buffer == NULL)
+		return;
+
+	csi2_set_outaddr(csi2, buffer->isp_addr);
+	csi2_ctx_enable(isp, csi2, 0, 1);
+}
+
+static void csi2_isr_ctx(struct isp_csi2_device *csi2,
+			 struct isp_csi2_ctx_cfg *ctx)
+{
+	struct isp_device *isp = csi2->isp;
+	unsigned int n = ctx->ctxnum;
+	u32 status;
+
+	status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
+	isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
+
+	if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
+		return;
+
+	/* Skip interrupts until we reach the frame skip count. The CSI2 will be
+	 * automatically disabled, as the frame skip count has been programmed
+	 * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+	 *
+	 * It would have been nice to rely on the FRAME_NUMBER interrupt instead
+	 * but it turned out that the interrupt is only generated when the CSI2
+	 * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
+	 * correctly and reaches 0 when data is forwarded to the video port only
+	 * but no interrupt arrives). Maybe a CSI2 hardware bug.
+	 */
+	if (csi2->frame_skip) {
+		csi2->frame_skip--;
+		if (csi2->frame_skip == 0) {
+			ctx->format_id = csi2_ctx_map_format(csi2);
+			csi2_ctx_config(isp, csi2, ctx);
+			csi2_ctx_enable(isp, csi2, n, 1);
+		}
+		return;
+	}
+
+	if (csi2->output & CSI2_OUTPUT_MEMORY)
+		csi2_isr_buffer(csi2);
+}
+
+/*
+ * omap3isp_csi2_isr - CSI2 interrupt handling.
+ */
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+	u32 csi2_irqstatus, cpxio1_irqstatus;
+	struct isp_device *isp = csi2->isp;
+
+	if (!csi2->available)
+		return;
+
+	csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
+	isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);
+
+	/* Failure Cases */
+	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) {
+		cpxio1_irqstatus = isp_reg_readl(isp, csi2->regs1,
+						 ISPCSI2_PHY_IRQSTATUS);
+		isp_reg_writel(isp, cpxio1_irqstatus,
+			       csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
+		dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
+			"%x\n", cpxio1_irqstatus);
+		pipe->error = true;
+	}
+
+	if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
+			      ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
+			      ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
+			      ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
+			      ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
+		dev_dbg(isp->dev, "CSI2 Err:"
+			" OCP:%d,"
+			" Short_pack:%d,"
+			" ECC:%d,"
+			" CPXIO2:%d,"
+			" FIFO_OVF:%d,"
+			"\n",
+			(csi2_irqstatus &
+			 ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
+			(csi2_irqstatus &
+			 ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0,
+			(csi2_irqstatus &
+			 ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0,
+			(csi2_irqstatus &
+			 ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
+			(csi2_irqstatus &
+			 ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0);
+		pipe->error = true;
+	}
+
+	if (omap3isp_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+		return;
+
+	/* Successful cases */
+	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0))
+		csi2_isr_ctx(csi2, &csi2->contexts[0]);
+
+	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
+		dev_dbg(isp->dev, "CSI2: ECC correction done\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+/*
+ * csi2_queue - Queues the first buffer when using memory output
+ * @video: The video node
+ * @buffer: buffer to queue
+ */
+static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
+{
+	struct isp_device *isp = video->isp;
+	struct isp_csi2_device *csi2 = &isp->isp_csi2a;
+
+	csi2_set_outaddr(csi2, buffer->isp_addr);
+
+	/*
+	 * If streaming was enabled before there was a buffer queued
+	 * or underrun happened in the ISR, the hardware was not enabled
+	 * and DMA queue flag ISP_VIDEO_DMAQUEUE_UNDERRUN is still set.
+	 * Enable it now.
+	 */
+	if (csi2->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
+		/* Enable / disable context 0 and IRQs */
+		csi2_if_enable(isp, csi2, 1);
+		csi2_ctx_enable(isp, csi2, 0, 1);
+		isp_video_dmaqueue_flags_clr(&csi2->video_out);
+	}
+
+	return 0;
+}
+
+static const struct isp_video_operations csi2_ispvideo_ops = {
+	.queue = csi2_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+		  unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &csi2->formats[pad];
+}
+
+static void
+csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+		unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+		enum v4l2_subdev_format_whence which)
+{
+	enum v4l2_mbus_pixelcode pixelcode;
+	struct v4l2_mbus_framefmt *format;
+	const struct isp_format_info *info;
+	unsigned int i;
+
+	switch (pad) {
+	case CSI2_PAD_SINK:
+		/* Clamp the width and height to valid range (1-8191). */
+		for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
+			if (fmt->code == csi2_input_fmts[i])
+				break;
+		}
+
+		/* If not found, use SGRBG10 as default */
+		if (i >= ARRAY_SIZE(csi2_input_fmts))
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+		fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+		break;
+
+	case CSI2_PAD_SOURCE:
+		/* Source format same as sink format, except for DPCM
+		 * compression.
+		 */
+		pixelcode = fmt->code;
+		format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+		memcpy(fmt, format, sizeof(*fmt));
+
+		/*
+		 * Only allow DPCM decompression, and check that the
+		 * pattern is preserved.
+		 */
+		info = omap3isp_video_format_info(fmt->code);
+		if (info->uncompressed == pixelcode)
+			fmt->code = pixelcode;
+		break;
+	}
+
+	/* RGB, non-interlaced */
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh     : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+	const struct isp_format_info *info;
+
+	if (code->pad == CSI2_PAD_SINK) {
+		if (code->index >= ARRAY_SIZE(csi2_input_fmts))
+			return -EINVAL;
+
+		code->code = csi2_input_fmts[code->index];
+	} else {
+		format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
+					   V4L2_SUBDEV_FORMAT_TRY);
+		switch (code->index) {
+		case 0:
+			/* Passthrough sink pad code */
+			code->code = format->code;
+			break;
+		case 1:
+			/* Uncompressed code */
+			info = omap3isp_video_format_info(format->code);
+			if (info->uncompressed == format->code)
+				return -EINVAL;
+
+			code->code = info->uncompressed;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int csi2_enum_frame_size(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
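+	/*
+	 * Derive the minimum and maximum sizes by letting csi2_try_format()
+	 * clamp extreme width/height values (1x1 and the u32 maximum).
+	 */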
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == CSI2_PAD_SINK) {
+		format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+					   fmt->which);
+		*format = fmt->format;
+		csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+	}
+
+	return 0;
+}
+
+/*
+ * csi2_init_formats - Initialize formats on all pads
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
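+	/*
+	 * Default to SGRBG10 at 4096x4096 on the sink pad; csi2_set_format()
+	 * propagates the format to the source pad.
+	 */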
+	memset(&format, 0, sizeof(format));
+	format.pad = CSI2_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	csi2_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @enable: ISP pipeline stream state
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct isp_device *isp = csi2->isp;
+	struct isp_video *video_out = &csi2->video_out;
+
+	switch (enable) {
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		if (omap3isp_csiphy_acquire(csi2->phy) < 0)
+			return -ENODEV;
+		if (csi2->output & CSI2_OUTPUT_MEMORY)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
+		csi2_configure(csi2);
+		csi2_print_status(csi2);
+
+		/*
+		 * When outputting to memory with no buffer available, let the
+		 * buffer queue handler start the hardware. A DMA queue flag
+		 * ISP_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+		 * a buffer available.
+		 */
+		if (csi2->output & CSI2_OUTPUT_MEMORY &&
+		    !(video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED))
+			break;
+		/* Enable context 0 and IRQs */
+		atomic_set(&csi2->stopping, 0);
+		csi2_ctx_enable(isp, csi2, 0, 1);
+		csi2_if_enable(isp, csi2, 1);
+		isp_video_dmaqueue_flags_clr(video_out);
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+		if (csi2->state == ISP_PIPELINE_STREAM_STOPPED)
+			return 0;
+		if (omap3isp_module_sync_idle(&sd->entity, &csi2->wait,
+					      &csi2->stopping))
+			dev_dbg(isp->dev, "%s: module stop timeout.\n",
+				sd->name);
+		csi2_ctx_enable(isp, csi2, 0, 0);
+		csi2_if_enable(isp, csi2, 0);
+		csi2_irq_ctx_set(isp, csi2, 0);
+		omap3isp_csiphy_release(csi2->phy);
+		isp_video_dmaqueue_flags_clr(video_out);
+		omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
+		break;
+	}
+
+	csi2->state = enable;
+	return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+	.s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+	.enum_mbus_code = csi2_enum_mbus_code,
+	.enum_frame_size = csi2_enum_frame_size,
+	.get_fmt = csi2_get_format,
+	.set_fmt = csi2_set_format,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+	.video = &csi2_video_ops,
+	.pad = &csi2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+	.open = csi2_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local  : Pointer to the local (CSI2) media pad
+ * @remote : Pointer to the remote media pad
+ * @flags  : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
+
+	/*
+	 * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this when such support is implemented; return -EBUSY for now.
+	 */
+
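+	/*
+	 * The pad index is combined with the remote entity type so a single
+	 * switch statement can match each pad/entity-type pair.
+	 */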
+	switch (local->index | media_entity_type(remote->entity)) {
+	case CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (csi2->output & ~CSI2_OUTPUT_MEMORY)
+				return -EBUSY;
+			csi2->output |= CSI2_OUTPUT_MEMORY;
+		} else {
+			csi2->output &= ~CSI2_OUTPUT_MEMORY;
+		}
+		break;
+
+	case CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (csi2->output & ~CSI2_OUTPUT_CCDC)
+				return -EBUSY;
+			csi2->output |= CSI2_OUTPUT_CCDC;
+		} else {
+			csi2->output &= ~CSI2_OUTPUT_CCDC;
+		}
+		break;
+
+	default:
+		/* Link from camera to CSI2 is fixed... */
+		return -EINVAL;
+	}
+
+	ctrl->vp_only_enable =
+		(csi2->output & CSI2_OUTPUT_MEMORY) ? false : true;
+	ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC);
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+	.link_setup = csi2_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
+{
+	v4l2_device_unregister_subdev(&csi2->subdev);
+	omap3isp_video_unregister(&csi2->video_out);
+}
+
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+				    struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video nodes. */
+	ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&csi2->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap3isp_csi2_unregister_entities(csi2);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP CSI2 initialisation and cleanup
+ */
+
+/*
+ * csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to csi2 structure.
+ * return -ENOMEM or zero on success
+ */
+static int csi2_init_entities(struct isp_csi2_device *csi2)
+{
+	struct v4l2_subdev *sd = &csi2->subdev;
+	struct media_pad *pads = csi2->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	v4l2_subdev_init(sd, &csi2_ops);
+	sd->internal_ops = &csi2_internal_ops;
+	strlcpy(sd->name, "OMAP3 ISP CSI2a", sizeof(sd->name));
+
+	sd->grp_id = 1 << 16;	/* group ID for isp subdevs */
+	v4l2_set_subdevdata(sd, csi2);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+	me->ops = &csi2_media_ops;
+	ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	csi2_init_formats(sd, NULL);
+
+	/* Video device node */
+	csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	csi2->video_out.ops = &csi2_ispvideo_ops;
+	csi2->video_out.bpl_alignment = 32;
+	csi2->video_out.bpl_zero_padding = 1;
+	csi2->video_out.bpl_max = 0x1ffe0;
+	csi2->video_out.isp = csi2->isp;
+	csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+	ret = omap3isp_video_init(&csi2->video_out, "CSI2a");
+	if (ret < 0)
+		goto error_video;
+
+	/* Connect the CSI2 subdev to the video node. */
+	ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
+				       &csi2->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap3isp_video_cleanup(&csi2->video_out);
+error_video:
+	media_entity_cleanup(&csi2->subdev.entity);
+	return ret;
+}
+
+/*
+ * omap3isp_csi2_init - Routine for module driver init
+ */
+int omap3isp_csi2_init(struct isp_device *isp)
+{
+	struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+	struct isp_csi2_device *csi2c = &isp->isp_csi2c;
+	int ret;
+
+	csi2a->isp = isp;
+	csi2a->available = 1;
+	csi2a->regs1 = OMAP3_ISP_IOMEM_CSI2A_REGS1;
+	csi2a->regs2 = OMAP3_ISP_IOMEM_CSI2A_REGS2;
+	csi2a->phy = &isp->isp_csiphy2;
+	csi2a->state = ISP_PIPELINE_STREAM_STOPPED;
+	init_waitqueue_head(&csi2a->wait);
+
+	ret = csi2_init_entities(csi2a);
+	if (ret < 0)
+		return ret;
+
+	if (isp->revision == ISP_REVISION_15_0) {
+		csi2c->isp = isp;
+		csi2c->available = 1;
+		csi2c->regs1 = OMAP3_ISP_IOMEM_CSI2C_REGS1;
+		csi2c->regs2 = OMAP3_ISP_IOMEM_CSI2C_REGS2;
+		csi2c->phy = &isp->isp_csiphy1;
+		csi2c->state = ISP_PIPELINE_STREAM_STOPPED;
+		init_waitqueue_head(&csi2c->wait);
+	}
+
+	return 0;
+}
+
+/*
+ * omap3isp_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap3isp_csi2_cleanup(struct isp_device *isp)
+{
+	struct isp_csi2_device *csi2a = &isp->isp_csi2a;
+
+	omap3isp_video_cleanup(&csi2a->video_out);
+	media_entity_cleanup(&csi2a->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.h b/drivers/media/platform/omap3isp/ispcsi2.h
new file mode 100644
index 0000000..c57729b
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsi2.h
@@ -0,0 +1,165 @@
+/*
+ * ispcsi2.h
+ *
+ * TI OMAP3 ISP - CSI2 module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CSI2_H
+#define OMAP3_ISP_CSI2_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+struct isp_csiphy;
+
+/* This is not an exhaustive list */
+enum isp_csi2_pix_formats {
+	CSI2_PIX_FMT_OTHERS = 0,
+	CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
+	CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
+	CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
+	CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
+	CSI2_PIX_FMT_RAW8 = 0x2a,
+	CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
+	CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
+	CSI2_PIX_FMT_RAW8_VP = 0x12a,
+	CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
+	CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
+	CSI2_USERDEF_8BIT_DATA1 = 0x40,
+};
+
+enum isp_csi2_irqevents {
+	OCP_ERR_IRQ = 0x4000,
+	SHORT_PACKET_IRQ = 0x2000,
+	ECC_CORRECTION_IRQ = 0x1000,
+	ECC_NO_CORRECTION_IRQ = 0x800,
+	COMPLEXIO2_ERR_IRQ = 0x400,
+	COMPLEXIO1_ERR_IRQ = 0x200,
+	FIFO_OVF_IRQ = 0x100,
+	CONTEXT7 = 0x80,
+	CONTEXT6 = 0x40,
+	CONTEXT5 = 0x20,
+	CONTEXT4 = 0x10,
+	CONTEXT3 = 0x8,
+	CONTEXT2 = 0x4,
+	CONTEXT1 = 0x2,
+	CONTEXT0 = 0x1,
+};
+
+enum isp_csi2_ctx_irqevents {
+	CTX_ECC_CORRECTION = 0x100,
+	CTX_LINE_NUMBER = 0x80,
+	CTX_FRAME_NUMBER = 0x40,
+	CTX_CS = 0x20,
+	CTX_LE = 0x8,
+	CTX_LS = 0x4,
+	CTX_FE = 0x2,
+	CTX_FS = 0x1,
+};
+
+enum isp_csi2_frame_mode {
+	ISP_CSI2_FRAME_IMMEDIATE,
+	ISP_CSI2_FRAME_AFTERFEC,
+};
+
+#define ISP_CSI2_MAX_CTX_NUM	7
+
+struct isp_csi2_ctx_cfg {
+	u8 ctxnum;		/* context number 0 - 7 */
+	u8 dpcm_decompress;
+
+	/* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
+	u8 virtual_id;
+	u16 format_id;		/* as in CSI2_CTx_CTRL2[9:0] */
+	u8 dpcm_predictor;	/* 1: simple, 0: advanced */
+
+	/* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
+	u16 alpha;
+	u16 data_offset;
+	u32 ping_addr;
+	u32 pong_addr;
+	u8 eof_enabled;
+	u8 eol_enabled;
+	u8 checksum_enabled;
+	u8 enabled;
+};
+
+struct isp_csi2_timing_cfg {
+	u8 ionum;			/* IO1 or IO2 as in CSI2_TIMING */
+	unsigned force_rx_mode:1;
+	unsigned stop_state_16x:1;
+	unsigned stop_state_4x:1;
+	u16 stop_state_counter;
+};
+
+struct isp_csi2_ctrl_cfg {
+	bool vp_clk_enable;
+	bool vp_only_enable;
+	u8 vp_out_ctrl;
+	enum isp_csi2_frame_mode frame_mode;
+	bool ecc_enable;
+	bool if_enable;
+};
+
+#define CSI2_PAD_SINK		0
+#define CSI2_PAD_SOURCE		1
+#define CSI2_PADS_NUM		2
+
+#define CSI2_OUTPUT_CCDC	(1 << 0)
+#define CSI2_OUTPUT_MEMORY	(1 << 1)
+
+struct isp_csi2_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[CSI2_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+	struct isp_video video_out;
+	struct isp_device *isp;
+
+	u8 available;		/* Is the IP present on the silicon? */
+
+	/* mem resources - enums as defined in enum isp_mem_resources */
+	u8 regs1;
+	u8 regs2;
+
+	u32 output; /* output to CCDC, memory or both? */
+	bool dpcm_decompress;
+	unsigned int frame_skip;
+
+	struct isp_csiphy *phy;
+	struct isp_csi2_ctx_cfg contexts[ISP_CSI2_MAX_CTX_NUM + 1];
+	struct isp_csi2_timing_cfg timing[2];
+	struct isp_csi2_ctrl_cfg ctrl;
+	enum isp_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2);
+int omap3isp_csi2_reset(struct isp_csi2_device *csi2);
+int omap3isp_csi2_init(struct isp_device *isp);
+void omap3isp_csi2_cleanup(struct isp_device *isp);
+void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2);
+int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+				    struct v4l2_device *vdev);
+#endif	/* OMAP3_ISP_CSI2_H */
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
new file mode 100644
index 0000000..348f67e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -0,0 +1,249 @@
+/*
+ * ispcsiphy.c
+ *
+ * TI OMAP3 ISP - CSI PHY module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispcsiphy.h"
+
+/*
+ * csiphy_lanes_config - Configuration of CSIPHY lanes.
+ *
+ * Updates HW configuration.
+ * Called with phy->mutex taken.
+ */
+static void csiphy_lanes_config(struct isp_csiphy *phy)
+{
+	unsigned int i;
+	u32 reg;
+
+	reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+	for (i = 0; i < phy->num_data_lanes; i++) {
+		reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
+			 ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
+		reg |= (phy->lanes.data[i].pol <<
+			ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
+		reg |= (phy->lanes.data[i].pos <<
+			ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+	}
+
+	reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
+		 ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
+	reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
+	reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+
+	isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+}
+
+/*
+ * csiphy_power_autoswitch_enable
+ * @enable: Sets or clears the autoswitch function enable flag.
+ */
+static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable)
+{
+	isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
+			ISPCSI2_PHY_CFG_PWR_AUTO,
+			enable ? ISPCSI2_PHY_CFG_PWR_AUTO : 0);
+}
+
+/*
+ * csiphy_set_power
+ * @power: Power state to be set.
+ *
+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
+ */
+static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
+{
+	u32 reg;
+	u8 retry_count;
+
+	isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
+			ISPCSI2_PHY_CFG_PWR_CMD_MASK, power);
+
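+	/*
+	 * Poll the power status field until it reflects the requested command
+	 * (the status value equals the command value shifted right by two),
+	 * retrying up to 100 times with a 50 us delay between reads.
+	 */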
+	retry_count = 0;
+	do {
+		udelay(50);
+		reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) &
+				    ISPCSI2_PHY_CFG_PWR_STATUS_MASK;
+
+		if (reg != power >> 2)
+			retry_count++;
+
+	} while ((reg != power >> 2) && (retry_count < 100));
+
+	if (retry_count == 100) {
+		printk(KERN_ERR "CSI2 CIO set power failed!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
+ *
+ * Called with phy->mutex taken.
+ */
+static void csiphy_dphy_config(struct isp_csiphy *phy)
+{
+	u32 reg;
+
+	/* Set up ISPCSIPHY_REG0 */
+	reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
+
+	reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
+		 ISPCSIPHY_REG0_THS_SETTLE_MASK);
+	reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT;
+	reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
+
+	isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+
+	/* Set up ISPCSIPHY_REG1 */
+	reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
+
+	reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
+		 ISPCSIPHY_REG1_TCLK_MISS_MASK |
+		 ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
+	reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
+	reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
+	reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
+
+	isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+}
+
+static int csiphy_config(struct isp_csiphy *phy,
+			 struct isp_csiphy_dphy_cfg *dphy,
+			 struct isp_csiphy_lanes_cfg *lanes)
+{
+	unsigned int used_lanes = 0;
+	unsigned int i;
+
+	/* Clock and data lanes verification */
+	for (i = 0; i < phy->num_data_lanes; i++) {
+		if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3)
+			return -EINVAL;
+
+		if (used_lanes & (1 << lanes->data[i].pos))
+			return -EINVAL;
+
+		used_lanes |= 1 << lanes->data[i].pos;
+	}
+
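+	/*
+	 * The clock lane polarity must be 0 or 1 and its position must be a
+	 * valid, non-zero position not already used by a data lane.
+	 */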
+	if (lanes->clk.pol > 1 || lanes->clk.pos > 3)
+		return -EINVAL;
+
+	if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
+		return -EINVAL;
+
+	mutex_lock(&phy->mutex);
+	phy->dphy = *dphy;
+	phy->lanes = *lanes;
+	mutex_unlock(&phy->mutex);
+
+	return 0;
+}
+
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
+{
+	int rval;
+
+	if (phy->vdd == NULL) {
+		dev_err(phy->isp->dev, "Power regulator for CSI PHY not "
+			"available\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&phy->mutex);
+
+	rval = regulator_enable(phy->vdd);
+	if (rval < 0)
+		goto done;
+
+	rval = omap3isp_csi2_reset(phy->csi2);
+	if (rval < 0)
+		goto done;
+
+	csiphy_dphy_config(phy);
+	csiphy_lanes_config(phy);
+
+	rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
+	if (rval) {
+		regulator_disable(phy->vdd);
+		goto done;
+	}
+
+	csiphy_power_autoswitch_enable(phy, true);
+	phy->phy_in_use = 1;
+
+done:
+	mutex_unlock(&phy->mutex);
+	return rval;
+}
+
+void omap3isp_csiphy_release(struct isp_csiphy *phy)
+{
+	mutex_lock(&phy->mutex);
+	if (phy->phy_in_use) {
+		csiphy_power_autoswitch_enable(phy, false);
+		csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
+		regulator_disable(phy->vdd);
+		phy->phy_in_use = 0;
+	}
+	mutex_unlock(&phy->mutex);
+}
+
+/*
+ * omap3isp_csiphy_init - Initialize the CSI PHY frontends
+ */
+int omap3isp_csiphy_init(struct isp_device *isp)
+{
+	struct isp_csiphy *phy1 = &isp->isp_csiphy1;
+	struct isp_csiphy *phy2 = &isp->isp_csiphy2;
+
+	isp->platform_cb.csiphy_config = csiphy_config;
+
+	phy2->isp = isp;
+	phy2->csi2 = &isp->isp_csi2a;
+	phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
+	phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1;
+	phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2;
+	mutex_init(&phy2->mutex);
+
+	if (isp->revision == ISP_REVISION_15_0) {
+		phy1->isp = isp;
+		phy1->csi2 = &isp->isp_csi2c;
+		phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES;
+		phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1;
+		phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1;
+		mutex_init(&phy1->mutex);
+	}
+
+	return 0;
+}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
new file mode 100644
index 0000000..e93a661
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -0,0 +1,63 @@
+/*
+ * ispcsiphy.h
+ *
+ * TI OMAP3 ISP - CSI PHY module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_CSI_PHY_H
+#define OMAP3_ISP_CSI_PHY_H
+
+#include <media/omap3isp.h>
+
+struct isp_csi2_device;
+struct regulator;
+
+struct isp_csiphy_dphy_cfg {
+	u8 ths_term;
+	u8 ths_settle;
+	u8 tclk_term;
+	unsigned tclk_miss:1;
+	u8 tclk_settle;
+};
+
+struct isp_csiphy {
+	struct isp_device *isp;
+	struct mutex mutex;	/* serialize csiphy configuration */
+	u8 phy_in_use;
+	struct isp_csi2_device *csi2;
+	struct regulator *vdd;
+
+	/* mem resources - enums as defined in enum isp_mem_resources */
+	unsigned int cfg_regs;
+	unsigned int phy_regs;
+
+	u8 num_data_lanes;	/* number of CSI2 Data Lanes supported */
+	struct isp_csiphy_lanes_cfg lanes;
+	struct isp_csiphy_dphy_cfg dphy;
+};
+
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy);
+void omap3isp_csiphy_release(struct isp_csiphy *phy);
+int omap3isp_csiphy_init(struct isp_device *isp);
+
+#endif	/* OMAP3_ISP_CSI_PHY_H */
diff --git a/drivers/media/platform/omap3isp/isph3a.h b/drivers/media/platform/omap3isp/isph3a.h
new file mode 100644
index 0000000..fb09fd4
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a.h
@@ -0,0 +1,117 @@
+/*
+ * isph3a.h
+ *
+ * TI OMAP3 ISP - H3A AF module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_H3A_H
+#define OMAP3_ISP_H3A_H
+
+#include <linux/omap3isp.h>
+
+/*
+ * ----------
+ * -H3A AEWB-
+ * ----------
+ */
+
+#define AEWB_PACKET_SIZE	16
+#define AEWB_SATURATION_LIMIT	0x3ff
+
+/* Flags for changed registers */
+#define PCR_CHNG		(1 << 0)
+#define AEWWIN1_CHNG		(1 << 1)
+#define AEWINSTART_CHNG		(1 << 2)
+#define AEWINBLK_CHNG		(1 << 3)
+#define AEWSUBWIN_CHNG		(1 << 4)
+#define PRV_WBDGAIN_CHNG	(1 << 5)
+#define PRV_WBGAIN_CHNG		(1 << 6)
+
+/* ISPH3A REGISTERS bits */
+#define ISPH3A_PCR_AF_EN	(1 << 0)
+#define ISPH3A_PCR_AF_ALAW_EN	(1 << 1)
+#define ISPH3A_PCR_AF_MED_EN	(1 << 2)
+#define ISPH3A_PCR_AF_BUSY	(1 << 15)
+#define ISPH3A_PCR_AEW_EN	(1 << 16)
+#define ISPH3A_PCR_AEW_ALAW_EN	(1 << 17)
+#define ISPH3A_PCR_AEW_BUSY	(1 << 18)
+#define ISPH3A_PCR_AEW_MASK	(ISPH3A_PCR_AEW_ALAW_EN | \
+				 ISPH3A_PCR_AEW_AVE2LMT_MASK)
+
+/*
+ * --------
+ * -H3A AF-
+ * --------
+ */
+
+/* Peripheral Revision */
+#define AFPID				0x0
+
+#define AFCOEF_OFFSET			0x00000004	/* COEF base address */
+
+/* PCR fields */
+#define AF_BUSYAF			(1 << 15)
+#define AF_FVMODE			(1 << 14)
+#define AF_RGBPOS			(0x7 << 11)
+#define AF_MED_TH			(0xFF << 3)
+#define AF_MED_EN			(1 << 2)
+#define AF_ALAW_EN			(1 << 1)
+#define AF_EN				(1 << 0)
+#define AF_PCR_MASK			(AF_FVMODE | AF_RGBPOS | AF_MED_TH | \
+					 AF_MED_EN | AF_ALAW_EN)
+
+/* AFPAX1 fields */
+#define AF_PAXW				(0x7F << 16)
+#define AF_PAXH				0x7F
+
+/* AFPAX2 fields */
+#define AF_AFINCV			(0xF << 13)
+#define AF_PAXVC			(0x7F << 6)
+#define AF_PAXHC			0x3F
+
+/* AFPAXSTART fields */
+#define AF_PAXSH			(0xFFF<<16)
+#define AF_PAXSV			0xFFF
+
+/* COEFFICIENT MASK */
+#define AF_COEF_MASK0			0xFFF
+#define AF_COEF_MASK1			(0xFFF<<16)
+
+/* BIT SHIFTS */
+#define AF_RGBPOS_SHIFT			11
+#define AF_MED_TH_SHIFT			3
+#define AF_PAXW_SHIFT			16
+#define AF_LINE_INCR_SHIFT		13
+#define AF_VT_COUNT_SHIFT		6
+#define AF_HZ_START_SHIFT		16
+#define AF_COEF_SHIFT			16
+
+/* Init and cleanup functions */
+int omap3isp_h3a_aewb_init(struct isp_device *isp);
+int omap3isp_h3a_af_init(struct isp_device *isp);
+
+void omap3isp_h3a_aewb_cleanup(struct isp_device *isp);
+void omap3isp_h3a_af_cleanup(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_H3A_H */
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
new file mode 100644
index 0000000..036e996
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -0,0 +1,368 @@
+/*
+ * isph3a.c
+ *
+ * TI OMAP3 ISP - H3A module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+#include "isph3a.h"
+#include "ispstat.h"
+
+/*
+ * h3a_aewb_setup_regs - Helper function to update h3a registers.
+ */
+static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
+{
+	struct omap3isp_h3a_aewb_config *conf = priv;
+	u32 pcr;
+	u32 win1;
+	u32 start;
+	u32 blk;
+	u32 subwin;
+
+	if (aewb->state == ISPSTAT_DISABLED)
+		return;
+
+	isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr,
+		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
+
+	if (!aewb->update)
+		return;
+
+	/* Converting config metadata into reg values */
+	pcr = conf->saturation_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT;
+	pcr |= !!conf->alaw_enable << ISPH3A_PCR_AEW_ALAW_EN_SHIFT;
+
+	win1 = ((conf->win_height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT;
+	win1 |= ((conf->win_width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT;
+	win1 |= (conf->ver_win_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT;
+	win1 |= (conf->hor_win_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT;
+
+	start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT;
+	start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT;
+
+	blk = conf->blk_ver_win_start << ISPH3A_AEWINBLK_WINSV_SHIFT;
+	blk |= ((conf->blk_win_height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT;
+
+	subwin = ((conf->subsample_ver_inc >> 1) - 1) <<
+		 ISPH3A_AEWSUBWIN_AEWINCV_SHIFT;
+	subwin |= ((conf->subsample_hor_inc >> 1) - 1) <<
+		  ISPH3A_AEWSUBWIN_AEWINCH_SHIFT;
+
+	isp_reg_writel(aewb->isp, win1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1);
+	isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A,
+		       ISPH3A_AEWINSTART);
+	isp_reg_writel(aewb->isp, blk, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK);
+	isp_reg_writel(aewb->isp, subwin, OMAP3_ISP_IOMEM_H3A,
+		       ISPH3A_AEWSUBWIN);
+	isp_reg_clr_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+			ISPH3A_PCR_AEW_MASK, pcr);
+
+	aewb->update = 0;
+	aewb->config_counter += aewb->inc_config;
+	aewb->inc_config = 0;
+	aewb->buf_size = conf->buf_size;
+}
+
+static void h3a_aewb_enable(struct ispstat *aewb, int enable)
+{
+	if (enable) {
+		isp_reg_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+			    ISPH3A_PCR_AEW_EN);
+		omap3isp_subclk_enable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
+	} else {
+		isp_reg_clr(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+			    ISPH3A_PCR_AEW_EN);
+		omap3isp_subclk_disable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
+	}
+}
+
+static int h3a_aewb_busy(struct ispstat *aewb)
+{
+	return isp_reg_readl(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
+						& ISPH3A_PCR_BUSYAEAWB;
+}
+
+static u32 h3a_aewb_get_buf_size(struct omap3isp_h3a_aewb_config *conf)
+{
+	/* Number of configured windows + extra row for black data */
+	u32 win_count = (conf->ver_win_count + 1) * conf->hor_win_count;
+
+	/*
+	 * One extra packet of unsaturated block counts is added for each
+	 * group of 8 windows, plus one more for the remaining (win_count % 8)
+	 * windows if win_count is not divisible by 8.
+	 */
+	win_count += (win_count + 7) / 8;
+
+	return win_count * AEWB_PACKET_SIZE;
+}
+
+static int h3a_aewb_validate_params(struct ispstat *aewb, void *new_conf)
+{
+	struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
+	u32 buf_size;
+
+	if (unlikely(user_cfg->saturation_limit >
+		     OMAP3ISP_AEWB_MAX_SATURATION_LIM))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
+		     user_cfg->win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
+		     user_cfg->win_height & 0x01))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->win_width < OMAP3ISP_AEWB_MIN_WIN_W ||
+		     user_cfg->win_width > OMAP3ISP_AEWB_MAX_WIN_W ||
+		     user_cfg->win_width & 0x01))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->ver_win_count < OMAP3ISP_AEWB_MIN_WINVC ||
+		     user_cfg->ver_win_count > OMAP3ISP_AEWB_MAX_WINVC))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->hor_win_count < OMAP3ISP_AEWB_MIN_WINHC ||
+		     user_cfg->hor_win_count > OMAP3ISP_AEWB_MAX_WINHC))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->hor_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->blk_ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->blk_win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
+		     user_cfg->blk_win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
+		     user_cfg->blk_win_height & 0x01))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->subsample_ver_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
+		     user_cfg->subsample_ver_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
+		     user_cfg->subsample_ver_inc & 0x01))
+		return -EINVAL;
+
+	if (unlikely(user_cfg->subsample_hor_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
+		     user_cfg->subsample_hor_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
+		     user_cfg->subsample_hor_inc & 0x01))
+		return -EINVAL;
+
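+	/*
+	 * Round the requested buffer size up to the minimum required by the
+	 * window configuration, and clamp it to the maximum supported size.
+	 */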
+	buf_size = h3a_aewb_get_buf_size(user_cfg);
+	if (buf_size > user_cfg->buf_size)
+		user_cfg->buf_size = buf_size;
+	else if (user_cfg->buf_size > OMAP3ISP_AEWB_MAX_BUF_SIZE)
+		user_cfg->buf_size = OMAP3ISP_AEWB_MAX_BUF_SIZE;
+
+	return 0;
+}
+
+/*
+ * h3a_aewb_set_params - Helper function to check and store user-given params.
+ * @new_conf: Pointer to AE and AWB parameters struct.
+ *
+ * As most of them are busy-lock registers, we need to wait until AEW_BUSY = 0
+ * before programming them, which is done during the ISR.
+ */
+static void h3a_aewb_set_params(struct ispstat *aewb, void *new_conf)
+{
+	struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
+	struct omap3isp_h3a_aewb_config *cur_cfg = aewb->priv;
+	int update = 0;
+
+	if (cur_cfg->saturation_limit != user_cfg->saturation_limit) {
+		cur_cfg->saturation_limit = user_cfg->saturation_limit;
+		update = 1;
+	}
+	if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
+		cur_cfg->alaw_enable = user_cfg->alaw_enable;
+		update = 1;
+	}
+	if (cur_cfg->win_height != user_cfg->win_height) {
+		cur_cfg->win_height = user_cfg->win_height;
+		update = 1;
+	}
+	if (cur_cfg->win_width != user_cfg->win_width) {
+		cur_cfg->win_width = user_cfg->win_width;
+		update = 1;
+	}
+	if (cur_cfg->ver_win_count != user_cfg->ver_win_count) {
+		cur_cfg->ver_win_count = user_cfg->ver_win_count;
+		update = 1;
+	}
+	if (cur_cfg->hor_win_count != user_cfg->hor_win_count) {
+		cur_cfg->hor_win_count = user_cfg->hor_win_count;
+		update = 1;
+	}
+	if (cur_cfg->ver_win_start != user_cfg->ver_win_start) {
+		cur_cfg->ver_win_start = user_cfg->ver_win_start;
+		update = 1;
+	}
+	if (cur_cfg->hor_win_start != user_cfg->hor_win_start) {
+		cur_cfg->hor_win_start = user_cfg->hor_win_start;
+		update = 1;
+	}
+	if (cur_cfg->blk_ver_win_start != user_cfg->blk_ver_win_start) {
+		cur_cfg->blk_ver_win_start = user_cfg->blk_ver_win_start;
+		update = 1;
+	}
+	if (cur_cfg->blk_win_height != user_cfg->blk_win_height) {
+		cur_cfg->blk_win_height = user_cfg->blk_win_height;
+		update = 1;
+	}
+	if (cur_cfg->subsample_ver_inc != user_cfg->subsample_ver_inc) {
+		cur_cfg->subsample_ver_inc = user_cfg->subsample_ver_inc;
+		update = 1;
+	}
+	if (cur_cfg->subsample_hor_inc != user_cfg->subsample_hor_inc) {
+		cur_cfg->subsample_hor_inc = user_cfg->subsample_hor_inc;
+		update = 1;
+	}
+
+	if (update || !aewb->configured) {
+		aewb->inc_config++;
+		aewb->update = 1;
+		cur_cfg->buf_size = h3a_aewb_get_buf_size(cur_cfg);
+	}
+}
+
+static long h3a_aewb_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+	struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_OMAP3ISP_AEWB_CFG:
+		return omap3isp_stat_config(stat, arg);
+	case VIDIOC_OMAP3ISP_STAT_REQ:
+		return omap3isp_stat_request_statistics(stat, arg);
+	case VIDIOC_OMAP3ISP_STAT_EN: {
+		unsigned long *en = arg;
+		return omap3isp_stat_enable(stat, !!*en);
+	}
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+static const struct ispstat_ops h3a_aewb_ops = {
+	.validate_params	= h3a_aewb_validate_params,
+	.set_params		= h3a_aewb_set_params,
+	.setup_regs		= h3a_aewb_setup_regs,
+	.enable			= h3a_aewb_enable,
+	.busy			= h3a_aewb_busy,
+};
+
+static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
+	.ioctl = h3a_aewb_ioctl,
+	.subscribe_event = omap3isp_stat_subscribe_event,
+	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
+	.s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops h3a_aewb_subdev_ops = {
+	.core = &h3a_aewb_subdev_core_ops,
+	.video = &h3a_aewb_subdev_video_ops,
+};
+
+/*
+ * omap3isp_h3a_aewb_init - Module Initialisation.
+ */
+int omap3isp_h3a_aewb_init(struct isp_device *isp)
+{
+	struct ispstat *aewb = &isp->isp_aewb;
+	struct omap3isp_h3a_aewb_config *aewb_cfg;
+	struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
+	int ret;
+
+	aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
+	if (!aewb_cfg)
+		return -ENOMEM;
+
+	memset(aewb, 0, sizeof(*aewb));
+	aewb->ops = &h3a_aewb_ops;
+	aewb->priv = aewb_cfg;
+	aewb->dma_ch = -1;
+	aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
+	aewb->isp = isp;
+
+	/* Set recover state configuration */
+	aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
+	if (!aewb_recover_cfg) {
+		dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
+					"recover configuration.\n");
+		ret = -ENOMEM;
+		goto err_recover_alloc;
+	}
+
+	aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
+	aewb_recover_cfg->win_height = OMAP3ISP_AEWB_MIN_WIN_H;
+	aewb_recover_cfg->win_width = OMAP3ISP_AEWB_MIN_WIN_W;
+	aewb_recover_cfg->ver_win_count = OMAP3ISP_AEWB_MIN_WINVC;
+	aewb_recover_cfg->hor_win_count = OMAP3ISP_AEWB_MIN_WINHC;
+	aewb_recover_cfg->blk_ver_win_start = aewb_recover_cfg->ver_win_start +
+		aewb_recover_cfg->win_height * aewb_recover_cfg->ver_win_count;
+	aewb_recover_cfg->blk_win_height = OMAP3ISP_AEWB_MIN_WIN_H;
+	aewb_recover_cfg->subsample_ver_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
+	aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
+
+	if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
+		dev_err(aewb->isp->dev, "AEWB: recover configuration is "
+					"invalid.\n");
+		ret = -EINVAL;
+		goto err_conf;
+	}
+
+	aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
+	aewb->recover_priv = aewb_recover_cfg;
+
+	ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
+	if (ret)
+		goto err_conf;
+
+	return 0;
+
+err_conf:
+	kfree(aewb_recover_cfg);
+err_recover_alloc:
+	kfree(aewb_cfg);
+
+	return ret;
+}
+
+/*
+ * omap3isp_h3a_aewb_cleanup - Module exit.
+ */
+void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
+{
+	kfree(isp->isp_aewb.priv);
+	kfree(isp->isp_aewb.recover_priv);
+	omap3isp_stat_cleanup(&isp->isp_aewb);
+}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
new file mode 100644
index 0000000..42ccce3
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -0,0 +1,423 @@
+/*
+ * isph3a_af.c
+ *
+ * TI OMAP3 ISP - H3A AF module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/* Linux specific include files */
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "isp.h"
+#include "isph3a.h"
+#include "ispstat.h"
+
+#define IS_OUT_OF_BOUNDS(value, min, max)		\
+	(((value) < (min)) || ((value) > (max)))
+
+static void h3a_af_setup_regs(struct ispstat *af, void *priv)
+{
+	struct omap3isp_h3a_af_config *conf = priv;
+	u32 pcr;
+	u32 pax1;
+	u32 pax2;
+	u32 paxstart;
+	u32 coef;
+	u32 base_coef_set0;
+	u32 base_coef_set1;
+	int index;
+
+	if (af->state == ISPSTAT_DISABLED)
+		return;
+
+	isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A,
+		       ISPH3A_AFBUFST);
+
+	if (!af->update)
+		return;
+
+	/* Configure Hardware Registers */
+	pax1 = ((conf->paxel.width >> 1) - 1) << AF_PAXW_SHIFT;
+	/* Set height in AFPAX1 */
+	pax1 |= (conf->paxel.height >> 1) - 1;
+	isp_reg_writel(af->isp, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1);
+
+	/* Configure AFPAX2 Register */
+	/* Set Line Increment in AFPAX2 Register */
+	pax2 = ((conf->paxel.line_inc >> 1) - 1) << AF_LINE_INCR_SHIFT;
+	/* Set Vertical Count */
+	pax2 |= (conf->paxel.v_cnt - 1) << AF_VT_COUNT_SHIFT;
+	/* Set Horizontal Count */
+	pax2 |= (conf->paxel.h_cnt - 1);
+	isp_reg_writel(af->isp, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2);
+
+	/* Configure PAXSTART Register */
+	/* Configure Horizontal Start */
+	paxstart = conf->paxel.h_start << AF_HZ_START_SHIFT;
+	/* Configure Vertical Start */
+	paxstart |= conf->paxel.v_start;
+	isp_reg_writel(af->isp, paxstart, OMAP3_ISP_IOMEM_H3A,
+		       ISPH3A_AFPAXSTART);
+
+	/* Set IIRSH Register */
+	isp_reg_writel(af->isp, conf->iir.h_start,
+		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH);
+
+	base_coef_set0 = ISPH3A_AFCOEF010;
+	base_coef_set1 = ISPH3A_AFCOEF110;
+	for (index = 0; index <= 8; index += 2) {
+		/* Set IIR Filter0 Coefficients */
+		coef = 0;
+		coef |= conf->iir.coeff_set0[index];
+		coef |= conf->iir.coeff_set0[index + 1] <<
+			AF_COEF_SHIFT;
+		isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
+			       base_coef_set0);
+		base_coef_set0 += AFCOEF_OFFSET;
+
+		/* Set IIR Filter1 Coefficients */
+		coef = 0;
+		coef |= conf->iir.coeff_set1[index];
+		coef |= conf->iir.coeff_set1[index + 1] <<
+			AF_COEF_SHIFT;
+		isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
+			       base_coef_set1);
+		base_coef_set1 += AFCOEF_OFFSET;
+	}
+	/* set AFCOEF0010 Register */
+	isp_reg_writel(af->isp, conf->iir.coeff_set0[10],
+		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010);
+	/* set AFCOEF1010 Register */
+	isp_reg_writel(af->isp, conf->iir.coeff_set1[10],
+		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010);
+
+	/* PCR Register */
+	/* Set RGB Position */
+	pcr = conf->rgb_pos << AF_RGBPOS_SHIFT;
+	/* Set Accumulator Mode */
+	if (conf->fvmode == OMAP3ISP_AF_MODE_PEAK)
+		pcr |= AF_FVMODE;
+	/* Set A-law */
+	if (conf->alaw_enable)
+		pcr |= AF_ALAW_EN;
+	/* HMF Configurations */
+	if (conf->hmf.enable) {
+		/* Enable HMF */
+		pcr |= AF_MED_EN;
+		/* Set Median Threshold */
+		pcr |= conf->hmf.threshold << AF_MED_TH_SHIFT;
+	}
+	/* Set PCR Register */
+	isp_reg_clr_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+			AF_PCR_MASK, pcr);
+
+	af->update = 0;
+	af->config_counter += af->inc_config;
+	af->inc_config = 0;
+	af->buf_size = conf->buf_size;
+}
+
+static void h3a_af_enable(struct ispstat *af, int enable)
+{
+	if (enable) {
+		isp_reg_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+			    ISPH3A_PCR_AF_EN);
+		omap3isp_subclk_enable(af->isp, OMAP3_ISP_SUBCLK_AF);
+	} else {
+		isp_reg_clr(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
+			    ISPH3A_PCR_AF_EN);
+		omap3isp_subclk_disable(af->isp, OMAP3_ISP_SUBCLK_AF);
+	}
+}
+
+static int h3a_af_busy(struct ispstat *af)
+{
+	return isp_reg_readl(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
+						& ISPH3A_PCR_BUSYAF;
+}
+
+static u32 h3a_af_get_buf_size(struct omap3isp_h3a_af_config *conf)
+{
+	return conf->paxel.h_cnt * conf->paxel.v_cnt * OMAP3ISP_AF_PAXEL_SIZE;
+}
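+
+/*
+ * Purely illustrative: the AF engine produces OMAP3ISP_AF_PAXEL_SIZE bytes
+ * per paxel, so e.g. an 8x6 paxel grid needs 8 * 6 * OMAP3ISP_AF_PAXEL_SIZE
+ * bytes. h3a_af_validate_params() below uses this value to bump up or clamp
+ * the user-requested buffer size.
+ */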
+
+/* Function to check paxel parameters */
+static int h3a_af_validate_params(struct ispstat *af, void *new_conf)
+{
+	struct omap3isp_h3a_af_config *user_cfg = new_conf;
+	struct omap3isp_h3a_af_paxel *paxel_cfg = &user_cfg->paxel;
+	struct omap3isp_h3a_af_iir *iir_cfg = &user_cfg->iir;
+	int index;
+	u32 buf_size;
+
+	/* Check horizontal Count */
+	if (IS_OUT_OF_BOUNDS(paxel_cfg->h_cnt,
+			     OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN,
+			     OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MAX))
+		return -EINVAL;
+
+	/* Check Vertical Count */
+	if (IS_OUT_OF_BOUNDS(paxel_cfg->v_cnt,
+			     OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN,
+			     OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MAX))
+		return -EINVAL;
+
+	if (IS_OUT_OF_BOUNDS(paxel_cfg->height, OMAP3ISP_AF_PAXEL_HEIGHT_MIN,
+			     OMAP3ISP_AF_PAXEL_HEIGHT_MAX) ||
+	    paxel_cfg->height % 2)
+		return -EINVAL;
+
+	/* Check width */
+	if (IS_OUT_OF_BOUNDS(paxel_cfg->width, OMAP3ISP_AF_PAXEL_WIDTH_MIN,
+			     OMAP3ISP_AF_PAXEL_WIDTH_MAX) ||
+	    paxel_cfg->width % 2)
+		return -EINVAL;
+
+	/* Check Line Increment */
+	if (IS_OUT_OF_BOUNDS(paxel_cfg->line_inc,
+			     OMAP3ISP_AF_PAXEL_INCREMENT_MIN,
+			     OMAP3ISP_AF_PAXEL_INCREMENT_MAX) ||
+	    paxel_cfg->line_inc % 2)
+		return -EINVAL;
+
+	/* Check Horizontal Start */
+	if ((paxel_cfg->h_start < iir_cfg->h_start) ||
+	    IS_OUT_OF_BOUNDS(paxel_cfg->h_start,
+			     OMAP3ISP_AF_PAXEL_HZSTART_MIN,
+			     OMAP3ISP_AF_PAXEL_HZSTART_MAX))
+		return -EINVAL;
+
+	/* Check IIR */
+	for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
+		if ((iir_cfg->coeff_set0[index]) > OMAP3ISP_AF_COEF_MAX)
+			return -EINVAL;
+
+		if ((iir_cfg->coeff_set1[index]) > OMAP3ISP_AF_COEF_MAX)
+			return -EINVAL;
+	}
+
+	if (IS_OUT_OF_BOUNDS(iir_cfg->h_start, OMAP3ISP_AF_IIRSH_MIN,
+			     OMAP3ISP_AF_IIRSH_MAX))
+		return -EINVAL;
+
+	/* Hack: If paxel size is 12, the 10th AF window may be corrupted */
+	if ((paxel_cfg->h_cnt * paxel_cfg->v_cnt > 9) &&
+	    (paxel_cfg->width * paxel_cfg->height == 12))
+		return -EINVAL;
+
+	buf_size = h3a_af_get_buf_size(user_cfg);
+	if (buf_size > user_cfg->buf_size)
+		/* User buf_size request wasn't enough */
+		user_cfg->buf_size = buf_size;
+	else if (user_cfg->buf_size > OMAP3ISP_AF_MAX_BUF_SIZE)
+		user_cfg->buf_size = OMAP3ISP_AF_MAX_BUF_SIZE;
+
+	return 0;
+}
+
+/* Update local parameters */
+static void h3a_af_set_params(struct ispstat *af, void *new_conf)
+{
+	struct omap3isp_h3a_af_config *user_cfg = new_conf;
+	struct omap3isp_h3a_af_config *cur_cfg = af->priv;
+	int update = 0;
+	int index;
+
+	/* alaw */
+	if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
+		update = 1;
+		goto out;
+	}
+
+	/* hmf */
+	if (cur_cfg->hmf.enable != user_cfg->hmf.enable) {
+		update = 1;
+		goto out;
+	}
+	if (cur_cfg->hmf.threshold != user_cfg->hmf.threshold) {
+		update = 1;
+		goto out;
+	}
+
+	/* rgbpos */
+	if (cur_cfg->rgb_pos != user_cfg->rgb_pos) {
+		update = 1;
+		goto out;
+	}
+
+	/* iir */
+	if (cur_cfg->iir.h_start != user_cfg->iir.h_start) {
+		update = 1;
+		goto out;
+	}
+	for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
+		if (cur_cfg->iir.coeff_set0[index] !=
+				user_cfg->iir.coeff_set0[index]) {
+			update = 1;
+			goto out;
+		}
+		if (cur_cfg->iir.coeff_set1[index] !=
+				user_cfg->iir.coeff_set1[index]) {
+			update = 1;
+			goto out;
+		}
+	}
+
+	/* paxel */
+	if ((cur_cfg->paxel.width != user_cfg->paxel.width) ||
+	    (cur_cfg->paxel.height != user_cfg->paxel.height) ||
+	    (cur_cfg->paxel.h_start != user_cfg->paxel.h_start) ||
+	    (cur_cfg->paxel.v_start != user_cfg->paxel.v_start) ||
+	    (cur_cfg->paxel.h_cnt != user_cfg->paxel.h_cnt) ||
+	    (cur_cfg->paxel.v_cnt != user_cfg->paxel.v_cnt) ||
+	    (cur_cfg->paxel.line_inc != user_cfg->paxel.line_inc)) {
+		update = 1;
+		goto out;
+	}
+
+	/* af_mode */
+	if (cur_cfg->fvmode != user_cfg->fvmode)
+		update = 1;
+
+out:
+	if (update || !af->configured) {
+		memcpy(cur_cfg, user_cfg, sizeof(*cur_cfg));
+		af->inc_config++;
+		af->update = 1;
+		/*
+		 * The user might have asked for a bigger buffer than necessary
+		 * for this configuration. In order to return the right amount
+		 * of data during buffer requests, calculate the size here
+		 * instead of sticking with user_cfg->buf_size.
+		 */
+		cur_cfg->buf_size = h3a_af_get_buf_size(cur_cfg);
+	}
+}
+
+static long h3a_af_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+	struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_OMAP3ISP_AF_CFG:
+		return omap3isp_stat_config(stat, arg);
+	case VIDIOC_OMAP3ISP_STAT_REQ:
+		return omap3isp_stat_request_statistics(stat, arg);
+	case VIDIOC_OMAP3ISP_STAT_EN: {
+		int *en = arg;
+		return omap3isp_stat_enable(stat, !!*en);
+	}
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+static const struct ispstat_ops h3a_af_ops = {
+	.validate_params	= h3a_af_validate_params,
+	.set_params		= h3a_af_set_params,
+	.setup_regs		= h3a_af_setup_regs,
+	.enable			= h3a_af_enable,
+	.busy			= h3a_af_busy,
+};
+
+static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = {
+	.ioctl = h3a_af_ioctl,
+	.subscribe_event = omap3isp_stat_subscribe_event,
+	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = {
+	.s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops h3a_af_subdev_ops = {
+	.core = &h3a_af_subdev_core_ops,
+	.video = &h3a_af_subdev_video_ops,
+};
+
+/*
+ * omap3isp_h3a_af_init - Module Initialisation.
+ */
+int omap3isp_h3a_af_init(struct isp_device *isp)
+{
+	struct ispstat *af = &isp->isp_af;
+	struct omap3isp_h3a_af_config *af_cfg;
+	struct omap3isp_h3a_af_config *af_recover_cfg;
+	int ret;
+
+	af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
+	if (af_cfg == NULL)
+		return -ENOMEM;
+
+	memset(af, 0, sizeof(*af));
+	af->ops = &h3a_af_ops;
+	af->priv = af_cfg;
+	af->dma_ch = -1;
+	af->event_type = V4L2_EVENT_OMAP3ISP_AF;
+	af->isp = isp;
+
+	/* Set recover state configuration */
+	af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
+	if (!af_recover_cfg) {
+		dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
+				      "configuration.\n");
+		ret = -ENOMEM;
+		goto err_recover_alloc;
+	}
+
+	af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
+	af_recover_cfg->paxel.width = OMAP3ISP_AF_PAXEL_WIDTH_MIN;
+	af_recover_cfg->paxel.height = OMAP3ISP_AF_PAXEL_HEIGHT_MIN;
+	af_recover_cfg->paxel.h_cnt = OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN;
+	af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
+	af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
+	if (h3a_af_validate_params(af, af_recover_cfg)) {
+		dev_err(af->isp->dev, "AF: recover configuration is "
+				      "invalid.\n");
+		ret = -EINVAL;
+		goto err_conf;
+	}
+
+	af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
+	af->recover_priv = af_recover_cfg;
+
+	ret = omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
+	if (ret)
+		goto err_conf;
+
+	return 0;
+
+err_conf:
+	kfree(af_recover_cfg);
+err_recover_alloc:
+	kfree(af_cfg);
+
+	return ret;
+}
+
+void omap3isp_h3a_af_cleanup(struct isp_device *isp)
+{
+	kfree(isp->isp_af.priv);
+	kfree(isp->isp_af.recover_priv);
+	omap3isp_stat_cleanup(&isp->isp_af);
+}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
new file mode 100644
index 0000000..d1a8dee
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -0,0 +1,518 @@
+/*
+ * isphist.c
+ *
+ * TI OMAP3 ISP - Histogram module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "isphist.h"
+
+#define HIST_CONFIG_DMA	1
+
+#define HIST_USING_DMA(hist) ((hist)->dma_ch >= 0)
+
+/*
+ * hist_reset_mem - Clear histogram memory before starting the stats engine.
+ */
+static void hist_reset_mem(struct ispstat *hist)
+{
+	struct isp_device *isp = hist->isp;
+	struct omap3isp_hist_config *conf = hist->priv;
+	unsigned int i;
+
+	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
+
+	/*
+	 * Setting this bit causes the histogram internal buffer to be cleared
+	 * while it is being read out. The bit must be cleared afterwards.
+	 */
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
+
+	/*
+	 * We'll clear 4 words at each iteration for optimization. It avoids
+	 * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4.
+	 */
+	for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+	}
+	isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
+
+	hist->wait_acc_frames = conf->num_acc_frames;
+}
+
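+/*
+ * hist_dma_config - Prepare the system DMA channel parameters.
+ *
+ * The histogram data port is a single register (ISPHIST_DATA), so the DMA
+ * source uses a constant addressing mode while the destination, a memory
+ * buffer, is post-incremented. The destination address and element count are
+ * filled in later by hist_buf_dma() for each transfer.
+ */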
+static void hist_dma_config(struct ispstat *hist)
+{
+	hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
+	hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
+	hist->dma_config.frame_count = 1;
+	hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
+	hist->dma_config.src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA;
+	hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
+	hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+}
+
+/*
+ * hist_setup_regs - Helper function to update Histogram registers.
+ */
+static void hist_setup_regs(struct ispstat *hist, void *priv)
+{
+	struct isp_device *isp = hist->isp;
+	struct omap3isp_hist_config *conf = priv;
+	int c;
+	u32 cnt;
+	u32 wb_gain;
+	u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS];
+	u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS];
+
+	if (!hist->update || hist->state == ISPSTAT_DISABLED ||
+	    hist->state == ISPSTAT_DISABLING)
+		return;
+
+	cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;
+
+	wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
+	wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
+	wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
+	if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER)
+		wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;
+
+	/* Regions size and position */
+	for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
+		if (c < conf->num_regions) {
+			reg_hor[c] = conf->region[c].h_start <<
+				     ISPHIST_REG_START_SHIFT;
+			reg_hor[c] |= conf->region[c].h_end <<
+				      ISPHIST_REG_END_SHIFT;
+			reg_ver[c] = conf->region[c].v_start <<
+				     ISPHIST_REG_START_SHIFT;
+			reg_ver[c] |= conf->region[c].v_end <<
+				      ISPHIST_REG_END_SHIFT;
+		} else {
+			reg_hor[c] = 0;
+			reg_ver[c] = 0;
+		}
+	}
+
+	cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
+	switch (conf->hist_bins) {
+	case OMAP3ISP_HIST_BINS_256:
+		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
+			ISPHIST_CNT_SHIFT_SHIFT;
+		break;
+	case OMAP3ISP_HIST_BINS_128:
+		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
+			ISPHIST_CNT_SHIFT_SHIFT;
+		break;
+	case OMAP3ISP_HIST_BINS_64:
+		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
+			ISPHIST_CNT_SHIFT_SHIFT;
+		break;
+	default: /* OMAP3ISP_HIST_BINS_32 */
+		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
+			ISPHIST_CNT_SHIFT_SHIFT;
+		break;
+	}
+
+	hist_reset_mem(hist);
+
+	isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
+	isp_reg_writel(isp, wb_gain,  OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
+	isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
+	isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
+	isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
+	isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
+	isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
+	isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
+	isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
+	isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);
+
+	hist->update = 0;
+	hist->config_counter += hist->inc_config;
+	hist->inc_config = 0;
+	hist->buf_size = conf->buf_size;
+}
+
+static void hist_enable(struct ispstat *hist, int enable)
+{
+	if (enable) {
+		isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
+			    ISPHIST_PCR_ENABLE);
+		omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
+	} else {
+		isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
+			    ISPHIST_PCR_ENABLE);
+		omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
+	}
+}
+
+static int hist_busy(struct ispstat *hist)
+{
+	return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)
+						& ISPHIST_PCR_BUSY;
+}
+
+static void hist_dma_cb(int lch, u16 ch_status, void *data)
+{
+	struct ispstat *hist = data;
+
+	if (ch_status & ~OMAP_DMA_BLOCK_IRQ) {
+		dev_dbg(hist->isp->dev, "hist: DMA error. status = 0x%04x\n",
+			ch_status);
+		omap_stop_dma(lch);
+		hist_reset_mem(hist);
+		atomic_set(&hist->buf_err, 1);
+	}
+	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
+		    ISPHIST_CNT_CLEAR);
+
+	omap3isp_stat_dma_isr(hist);
+	if (hist->state != ISPSTAT_DISABLED)
+		omap3isp_hist_dma_done(hist->isp);
+}
+
+static int hist_buf_dma(struct ispstat *hist)
+{
+	dma_addr_t dma_addr = hist->active_buf->dma_addr;
+
+	if (unlikely(!dma_addr)) {
+		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
+		hist_reset_mem(hist);
+		return STAT_NO_BUF;
+	}
+
+	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
+	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
+		    ISPHIST_CNT_CLEAR);
+	omap3isp_flush(hist->isp);
+	hist->dma_config.dst_start = dma_addr;
+	hist->dma_config.elem_count = hist->buf_size / sizeof(u32);
+	omap_set_dma_params(hist->dma_ch, &hist->dma_config);
+
+	omap_start_dma(hist->dma_ch);
+
+	return STAT_BUF_WAITING_DMA;
+}
+
+static int hist_buf_pio(struct ispstat *hist)
+{
+	struct isp_device *isp = hist->isp;
+	u32 *buf = hist->active_buf->virt_addr;
+	unsigned int i;
+
+	if (!buf) {
+		dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
+		hist_reset_mem(hist);
+		return STAT_NO_BUF;
+	}
+
+	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
+
+	/*
+	 * Setting this bit causes the histogram internal buffer to be cleared
+	 * while it is being read out. The bit must be cleared just after all
+	 * the data has been acquired.
+	 */
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
+
+	/*
+	 * Read four 32-bit words at each iteration for optimization. It
+	 * avoids 3/4 of the jumps. We also know buf_size is divisible by 16.
+	 */
+	for (i = hist->buf_size / 16; i > 0; i--) {
+		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
+	}
+	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
+		    ISPHIST_CNT_CLEAR);
+
+	return STAT_BUF_DONE;
+}
+
+/*
+ * hist_buf_process - Callback from ISP driver for HIST interrupt.
+ */
+static int hist_buf_process(struct ispstat *hist)
+{
+	struct omap3isp_hist_config *user_cfg = hist->priv;
+	int ret;
+
+	if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
+		hist_reset_mem(hist);
+		return STAT_NO_BUF;
+	}
+
+	if (--(hist->wait_acc_frames))
+		return STAT_NO_BUF;
+
+	if (HIST_USING_DMA(hist))
+		ret = hist_buf_dma(hist);
+	else
+		ret = hist_buf_pio(hist);
+
+	hist->wait_acc_frames = user_cfg->num_acc_frames;
+
+	return ret;
+}
+
+static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
+{
+	return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
+}
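+
+/*
+ * Illustrative example: four regions of 64-bin histograms need
+ * 4 * OMAP3ISP_HIST_MEM_SIZE_BINS(OMAP3ISP_HIST_BINS_64) bytes, which is
+ * what hist_validate_params() below bumps user_cfg->buf_size up to if the
+ * requested size was smaller.
+ */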
+
+/*
+ * hist_validate_params - Helper function to check user given params.
+ * @user_cfg: Pointer to user configuration structure.
+ *
+ * Returns 0 on successful configuration.
+ */
+static int hist_validate_params(struct ispstat *hist, void *new_conf)
+{
+	struct omap3isp_hist_config *user_cfg = new_conf;
+	int c;
+	u32 buf_size;
+
+	if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3)
+		return -EINVAL;
+
+	/* Regions size and position */
+
+	if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) ||
+	    (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS))
+		return -EINVAL;
+
+	/* Regions */
+	for (c = 0; c < user_cfg->num_regions; c++) {
+		if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
+			return -EINVAL;
+		if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
+			return -EINVAL;
+		if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
+			return -EINVAL;
+		if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
+			return -EINVAL;
+		if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
+			return -EINVAL;
+		if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
+			return -EINVAL;
+	}
+
+	switch (user_cfg->num_regions) {
+	case 1:
+		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256)
+			return -EINVAL;
+		break;
+	case 2:
+		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128)
+			return -EINVAL;
+		break;
+	default: /* 3 or 4 */
+		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64)
+			return -EINVAL;
+		break;
+	}
+
+	buf_size = hist_get_buf_size(user_cfg);
+	if (buf_size > user_cfg->buf_size)
+		/* User's buf_size request wasn't enough */
+		user_cfg->buf_size = buf_size;
+	else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
+		user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;
+
+	return 0;
+}
+
+static int hist_comp_params(struct ispstat *hist,
+			    struct omap3isp_hist_config *user_cfg)
+{
+	struct omap3isp_hist_config *cur_cfg = hist->priv;
+	int c;
+
+	if (cur_cfg->cfa != user_cfg->cfa)
+		return 1;
+
+	if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
+		return 1;
+
+	if (cur_cfg->hist_bins != user_cfg->hist_bins)
+		return 1;
+
+	for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
+		if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
+			break;
+		else if (cur_cfg->wg[c] != user_cfg->wg[c])
+			return 1;
+	}
+
+	if (cur_cfg->num_regions != user_cfg->num_regions)
+		return 1;
+
+	/* Regions */
+	for (c = 0; c < user_cfg->num_regions; c++) {
+		if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
+			return 1;
+		if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
+			return 1;
+		if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
+			return 1;
+		if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * hist_set_params - Helper function to check and store user-given params.
+ * @new_conf: Pointer to user configuration structure.
+ */
+static void hist_set_params(struct ispstat *hist, void *new_conf)
+{
+	struct omap3isp_hist_config *user_cfg = new_conf;
+	struct omap3isp_hist_config *cur_cfg = hist->priv;
+
+	if (!hist->configured || hist_comp_params(hist, user_cfg)) {
+		memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
+		if (user_cfg->num_acc_frames == 0)
+			user_cfg->num_acc_frames = 1;
+		hist->inc_config++;
+		hist->update = 1;
+		/*
+		 * The user might have asked for a bigger buffer than necessary
+		 * for this configuration. In order to return the right amount
+		 * of data during buffer requests, calculate the size here
+		 * instead of sticking with user_cfg->buf_size.
+		 */
+		cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
+	}
+}
+
+static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+	struct ispstat *stat = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_OMAP3ISP_HIST_CFG:
+		return omap3isp_stat_config(stat, arg);
+	case VIDIOC_OMAP3ISP_STAT_REQ:
+		return omap3isp_stat_request_statistics(stat, arg);
+	case VIDIOC_OMAP3ISP_STAT_EN: {
+		int *en = arg;
+		return omap3isp_stat_enable(stat, !!*en);
+	}
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+static const struct ispstat_ops hist_ops = {
+	.validate_params	= hist_validate_params,
+	.set_params		= hist_set_params,
+	.setup_regs		= hist_setup_regs,
+	.enable			= hist_enable,
+	.busy			= hist_busy,
+	.buf_process		= hist_buf_process,
+};
+
+static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
+	.ioctl = hist_ioctl,
+	.subscribe_event = omap3isp_stat_subscribe_event,
+	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
+	.s_stream = omap3isp_stat_s_stream,
+};
+
+static const struct v4l2_subdev_ops hist_subdev_ops = {
+	.core = &hist_subdev_core_ops,
+	.video = &hist_subdev_video_ops,
+};
+
+/*
+ * omap3isp_hist_init - Module Initialization.
+ */
+int omap3isp_hist_init(struct isp_device *isp)
+{
+	struct ispstat *hist = &isp->isp_hist;
+	struct omap3isp_hist_config *hist_cfg;
+	int ret = -1;
+
+	hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
+	if (hist_cfg == NULL)
+		return -ENOMEM;
+
+	memset(hist, 0, sizeof(*hist));
+	if (HIST_CONFIG_DMA)
+		ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
+				       hist_dma_cb, hist, &hist->dma_ch);
+	if (ret) {
+		if (HIST_CONFIG_DMA)
+			dev_warn(isp->dev, "hist: DMA request channel failed. "
+					   "Using PIO only.\n");
+		hist->dma_ch = -1;
+	} else {
+		dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch);
+		hist_dma_config(hist);
+		omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ);
+	}
+
+	hist->ops = &hist_ops;
+	hist->priv = hist_cfg;
+	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
+	hist->isp = isp;
+
+	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
+	if (ret) {
+		kfree(hist_cfg);
+		if (HIST_USING_DMA(hist))
+			omap_free_dma(hist->dma_ch);
+	}
+
+	return ret;
+}
+
+/*
+ * omap3isp_hist_cleanup - Module cleanup.
+ */
+void omap3isp_hist_cleanup(struct isp_device *isp)
+{
+	if (HIST_USING_DMA(&isp->isp_hist))
+		omap_free_dma(isp->isp_hist.dma_ch);
+	kfree(isp->isp_hist.priv);
+	omap3isp_stat_cleanup(&isp->isp_hist);
+}
diff --git a/drivers/media/platform/omap3isp/isphist.h b/drivers/media/platform/omap3isp/isphist.h
new file mode 100644
index 0000000..0b2a38e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isphist.h
@@ -0,0 +1,40 @@
+/*
+ * isphist.h
+ *
+ * TI OMAP3 ISP - Histogram module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_HIST_H
+#define OMAP3_ISP_HIST_H
+
+#include <linux/omap3isp.h>
+
+#define ISPHIST_IN_BIT_WIDTH_CCDC	10
+
+struct isp_device;
+
+int omap3isp_hist_init(struct isp_device *isp);
+void omap3isp_hist_cleanup(struct isp_device *isp);
+
+#endif /* OMAP3_ISP_HIST_H */
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
new file mode 100644
index 0000000..1ae1c09
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -0,0 +1,2348 @@
+/*
+ * isppreview.c
+ *
+ * TI OMAP3 ISP driver - Preview module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "isppreview.h"
+
+/* Default values in Office Fluorescent Light for RGBtoRGB Blending */
+static struct omap3isp_prev_rgbtorgb flr_rgb2rgb = {
+	{	/* RGB-RGB Matrix */
+		{0x01E2, 0x0F30, 0x0FEE},
+		{0x0F9B, 0x01AC, 0x0FB9},
+		{0x0FE0, 0x0EC0, 0x0260}
+	},	/* RGB Offset */
+	{0x0000, 0x0000, 0x0000}
+};
+
+/* Default values in Office Fluorescent Light for RGB to YUV Conversion */
+static struct omap3isp_prev_csc flr_prev_csc = {
+	{	/* CSC Coef Matrix */
+		{66, 129, 25},
+		{-38, -75, 112},
+		{112, -94, -18}
+	},	/* CSC Offset */
+	{0x0, 0x0, 0x0}
+};
+
+/* Default values in Office Fluorescent Light for CFA Gradient */
+#define FLR_CFA_GRADTHRS_HORZ	0x28
+#define FLR_CFA_GRADTHRS_VERT	0x28
+
+/* Default values in Office Fluorescent Light for Chroma Suppression */
+#define FLR_CSUP_GAIN		0x0D
+#define FLR_CSUP_THRES		0xEB
+
+/* Default values in Office Fluorescent Light for Noise Filter */
+#define FLR_NF_STRGTH		0x03
+
+/* Default values for White Balance */
+#define FLR_WBAL_DGAIN		0x100
+#define FLR_WBAL_COEF		0x20
+
+/* Default values in Office Fluorescent Light for Black Adjustment */
+#define FLR_BLKADJ_BLUE		0x0
+#define FLR_BLKADJ_GREEN	0x0
+#define FLR_BLKADJ_RED		0x0
+
+#define DEF_DETECT_CORRECT_VAL	0xe
+
+/*
+ * Margins and image size limits.
+ *
+ * The preview engine crops several rows and columns internally depending on
+ * which filters are enabled. To avoid format changes when the filters are
+ * enabled or disabled (which would prevent them from being turned on or off
+ * during streaming), the driver assumes all the filters are enabled when
+ * computing sink crop and source format limits.
+ *
+ * If a filter is disabled, additional cropping is automatically added at the
+ * preview engine input by the driver to avoid overflow at line and frame end.
+ * This is completely transparent for applications.
+ *
+ * Median filter		4 pixels
+ * Noise filter,
+ * Faulty pixels correction	4 pixels, 4 lines
+ * CFA filter			4 pixels, 4 lines in Bayer mode
+ *					  2 lines in other modes
+ * Color suppression		2 pixels
+ * or luma enhancement
+ * -------------------------------------------------------------
+ * Maximum total		14 pixels, 8 lines
+ *
+ * The color suppression and luma enhancement filters are applied after bayer to
+ * YUV conversion. They thus can crop one pixel on the left and one pixel on the
+ * right side of the image without changing the color pattern. When both those
+ * filters are disabled, the driver must crop the two pixels on the same side of
+ * the image to avoid changing the bayer pattern. The left margin is thus set to
+ * 8 pixels and the right margin to 6 pixels.
+ */
+
+#define PREV_MARGIN_LEFT	8
+#define PREV_MARGIN_RIGHT	6
+#define PREV_MARGIN_TOP		4
+#define PREV_MARGIN_BOTTOM	4
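+
+/*
+ * Worked example (illustrative only, assuming the margins above are taken on
+ * the corresponding sides): with a 3280x2464 input frame, at most
+ * 3280 - PREV_MARGIN_LEFT - PREV_MARGIN_RIGHT = 3266 columns and
+ * 2464 - PREV_MARGIN_TOP - PREV_MARGIN_BOTTOM = 2456 rows can make it to the
+ * preview engine output when all filters are enabled.
+ */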
+
+#define PREV_MIN_IN_WIDTH	64
+#define PREV_MIN_IN_HEIGHT	8
+#define PREV_MAX_IN_HEIGHT	16384
+
+#define PREV_MIN_OUT_WIDTH		0
+#define PREV_MIN_OUT_HEIGHT		0
+#define PREV_MAX_OUT_WIDTH_REV_1	1280
+#define PREV_MAX_OUT_WIDTH_REV_2	3300
+#define PREV_MAX_OUT_WIDTH_REV_15	4096
+
+/*
+ * Coefficient tables for the submodules in Preview.
+ * The arrays are initialised with values from the table header files.
+ */
+
+/*
+ * CFA Filter Coefficient Table
+ */
+static u32 cfa_coef_table[4][OMAP3ISP_PREV_CFA_BLK_SIZE] = {
+#include "cfa_coef_table.h"
+};
+
+/*
+ * Default Gamma Correction Table - All components
+ */
+static u32 gamma_table[] = {
+#include "gamma_table.h"
+};
+
+/*
+ * Noise Filter Threshold table
+ */
+static u32 noise_filter_table[] = {
+#include "noise_filter_table.h"
+};
+
+/*
+ * Luminance Enhancement Table
+ */
+static u32 luma_enhance_table[] = {
+#include "luma_enhance_table.h"
+};
+
+/*
+ * preview_config_luma_enhancement - Configure the Luminance Enhancement table
+ */
+static void
+preview_config_luma_enhancement(struct isp_prev_device *prev,
+				const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_luma *yt = &params->luma;
+	unsigned int i;
+
+	isp_reg_writel(isp, ISPPRV_YENH_TABLE_ADDR,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+	for (i = 0; i < OMAP3ISP_PREV_YENH_TBL_SIZE; i++) {
+		isp_reg_writel(isp, yt->table[i],
+			       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
+	}
+}
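+
+/*
+ * Note on the access pattern above: the preview tables are written through
+ * an indirect interface - the table address is loaded once into
+ * ISPPRV_SET_TBL_ADDR and consecutive entries are then written through
+ * ISPPRV_SET_TBL_DATA. The same pattern is reused below for the CFA, noise
+ * filter and gamma tables.
+ */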
+
+/*
+ * preview_enable_luma_enhancement - Enable/disable Luminance Enhancement
+ */
+static void
+preview_enable_luma_enhancement(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_YNENHEN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_YNENHEN);
+}
+
+/*
+ * preview_enable_invalaw - Enable/disable Inverse A-Law decompression
+ */
+static void preview_enable_invalaw(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+}
+
+/*
+ * preview_config_hmed - Configure the Horizontal Median Filter
+ */
+static void preview_config_hmed(struct isp_prev_device *prev,
+				const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_hmed *hmed = &params->hmed;
+
+	isp_reg_writel(isp, (hmed->odddist == 1 ? 0 : ISPPRV_HMED_ODDDIST) |
+		       (hmed->evendist == 1 ? 0 : ISPPRV_HMED_EVENDIST) |
+		       (hmed->thres << ISPPRV_HMED_THRESHOLD_SHIFT),
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED);
+}
+
+/*
+ * preview_enable_hmed - Enable/disable the Horizontal Median Filter
+ */
+static void preview_enable_hmed(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_HMEDEN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_HMEDEN);
+}
+
+/*
+ * preview_config_cfa - Configure CFA Interpolation for Bayer formats
+ *
+ * The CFA table is organised in four blocks, one per Bayer component. The
+ * hardware expects blocks to follow the Bayer order of the input data, while
+ * the driver stores the table in GRBG order in memory. The blocks need to be
+ * reordered to support non-GRBG Bayer patterns.
+ */
+static void preview_config_cfa(struct isp_prev_device *prev,
+			       const struct prev_params *params)
+{
+	static const unsigned int cfa_coef_order[4][4] = {
+		{ 0, 1, 2, 3 }, /* GRBG */
+		{ 1, 0, 3, 2 }, /* RGGB */
+		{ 2, 3, 0, 1 }, /* BGGR */
+		{ 3, 2, 1, 0 }, /* GBRG */
+	};
+	const unsigned int *order = cfa_coef_order[prev->params.cfa_order];
+	const struct omap3isp_prev_cfa *cfa = &params->cfa;
+	struct isp_device *isp = to_isp_device(prev);
+	unsigned int i;
+	unsigned int j;
+
+	isp_reg_writel(isp,
+		(cfa->gradthrs_vert << ISPPRV_CFA_GRADTH_VER_SHIFT) |
+		(cfa->gradthrs_horz << ISPPRV_CFA_GRADTH_HOR_SHIFT),
+		OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA);
+
+	isp_reg_writel(isp, ISPPRV_CFA_TABLE_ADDR,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+
+	for (i = 0; i < 4; ++i) {
+		const __u32 *block = cfa->table[order[i]];
+
+		for (j = 0; j < OMAP3ISP_PREV_CFA_BLK_SIZE; ++j)
+			isp_reg_writel(isp, block[j], OMAP3_ISP_IOMEM_PREV,
+				       ISPPRV_SET_TBL_DATA);
+	}
+}
+
+/*
+ * preview_config_chroma_suppression - Configure Chroma Suppression
+ */
+static void
+preview_config_chroma_suppression(struct isp_prev_device *prev,
+				  const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_csup *cs = &params->csup;
+
+	isp_reg_writel(isp,
+		       cs->gain | (cs->thres << ISPPRV_CSUP_THRES_SHIFT) |
+		       (cs->hypf_en << ISPPRV_CSUP_HPYF_SHIFT),
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP);
+}
+
+/*
+ * preview_enable_chroma_suppression - Enable/disable Chrominance Suppression
+ */
+static void
+preview_enable_chroma_suppression(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_SUPEN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_SUPEN);
+}
+
+/*
+ * preview_config_whitebalance - Configure White Balance parameters
+ *
+ * The coefficient matrix is always programmed with the default values.
+ */
+static void
+preview_config_whitebalance(struct isp_prev_device *prev,
+			    const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_wbal *wbal = &params->wbal;
+	u32 val;
+
+	isp_reg_writel(isp, wbal->dgain, OMAP3_ISP_IOMEM_PREV, ISPPRV_WB_DGAIN);
+
+	val = wbal->coef0 << ISPPRV_WBGAIN_COEF0_SHIFT;
+	val |= wbal->coef1 << ISPPRV_WBGAIN_COEF1_SHIFT;
+	val |= wbal->coef2 << ISPPRV_WBGAIN_COEF2_SHIFT;
+	val |= wbal->coef3 << ISPPRV_WBGAIN_COEF3_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_WBGAIN);
+
+	isp_reg_writel(isp,
+		       ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_0_SHIFT |
+		       ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_1_SHIFT |
+		       ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_2_SHIFT |
+		       ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_3_SHIFT |
+		       ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_0_SHIFT |
+		       ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_1_SHIFT |
+		       ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_2_SHIFT |
+		       ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_3_SHIFT |
+		       ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_0_SHIFT |
+		       ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_1_SHIFT |
+		       ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_2_SHIFT |
+		       ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_3_SHIFT |
+		       ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_0_SHIFT |
+		       ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_1_SHIFT |
+		       ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_2_SHIFT |
+		       ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_3_SHIFT,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL);
+}
+
+/*
+ * preview_config_blkadj - Configure Black Adjustment
+ */
+static void
+preview_config_blkadj(struct isp_prev_device *prev,
+		      const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_blkadj *blkadj = &params->blkadj;
+
+	isp_reg_writel(isp, (blkadj->blue << ISPPRV_BLKADJOFF_B_SHIFT) |
+		       (blkadj->green << ISPPRV_BLKADJOFF_G_SHIFT) |
+		       (blkadj->red << ISPPRV_BLKADJOFF_R_SHIFT),
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF);
+}
+
+/*
+ * preview_config_rgb_blending - Configure RGB-RGB Blending
+ */
+static void
+preview_config_rgb_blending(struct isp_prev_device *prev,
+			    const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_rgbtorgb *rgbrgb = &params->rgb2rgb;
+	u32 val;
+
+	val = (rgbrgb->matrix[0][0] & 0xfff) << ISPPRV_RGB_MAT1_MTX_RR_SHIFT;
+	val |= (rgbrgb->matrix[0][1] & 0xfff) << ISPPRV_RGB_MAT1_MTX_GR_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT1);
+
+	val = (rgbrgb->matrix[0][2] & 0xfff) << ISPPRV_RGB_MAT2_MTX_BR_SHIFT;
+	val |= (rgbrgb->matrix[1][0] & 0xfff) << ISPPRV_RGB_MAT2_MTX_RG_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT2);
+
+	val = (rgbrgb->matrix[1][1] & 0xfff) << ISPPRV_RGB_MAT3_MTX_GG_SHIFT;
+	val |= (rgbrgb->matrix[1][2] & 0xfff) << ISPPRV_RGB_MAT3_MTX_BG_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT3);
+
+	val = (rgbrgb->matrix[2][0] & 0xfff) << ISPPRV_RGB_MAT4_MTX_RB_SHIFT;
+	val |= (rgbrgb->matrix[2][1] & 0xfff) << ISPPRV_RGB_MAT4_MTX_GB_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT4);
+
+	val = (rgbrgb->matrix[2][2] & 0xfff) << ISPPRV_RGB_MAT5_MTX_BB_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT5);
+
+	val = (rgbrgb->offset[0] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT;
+	val |= (rgbrgb->offset[1] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF1);
+
+	val = (rgbrgb->offset[2] & 0x3ff) << ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF2);
+}
+
+/*
+ * preview_config_csc - Configure Color Space Conversion (RGB to YCbYCr)
+ */
+static void
+preview_config_csc(struct isp_prev_device *prev,
+		   const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_csc *csc = &params->csc;
+	u32 val;
+
+	val = (csc->matrix[0][0] & 0x3ff) << ISPPRV_CSC0_RY_SHIFT;
+	val |= (csc->matrix[0][1] & 0x3ff) << ISPPRV_CSC0_GY_SHIFT;
+	val |= (csc->matrix[0][2] & 0x3ff) << ISPPRV_CSC0_BY_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0);
+
+	val = (csc->matrix[1][0] & 0x3ff) << ISPPRV_CSC1_RCB_SHIFT;
+	val |= (csc->matrix[1][1] & 0x3ff) << ISPPRV_CSC1_GCB_SHIFT;
+	val |= (csc->matrix[1][2] & 0x3ff) << ISPPRV_CSC1_BCB_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1);
+
+	val = (csc->matrix[2][0] & 0x3ff) << ISPPRV_CSC2_RCR_SHIFT;
+	val |= (csc->matrix[2][1] & 0x3ff) << ISPPRV_CSC2_GCR_SHIFT;
+	val |= (csc->matrix[2][2] & 0x3ff) << ISPPRV_CSC2_BCR_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2);
+
+	val = (csc->offset[0] & 0xff) << ISPPRV_CSC_OFFSET_Y_SHIFT;
+	val |= (csc->offset[1] & 0xff) << ISPPRV_CSC_OFFSET_CB_SHIFT;
+	val |= (csc->offset[2] & 0xff) << ISPPRV_CSC_OFFSET_CR_SHIFT;
+	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC_OFFSET);
+}
+
+/*
+ * preview_config_yc_range - Configure the max and min Y and C values
+ */
+static void
+preview_config_yc_range(struct isp_prev_device *prev,
+			const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_yclimit *yc = &params->yclimit;
+
+	isp_reg_writel(isp,
+		       yc->maxC << ISPPRV_SETUP_YC_MAXC_SHIFT |
+		       yc->maxY << ISPPRV_SETUP_YC_MAXY_SHIFT |
+		       yc->minC << ISPPRV_SETUP_YC_MINC_SHIFT |
+		       yc->minY << ISPPRV_SETUP_YC_MINY_SHIFT,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC);
+}
+
+/*
+ * preview_config_dcor - Configure Couplet Defect Correction
+ */
+static void
+preview_config_dcor(struct isp_prev_device *prev,
+		    const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_dcor *dcor = &params->dcor;
+
+	isp_reg_writel(isp, dcor->detect_correct[0],
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0);
+	isp_reg_writel(isp, dcor->detect_correct[1],
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1);
+	isp_reg_writel(isp, dcor->detect_correct[2],
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2);
+	isp_reg_writel(isp, dcor->detect_correct[3],
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3);
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			ISPPRV_PCR_DCCOUP,
+			dcor->couplet_mode_en ? ISPPRV_PCR_DCCOUP : 0);
+}
+
+/*
+ * preview_enable_dcor - Enable/disable Couplet Defect Correction
+ */
+static void preview_enable_dcor(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_DCOREN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_DCOREN);
+}
+
+/*
+ * preview_enable_drkframe_capture - Enable/disable Dark Frame Capture
+ */
+static void
+preview_enable_drkframe_capture(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_DRKFCAP);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_DRKFCAP);
+}
+
+/*
+ * preview_enable_drkframe - Enable/disable Dark Frame Subtraction
+ */
+static void preview_enable_drkframe(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_DRKFEN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_DRKFEN);
+}
+
+/*
+ * preview_config_noisefilter - Configure the Noise Filter
+ */
+static void
+preview_config_noisefilter(struct isp_prev_device *prev,
+			   const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_nf *nf = &params->nf;
+	unsigned int i;
+
+	isp_reg_writel(isp, nf->spread, OMAP3_ISP_IOMEM_PREV, ISPPRV_NF);
+	isp_reg_writel(isp, ISPPRV_NF_TABLE_ADDR,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+	for (i = 0; i < OMAP3ISP_PREV_NF_TBL_SIZE; i++) {
+		isp_reg_writel(isp, nf->table[i],
+			       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA);
+	}
+}
+
+/*
+ * preview_enable_noisefilter - Enable/disable the Noise Filter
+ */
+static void
+preview_enable_noisefilter(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_NFEN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_NFEN);
+}
+
+/*
+ * preview_config_gammacorrn - Configure the Gamma Correction tables
+ */
+static void
+preview_config_gammacorrn(struct isp_prev_device *prev,
+			  const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	const struct omap3isp_prev_gtables *gt = &params->gamma;
+	unsigned int i;
+
+	isp_reg_writel(isp, ISPPRV_REDGAMMA_TABLE_ADDR,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+	for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++)
+		isp_reg_writel(isp, gt->red[i], OMAP3_ISP_IOMEM_PREV,
+			       ISPPRV_SET_TBL_DATA);
+
+	isp_reg_writel(isp, ISPPRV_GREENGAMMA_TABLE_ADDR,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+	for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++)
+		isp_reg_writel(isp, gt->green[i], OMAP3_ISP_IOMEM_PREV,
+			       ISPPRV_SET_TBL_DATA);
+
+	isp_reg_writel(isp, ISPPRV_BLUEGAMMA_TABLE_ADDR,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR);
+	for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++)
+		isp_reg_writel(isp, gt->blue[i], OMAP3_ISP_IOMEM_PREV,
+			       ISPPRV_SET_TBL_DATA);
+}
+
+/*
+ * preview_enable_gammacorrn - Enable/disable Gamma Correction
+ *
+ * When gamma correction is disabled, the module is bypassed and its output is
+ * the 8 MSBs of the 10-bit input.
+ */
+static void
+preview_enable_gammacorrn(struct isp_prev_device *prev, bool enable)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	if (enable)
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_GAMMA_BYPASS);
+	else
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_GAMMA_BYPASS);
+}
+
+/*
+ * preview_config_contrast - Configure the Contrast
+ *
+ * Value should be programmed before enabling the module.
+ */
+static void
+preview_config_contrast(struct isp_prev_device *prev,
+			const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT,
+			0xff << ISPPRV_CNT_BRT_CNT_SHIFT,
+			params->contrast << ISPPRV_CNT_BRT_CNT_SHIFT);
+}
+
+/*
+ * preview_config_brightness - Configure the Brightness
+ */
+static void
+preview_config_brightness(struct isp_prev_device *prev,
+			  const struct prev_params *params)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT,
+			0xff << ISPPRV_CNT_BRT_BRT_SHIFT,
+			params->brightness << ISPPRV_CNT_BRT_BRT_SHIFT);
+}
+
+/*
+ * preview_update_contrast - Update the contrast.
+ * @contrast: New contrast value to be programmed.
+ *
+ * Value should be programmed before enabling the module.
+ */
+static void
+preview_update_contrast(struct isp_prev_device *prev, u8 contrast)
+{
+	struct prev_params *params;
+	unsigned long flags;
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	params = (prev->params.active & OMAP3ISP_PREV_CONTRAST)
+	       ? &prev->params.params[0] : &prev->params.params[1];
+
+	if (params->contrast != (contrast * ISPPRV_CONTRAST_UNITS)) {
+		params->contrast = contrast * ISPPRV_CONTRAST_UNITS;
+		params->update |= OMAP3ISP_PREV_CONTRAST;
+	}
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+/*
+ * preview_update_brightness - Update the brightness in the preview module.
+ * @brightness: New brightness value to be programmed.
+ */
+static void
+preview_update_brightness(struct isp_prev_device *prev, u8 brightness)
+{
+	struct prev_params *params;
+	unsigned long flags;
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	params = (prev->params.active & OMAP3ISP_PREV_BRIGHTNESS)
+	       ? &prev->params.params[0] : &prev->params.params[1];
+
+	if (params->brightness != (brightness * ISPPRV_BRIGHT_UNITS)) {
+		params->brightness = brightness * ISPPRV_BRIGHT_UNITS;
+		params->update |= OMAP3ISP_PREV_BRIGHTNESS;
+	}
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
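+/*
+ * preview_params_lock - Mark parameters as busy before they are accessed.
+ * @prev: ISP preview engine
+ * @update: Bitmask of the OMAP3ISP_PREV_* parameters to access
+ * @shadow: True to access the shadow (userspace) copies, false for the
+ *	    active (hardware) copies
+ *
+ * Parameters are double-buffered: for each OMAP3ISP_PREV_* bit, one of
+ * params[0]/params[1] is the active copy applied to the hardware and the
+ * other is the shadow copy written by userspace, selected by the
+ * prev->params.active bitmask. Marking a copy as busy prevents it from being
+ * switched while it is being accessed. In the non-shadow case the function
+ * also returns the set of parameters whose update flag is set and that
+ * therefore still need to be applied to the hardware.
+ *
+ * Must be called with params.lock held.
+ */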
+static u32
+preview_params_lock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+	u32 active = prev->params.active;
+
+	if (shadow) {
+		/* Mark all shadow parameters we are going to touch as busy. */
+		prev->params.params[0].busy |= ~active & update;
+		prev->params.params[1].busy |= active & update;
+	} else {
+		/* Mark all active parameters we are going to touch as busy. */
+		update = (prev->params.params[0].update & active)
+		       | (prev->params.params[1].update & ~active);
+
+		prev->params.params[0].busy |= active & update;
+		prev->params.params[1].busy |= ~active & update;
+	}
+
+	return update;
+}
+
+static void
+preview_params_unlock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+	u32 active = prev->params.active;
+
+	if (shadow) {
+		/* Set the update flag for shadow parameters that have been
+		 * updated and clear the busy flag for all shadow parameters.
+		 */
+		prev->params.params[0].update |= (~active & update);
+		prev->params.params[1].update |= (active & update);
+		prev->params.params[0].busy &= active;
+		prev->params.params[1].busy &= ~active;
+	} else {
+		/* Clear the update flag for active parameters that have been
+		 * applied and the busy flag for all active parameters.
+		 */
+		prev->params.params[0].update &= ~(active & update);
+		prev->params.params[1].update &= ~(~active & update);
+		prev->params.params[0].busy &= ~active;
+		prev->params.params[1].busy &= active;
+	}
+}
+
+static void preview_params_switch(struct isp_prev_device *prev)
+{
+	u32 to_switch;
+
+	/* Switch active parameters with updated shadow parameters when the
+	 * shadow parameter has been updated and neither the active nor the
+	 * shadow parameter is busy.
+	 */
+	to_switch = (prev->params.params[0].update & ~prev->params.active)
+		  | (prev->params.params[1].update & prev->params.active);
+	to_switch &= ~(prev->params.params[0].busy |
+		       prev->params.params[1].busy);
+	if (to_switch == 0)
+		return;
+
+	prev->params.active ^= to_switch;
+
+	/* Remove the update flag for the shadow copy of parameters we have
+	 * switched.
+	 */
+	prev->params.params[0].update &= ~(~prev->params.active & to_switch);
+	prev->params.params[1].update &= ~(prev->params.active & to_switch);
+}
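+
+/*
+ * Illustrative walk-through of the switch above: if userspace has updated,
+ * say, the noise filter (the OMAP3ISP_PREV_NF update bit is set on the
+ * shadow side) and neither copy of that parameter is marked busy, the
+ * OMAP3ISP_PREV_NF bit is toggled in prev->params.active so that the freshly
+ * written copy becomes the active one. Its update flag is left set on the
+ * now-active side, flagging it for the next hardware configuration, while
+ * the flag on the now-shadow side is cleared.
+ */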
+
+/* preview parameters update structure */
+struct preview_update {
+	void (*config)(struct isp_prev_device *, const struct prev_params *);
+	void (*enable)(struct isp_prev_device *, bool);
+	unsigned int param_offset;
+	unsigned int param_size;
+	unsigned int config_offset;
+	bool skip;
+};
+
+/* Keep the array indexed by the OMAP3ISP_PREV_* bit number. */
+static const struct preview_update update_attrs[] = {
+	/* OMAP3ISP_PREV_LUMAENH */ {
+		preview_config_luma_enhancement,
+		preview_enable_luma_enhancement,
+		offsetof(struct prev_params, luma),
+		FIELD_SIZEOF(struct prev_params, luma),
+		offsetof(struct omap3isp_prev_update_config, luma),
+	}, /* OMAP3ISP_PREV_INVALAW */ {
+		NULL,
+		preview_enable_invalaw,
+	}, /* OMAP3ISP_PREV_HRZ_MED */ {
+		preview_config_hmed,
+		preview_enable_hmed,
+		offsetof(struct prev_params, hmed),
+		FIELD_SIZEOF(struct prev_params, hmed),
+		offsetof(struct omap3isp_prev_update_config, hmed),
+	}, /* OMAP3ISP_PREV_CFA */ {
+		preview_config_cfa,
+		NULL,
+		offsetof(struct prev_params, cfa),
+		FIELD_SIZEOF(struct prev_params, cfa),
+		offsetof(struct omap3isp_prev_update_config, cfa),
+	}, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
+		preview_config_chroma_suppression,
+		preview_enable_chroma_suppression,
+		offsetof(struct prev_params, csup),
+		FIELD_SIZEOF(struct prev_params, csup),
+		offsetof(struct omap3isp_prev_update_config, csup),
+	}, /* OMAP3ISP_PREV_WB */ {
+		preview_config_whitebalance,
+		NULL,
+		offsetof(struct prev_params, wbal),
+		FIELD_SIZEOF(struct prev_params, wbal),
+		offsetof(struct omap3isp_prev_update_config, wbal),
+	}, /* OMAP3ISP_PREV_BLKADJ */ {
+		preview_config_blkadj,
+		NULL,
+		offsetof(struct prev_params, blkadj),
+		FIELD_SIZEOF(struct prev_params, blkadj),
+		offsetof(struct omap3isp_prev_update_config, blkadj),
+	}, /* OMAP3ISP_PREV_RGB2RGB */ {
+		preview_config_rgb_blending,
+		NULL,
+		offsetof(struct prev_params, rgb2rgb),
+		FIELD_SIZEOF(struct prev_params, rgb2rgb),
+		offsetof(struct omap3isp_prev_update_config, rgb2rgb),
+	}, /* OMAP3ISP_PREV_COLOR_CONV */ {
+		preview_config_csc,
+		NULL,
+		offsetof(struct prev_params, csc),
+		FIELD_SIZEOF(struct prev_params, csc),
+		offsetof(struct omap3isp_prev_update_config, csc),
+	}, /* OMAP3ISP_PREV_YC_LIMIT */ {
+		preview_config_yc_range,
+		NULL,
+		offsetof(struct prev_params, yclimit),
+		FIELD_SIZEOF(struct prev_params, yclimit),
+		offsetof(struct omap3isp_prev_update_config, yclimit),
+	}, /* OMAP3ISP_PREV_DEFECT_COR */ {
+		preview_config_dcor,
+		preview_enable_dcor,
+		offsetof(struct prev_params, dcor),
+		FIELD_SIZEOF(struct prev_params, dcor),
+		offsetof(struct omap3isp_prev_update_config, dcor),
+	}, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ {
+		NULL,
+		NULL,
+	}, /* OMAP3ISP_PREV_DRK_FRM_CAPTURE */ {
+		NULL,
+		preview_enable_drkframe_capture,
+	}, /* OMAP3ISP_PREV_DRK_FRM_SUBTRACT */ {
+		NULL,
+		preview_enable_drkframe,
+	}, /* OMAP3ISP_PREV_LENS_SHADING */ {
+		NULL,
+		preview_enable_drkframe,
+	}, /* OMAP3ISP_PREV_NF */ {
+		preview_config_noisefilter,
+		preview_enable_noisefilter,
+		offsetof(struct prev_params, nf),
+		FIELD_SIZEOF(struct prev_params, nf),
+		offsetof(struct omap3isp_prev_update_config, nf),
+	}, /* OMAP3ISP_PREV_GAMMA */ {
+		preview_config_gammacorrn,
+		preview_enable_gammacorrn,
+		offsetof(struct prev_params, gamma),
+		FIELD_SIZEOF(struct prev_params, gamma),
+		offsetof(struct omap3isp_prev_update_config, gamma),
+	}, /* OMAP3ISP_PREV_CONTRAST */ {
+		preview_config_contrast,
+		NULL,
+		0, 0, 0, true,
+	}, /* OMAP3ISP_PREV_BRIGHTNESS */ {
+		preview_config_brightness,
+		NULL,
+		0, 0, 0, true,
+	},
+};
+
+/*
+ * preview_config - Copy and update local structure with userspace preview
+ *                  configuration.
+ * @prev: ISP preview engine
+ * @cfg: Configuration
+ *
+ * Return zero on success or -EFAULT if the configuration can't be copied from
+ * userspace.
+ */
+static int preview_config(struct isp_prev_device *prev,
+			  struct omap3isp_prev_update_config *cfg)
+{
+	unsigned long flags;
+	unsigned int i;
+	int rval = 0;
+	u32 update;
+	u32 active;
+
+	if (cfg->update == 0)
+		return 0;
+
+	/* Mark the shadow parameters we're going to update as busy. */
+	spin_lock_irqsave(&prev->params.lock, flags);
+	preview_params_lock(prev, cfg->update, true);
+	active = prev->params.active;
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+
+	update = 0;
+
+	for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
+		const struct preview_update *attr = &update_attrs[i];
+		struct prev_params *params;
+		unsigned int bit = 1 << i;
+
+		if (attr->skip || !(cfg->update & bit))
+			continue;
+
+		params = &prev->params.params[!!(active & bit)];
+
+		if (cfg->flag & bit) {
+			void __user *from = *(void * __user *)
+				((void *)cfg + attr->config_offset);
+			void *to = (void *)params + attr->param_offset;
+			size_t size = attr->param_size;
+
+			if (to && from && size) {
+				if (copy_from_user(to, from, size)) {
+					rval = -EFAULT;
+					break;
+				}
+			}
+			params->features |= bit;
+		} else {
+			params->features &= ~bit;
+		}
+
+		update |= bit;
+	}
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	preview_params_unlock(prev, update, true);
+	preview_params_switch(prev);
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+
+	return rval;
+}
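
preview_config() dispatches the userspace configuration through the update_attrs table: the feature bit indexes an entry, and offsetof()-based offsets locate both the user pointer in the ioctl argument and the destination inside the selected shadow parameter set. The stand-alone sketch below reproduces that table-driven copy with hypothetical demo_* structures, using plain memcpy() in place of copy_from_user().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical ioctl argument: an update mask plus user pointers to the
 * per-feature data blocks.
 */
struct demo_cfg {
	uint32_t update;
	const uint16_t *gain;
	const uint8_t *table;
};

/* Hypothetical in-kernel parameter set the data is copied into. */
struct demo_par {
	uint16_t gain;
	uint8_t table[4];
};

struct demo_attr {
	size_t cfg_offset;	/* where the user pointer sits in demo_cfg */
	size_t par_offset;	/* where the data lands in demo_par */
	size_t size;
};

static const struct demo_attr demo_attrs[] = {
	/* bit 0 */ { offsetof(struct demo_cfg, gain),
		      offsetof(struct demo_par, gain), sizeof(uint16_t) },
	/* bit 1 */ { offsetof(struct demo_cfg, table),
		      offsetof(struct demo_par, table), 4 },
};

static void demo_apply(struct demo_par *par, const struct demo_cfg *cfg)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_attrs) / sizeof(demo_attrs[0]); i++) {
		const struct demo_attr *attr = &demo_attrs[i];
		const void *from;

		if (!(cfg->update & (1u << i)))
			continue;

		/* Fetch the pointer stored at cfg_offset, then copy. */
		from = *(const void * const *)((const char *)cfg +
					       attr->cfg_offset);
		memcpy((char *)par + attr->par_offset, from, attr->size);
	}
}

int main(void)
{
	uint16_t user_gain = 42;
	struct demo_cfg cfg = { 0x1, &user_gain, NULL };
	struct demo_par par = { 0 };

	demo_apply(&par, &cfg);
	printf("gain = %u\n", par.gain);
	return 0;
}
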
+
+/*
+ * preview_setup_hw - Setup preview registers and/or internal memory
+ * @prev: pointer to preview private structure
+ * @update: Bitmask of parameters to setup
+ * @active: Bitmask of parameters active in set 0
+ * Note: can be called from interrupt context
+ * Return none
+ */
+static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
+			     u32 active)
+{
+	unsigned int i;
+	u32 features;
+
+	if (update == 0)
+		return;
+
+	features = (prev->params.params[0].features & active)
+		 | (prev->params.params[1].features & ~active);
+
+	for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
+		const struct preview_update *attr = &update_attrs[i];
+		struct prev_params *params;
+		unsigned int bit = 1 << i;
+
+		if (!(update & bit))
+			continue;
+
+		params = &prev->params.params[!(active & bit)];
+
+		if (params->features & bit) {
+			if (attr->config)
+				attr->config(prev, params);
+			if (attr->enable)
+				attr->enable(prev, true);
+		} else {
+			if (attr->enable)
+				attr->enable(prev, false);
+		}
+	}
+}
+
+/*
+ * preview_config_ycpos - Configure byte layout of YUV image.
+ * @pixelcode: Media bus pixel code that selects the required byte layout.
+ */
+static void
+preview_config_ycpos(struct isp_prev_device *prev,
+		     enum v4l2_mbus_pixelcode pixelcode)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	enum preview_ycpos_mode mode;
+
+	switch (pixelcode) {
+	case V4L2_MBUS_FMT_YUYV8_1X16:
+		mode = YCPOS_CrYCbY;
+		break;
+	case V4L2_MBUS_FMT_UYVY8_1X16:
+		mode = YCPOS_YCrYCb;
+		break;
+	default:
+		return;
+	}
+
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			ISPPRV_PCR_YCPOS_CrYCbY,
+			mode << ISPPRV_PCR_YCPOS_SHIFT);
+}
+
+/*
+ * preview_config_averager - Enable / disable / configure averager
+ * @average: Average value to be configured.
+ */
+static void preview_config_averager(struct isp_prev_device *prev, u8 average)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_writel(isp, ISPPRV_AVE_EVENDIST_2 << ISPPRV_AVE_EVENDIST_SHIFT |
+		       ISPPRV_AVE_ODDDIST_2 << ISPPRV_AVE_ODDDIST_SHIFT |
+		       average, OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE);
+}
+
+
+/*
+ * preview_config_input_format - Configure the input format
+ * @prev: The preview engine
+ * @format: Format on the preview engine sink pad
+ *
+ * Enable and configure CFA interpolation for Bayer formats and disable it for
+ * greyscale formats.
+ *
+ * The CFA table is organised in four blocks, one per Bayer component. The
+ * hardware expects blocks to follow the Bayer order of the input data, while
+ * the driver stores the table in GRBG order in memory. The blocks need to be
+ * reordered to support non-GRBG Bayer patterns.
+ */
+static void preview_config_input_format(struct isp_prev_device *prev,
+					const struct v4l2_mbus_framefmt *format)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	struct prev_params *params;
+
+	switch (format->code) {
+	case V4L2_MBUS_FMT_SGRBG10_1X10:
+		prev->params.cfa_order = 0;
+		break;
+	case V4L2_MBUS_FMT_SRGGB10_1X10:
+		prev->params.cfa_order = 1;
+		break;
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+		prev->params.cfa_order = 2;
+		break;
+	case V4L2_MBUS_FMT_SGBRG10_1X10:
+		prev->params.cfa_order = 3;
+		break;
+	default:
+		/* Disable CFA for non-Bayer formats. */
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_CFAEN);
+		return;
+	}
+
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_CFAEN);
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			ISPPRV_PCR_CFAFMT_MASK, ISPPRV_PCR_CFAFMT_BAYER);
+
+	params = (prev->params.active & OMAP3ISP_PREV_CFA)
+	       ? &prev->params.params[0] : &prev->params.params[1];
+
+	preview_config_cfa(prev, params);
+}
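
The switch above maps the Bayer mbus code to the block order used to reorder the GRBG-ordered CFA table. A trivial sketch of that mapping follows; the enum values are placeholders, not the real media bus codes.

#include <stdio.h>

enum demo_bayer { DEMO_GRBG, DEMO_RGGB, DEMO_BGGR, DEMO_GBRG, DEMO_GREY };

/* The CFA table is stored in GRBG order; other patterns reorder blocks. */
static int demo_cfa_order(enum demo_bayer code)
{
	switch (code) {
	case DEMO_GRBG: return 0;
	case DEMO_RGGB: return 1;
	case DEMO_BGGR: return 2;
	case DEMO_GBRG: return 3;
	default:        return -1;	/* greyscale: CFA disabled */
	}
}

int main(void)
{
	printf("BGGR -> block order %d\n", demo_cfa_order(DEMO_BGGR));
	return 0;
}
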
+
+/*
+ * preview_config_input_size - Configure the input frame size
+ *
+ * The preview engine crops several rows and columns internally depending on
+ * which processing blocks are enabled. The driver assumes all those blocks are
+ * enabled when reporting source pad formats to userspace. If this assumption is
+ * not true, rows and columns must be manually cropped at the preview engine
+ * input to avoid overflows at the end of lines and frames.
+ *
+ * See the explanation at the PREV_MARGIN_* definitions for more details.
+ */
+static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
+{
+	const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
+	struct isp_device *isp = to_isp_device(prev);
+	unsigned int sph = prev->crop.left;
+	unsigned int eph = prev->crop.left + prev->crop.width - 1;
+	unsigned int slv = prev->crop.top;
+	unsigned int elv = prev->crop.top + prev->crop.height - 1;
+	u32 features;
+
+	if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
+		sph -= 2;
+		eph += 2;
+		slv -= 2;
+		elv += 2;
+	}
+
+	features = (prev->params.params[0].features & active)
+		 | (prev->params.params[1].features & ~active);
+
+	if (features & (OMAP3ISP_PREV_DEFECT_COR | OMAP3ISP_PREV_NF)) {
+		sph -= 2;
+		eph += 2;
+		slv -= 2;
+		elv += 2;
+	}
+	if (features & OMAP3ISP_PREV_HRZ_MED) {
+		sph -= 2;
+		eph += 2;
+	}
+	if (features & (OMAP3ISP_PREV_CHROMA_SUPP | OMAP3ISP_PREV_LUMAENH))
+		sph -= 2;
+
+	isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO);
+	isp_reg_writel(isp, (slv << ISPPRV_VERT_INFO_SLV_SHIFT) | elv,
+		       OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO);
+}
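
As a rough illustration of the margin arithmetic above, the sketch below widens a crop window by the same per-filter amounts. The feature names and window values are made up; they only stand in for the OMAP3ISP_PREV_* flags.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical feature bits standing in for the OMAP3ISP_PREV_* flags. */
#define DEMO_DCOR	(1u << 0)	/* defect correction */
#define DEMO_NF		(1u << 1)	/* noise filter */
#define DEMO_HMED	(1u << 2)	/* horizontal median filter */
#define DEMO_CSUP	(1u << 3)	/* chroma suppression */
#define DEMO_LUMA	(1u << 4)	/* luma enhancement */

struct demo_win { int sph, eph, slv, elv; };

/* Widen the window by the pixels/lines each enabled block consumes. */
static void demo_expand(struct demo_win *w, uint32_t features, int bayer)
{
	if (bayer) {			/* Bayer input: CFA margins */
		w->sph -= 2; w->eph += 2;
		w->slv -= 2; w->elv += 2;
	}
	if (features & (DEMO_DCOR | DEMO_NF)) {
		w->sph -= 2; w->eph += 2;
		w->slv -= 2; w->elv += 2;
	}
	if (features & DEMO_HMED) {
		w->sph -= 2; w->eph += 2;
	}
	if (features & (DEMO_CSUP | DEMO_LUMA))
		w->sph -= 2;
}

int main(void)
{
	/* 1280x720 crop starting at (8, 8), made-up numbers. */
	struct demo_win w = { 8, 8 + 1280 - 1, 8, 8 + 720 - 1 };

	demo_expand(&w, DEMO_DCOR | DEMO_NF | DEMO_HMED, 1);
	printf("sph=%d eph=%d slv=%d elv=%d\n", w.sph, w.eph, w.slv, w.elv);
	return 0;
}
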
+
+/*
+ * preview_config_inlineoffset - Configures the Read address line offset.
+ * @prev: Preview module
+ * @offset: Line offset
+ *
+ * According to the TRM, the line offset must be aligned on a 32-byte boundary.
+ * However, a hardware bug requires the memory start address to be aligned on a
+ * 64-byte boundary, so the offset should probably be aligned on 64 bytes as
+ * well.
+ */
+static void
+preview_config_inlineoffset(struct isp_prev_device *prev, u32 offset)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
+		       ISPPRV_RADR_OFFSET);
+}
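
For illustration, here is a small sketch of how a line offset satisfying these alignment constraints could be derived for a 16-bit-per-sample line; the helper name and the 1021-pixel width are arbitrary. preview_configure() below achieves the same effect with ALIGN(format->width, 0x20) * 2, i.e. padding the width to 32 pixels before converting to bytes.

#include <stdint.h>
#include <stdio.h>

#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static uint32_t demo_line_offset(uint32_t width_pixels, uint32_t align)
{
	uint32_t bytes = width_pixels * 2;	/* 2 bytes per sample */

	return DEMO_ALIGN(bytes, align);
}

int main(void)
{
	/* 1021 pixels * 2 bytes = 2042 bytes, padded to 2048. */
	printf("offset = %u bytes\n", demo_line_offset(1021, 64));
	return 0;
}
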
+
+/*
+ * preview_set_inaddr - Sets memory address of input frame.
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Configures the memory address from which the input frame is to be read.
+ */
+static void preview_set_inaddr(struct isp_prev_device *prev, u32 addr)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR);
+}
+
+/*
+ * preview_config_outlineoffset - Configures the Write address line offset.
+ * @offset: Line Offset for the preview output.
+ *
+ * The offset must be a multiple of 32 bytes.
+ */
+static void preview_config_outlineoffset(struct isp_prev_device *prev,
+				    u32 offset)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
+		       ISPPRV_WADD_OFFSET);
+}
+
+/*
+ * preview_set_outaddr - Sets the memory address to store output frame
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ *
+ * Configures the memory address to which the output frame is written.
+ */
+static void preview_set_outaddr(struct isp_prev_device *prev, u32 addr)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_WSDR_ADDR);
+}
+
+static void preview_adjust_bandwidth(struct isp_prev_device *prev)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
+	struct isp_device *isp = to_isp_device(prev);
+	const struct v4l2_mbus_framefmt *ifmt = &prev->formats[PREV_PAD_SINK];
+	unsigned long l3_ick = pipe->l3_ick;
+	struct v4l2_fract *timeperframe;
+	unsigned int cycles_per_frame;
+	unsigned int requests_per_frame;
+	unsigned int cycles_per_request;
+	unsigned int minimum;
+	unsigned int maximum;
+	unsigned int value;
+
+	if (prev->input != PREVIEW_INPUT_MEMORY) {
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+			    ISPSBL_SDR_REQ_PRV_EXP_MASK);
+		return;
+	}
+
+	/* Compute the minimum number of cycles per request, based on the
+	 * pipeline maximum data rate. This is an absolute lower bound if we
+	 * don't want SBL overflows, so round the value up.
+	 */
+	cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
+				     pipe->max_rate);
+	minimum = DIV_ROUND_UP(cycles_per_request, 32);
+
+	/* Compute the maximum number of cycles per request, based on the
+	 * requested frame rate. This is a soft upper bound to achieve a frame
+	 * rate equal or higher than the requested value, so round the value
+	 * rate equal to or higher than the requested value, so round the value
+	 */
+	timeperframe = &pipe->max_timeperframe;
+
+	requests_per_frame = DIV_ROUND_UP(ifmt->width * 2, 256) * ifmt->height;
+	cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
+				   timeperframe->denominator);
+	cycles_per_request = cycles_per_frame / requests_per_frame;
+
+	maximum = cycles_per_request / 32;
+
+	value = max(minimum, maximum);
+
+	dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+			ISPSBL_SDR_REQ_PRV_EXP_MASK,
+			value << ISPSBL_SDR_REQ_PRV_EXP_SHIFT);
+}
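
The computation above can be reproduced in isolation. The sketch below mirrors the two bounds (a minimum derived from the pipeline data rate and a maximum derived from the frame period, combined with max() as the driver does) using entirely hypothetical clock, rate and frame numbers.

#include <stdint.h>
#include <stdio.h>

static unsigned int demo_prv_exp(uint64_t l3_ick, uint64_t max_rate,
				 unsigned int width, unsigned int height,
				 unsigned int tpf_num, unsigned int tpf_den)
{
	uint64_t cycles_per_request;
	uint64_t requests_per_frame;
	uint64_t cycles_per_frame;
	unsigned int minimum, maximum;

	/* Lower bound: never exceed the pipeline data rate (round up). */
	cycles_per_request = (l3_ick / 2 * 256 + max_rate - 1) / max_rate;
	minimum = (unsigned int)((cycles_per_request + 31) / 32);

	/* Upper bound: spread requests over the frame period (round down). */
	requests_per_frame = ((uint64_t)width * 2 + 255) / 256 * height;
	cycles_per_frame = l3_ick * tpf_num / tpf_den;
	maximum = (unsigned int)(cycles_per_frame / requests_per_frame / 32);

	return minimum > maximum ? minimum : maximum;
}

int main(void)
{
	/* 200 MHz L3, 100 MB/s pipeline, 1280x720 @ 30 fps, all hypothetical. */
	printf("SDR_REQ exponent value = %u\n",
	       demo_prv_exp(200000000ULL, 100000000ULL, 1280, 720, 1, 30));
	return 0;
}
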
+
+/*
+ * omap3isp_preview_busy - Gets busy state of preview module.
+ */
+int omap3isp_preview_busy(struct isp_prev_device *prev)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR)
+		& ISPPRV_PCR_BUSY;
+}
+
+/*
+ * omap3isp_preview_restore_context - Restores the values of preview registers
+ */
+void omap3isp_preview_restore_context(struct isp_device *isp)
+{
+	struct isp_prev_device *prev = &isp->isp_prev;
+	const u32 update = OMAP3ISP_PREV_FEATURES_END - 1;
+
+	prev->params.params[0].update = prev->params.active & update;
+	prev->params.params[1].update = ~prev->params.active & update;
+
+	preview_setup_hw(prev, update, prev->params.active);
+
+	prev->params.params[0].update = 0;
+	prev->params.params[1].update = 0;
+}
+
+/*
+ * preview_print_status - Dump preview module registers to the kernel log
+ */
+#define PREV_PRINT_REGISTER(isp, name)\
+	dev_dbg(isp->dev, "###PRV " #name "=0x%08x\n", \
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_##name))
+
+static void preview_print_status(struct isp_prev_device *prev)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	dev_dbg(isp->dev, "-------------Preview Register dump----------\n");
+
+	PREV_PRINT_REGISTER(isp, PCR);
+	PREV_PRINT_REGISTER(isp, HORZ_INFO);
+	PREV_PRINT_REGISTER(isp, VERT_INFO);
+	PREV_PRINT_REGISTER(isp, RSDR_ADDR);
+	PREV_PRINT_REGISTER(isp, RADR_OFFSET);
+	PREV_PRINT_REGISTER(isp, DSDR_ADDR);
+	PREV_PRINT_REGISTER(isp, DRKF_OFFSET);
+	PREV_PRINT_REGISTER(isp, WSDR_ADDR);
+	PREV_PRINT_REGISTER(isp, WADD_OFFSET);
+	PREV_PRINT_REGISTER(isp, AVE);
+	PREV_PRINT_REGISTER(isp, HMED);
+	PREV_PRINT_REGISTER(isp, NF);
+	PREV_PRINT_REGISTER(isp, WB_DGAIN);
+	PREV_PRINT_REGISTER(isp, WBGAIN);
+	PREV_PRINT_REGISTER(isp, WBSEL);
+	PREV_PRINT_REGISTER(isp, CFA);
+	PREV_PRINT_REGISTER(isp, BLKADJOFF);
+	PREV_PRINT_REGISTER(isp, RGB_MAT1);
+	PREV_PRINT_REGISTER(isp, RGB_MAT2);
+	PREV_PRINT_REGISTER(isp, RGB_MAT3);
+	PREV_PRINT_REGISTER(isp, RGB_MAT4);
+	PREV_PRINT_REGISTER(isp, RGB_MAT5);
+	PREV_PRINT_REGISTER(isp, RGB_OFF1);
+	PREV_PRINT_REGISTER(isp, RGB_OFF2);
+	PREV_PRINT_REGISTER(isp, CSC0);
+	PREV_PRINT_REGISTER(isp, CSC1);
+	PREV_PRINT_REGISTER(isp, CSC2);
+	PREV_PRINT_REGISTER(isp, CSC_OFFSET);
+	PREV_PRINT_REGISTER(isp, CNT_BRT);
+	PREV_PRINT_REGISTER(isp, CSUP);
+	PREV_PRINT_REGISTER(isp, SETUP_YC);
+	PREV_PRINT_REGISTER(isp, SET_TBL_ADDR);
+	PREV_PRINT_REGISTER(isp, CDC_THR0);
+	PREV_PRINT_REGISTER(isp, CDC_THR1);
+	PREV_PRINT_REGISTER(isp, CDC_THR2);
+	PREV_PRINT_REGISTER(isp, CDC_THR3);
+
+	dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * preview_init_params - init image processing parameters.
+ * @prev: pointer to previewer private structure
+ */
+static void preview_init_params(struct isp_prev_device *prev)
+{
+	struct prev_params *params;
+	unsigned int i;
+
+	spin_lock_init(&prev->params.lock);
+
+	prev->params.active = ~0;
+	prev->params.params[0].busy = 0;
+	prev->params.params[0].update = OMAP3ISP_PREV_FEATURES_END - 1;
+	prev->params.params[1].busy = 0;
+	prev->params.params[1].update = 0;
+
+	params = &prev->params.params[0];
+
+	/* Init values */
+	params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
+	params->brightness = ISPPRV_BRIGHT_DEF * ISPPRV_BRIGHT_UNITS;
+	params->cfa.format = OMAP3ISP_CFAFMT_BAYER;
+	memcpy(params->cfa.table, cfa_coef_table,
+	       sizeof(params->cfa.table));
+	params->cfa.gradthrs_horz = FLR_CFA_GRADTHRS_HORZ;
+	params->cfa.gradthrs_vert = FLR_CFA_GRADTHRS_VERT;
+	params->csup.gain = FLR_CSUP_GAIN;
+	params->csup.thres = FLR_CSUP_THRES;
+	params->csup.hypf_en = 0;
+	memcpy(params->luma.table, luma_enhance_table,
+	       sizeof(params->luma.table));
+	params->nf.spread = FLR_NF_STRGTH;
+	memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table));
+	params->dcor.couplet_mode_en = 1;
+	for (i = 0; i < OMAP3ISP_PREV_DETECT_CORRECT_CHANNELS; i++)
+		params->dcor.detect_correct[i] = DEF_DETECT_CORRECT_VAL;
+	memcpy(params->gamma.blue, gamma_table, sizeof(params->gamma.blue));
+	memcpy(params->gamma.green, gamma_table, sizeof(params->gamma.green));
+	memcpy(params->gamma.red, gamma_table, sizeof(params->gamma.red));
+	params->wbal.dgain = FLR_WBAL_DGAIN;
+	params->wbal.coef0 = FLR_WBAL_COEF;
+	params->wbal.coef1 = FLR_WBAL_COEF;
+	params->wbal.coef2 = FLR_WBAL_COEF;
+	params->wbal.coef3 = FLR_WBAL_COEF;
+	params->blkadj.red = FLR_BLKADJ_RED;
+	params->blkadj.green = FLR_BLKADJ_GREEN;
+	params->blkadj.blue = FLR_BLKADJ_BLUE;
+	params->rgb2rgb = flr_rgb2rgb;
+	params->csc = flr_prev_csc;
+	params->yclimit.minC = ISPPRV_YC_MIN;
+	params->yclimit.maxC = ISPPRV_YC_MAX;
+	params->yclimit.minY = ISPPRV_YC_MIN;
+	params->yclimit.maxY = ISPPRV_YC_MAX;
+
+	params->features = OMAP3ISP_PREV_CFA | OMAP3ISP_PREV_DEFECT_COR
+			 | OMAP3ISP_PREV_NF | OMAP3ISP_PREV_GAMMA
+			 | OMAP3ISP_PREV_BLKADJ | OMAP3ISP_PREV_YC_LIMIT
+			 | OMAP3ISP_PREV_RGB2RGB | OMAP3ISP_PREV_COLOR_CONV
+			 | OMAP3ISP_PREV_WB | OMAP3ISP_PREV_BRIGHTNESS
+			 | OMAP3ISP_PREV_CONTRAST;
+}
+
+/*
+ * preview_max_out_width - Handle previewer hardware output limitations
+ * @prev: pointer to previewer private structure
+ * returns the maximum output width for the current ISP revision
+ */
+static unsigned int preview_max_out_width(struct isp_prev_device *prev)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	switch (isp->revision) {
+	case ISP_REVISION_1_0:
+		return PREV_MAX_OUT_WIDTH_REV_1;
+
+	case ISP_REVISION_2_0:
+	default:
+		return PREV_MAX_OUT_WIDTH_REV_2;
+
+	case ISP_REVISION_15_0:
+		return PREV_MAX_OUT_WIDTH_REV_15;
+	}
+}
+
+static void preview_configure(struct isp_prev_device *prev)
+{
+	struct isp_device *isp = to_isp_device(prev);
+	struct v4l2_mbus_framefmt *format;
+	unsigned long flags;
+	u32 update;
+	u32 active;
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	/* Mark all active parameters we are going to touch as busy. */
+	update = preview_params_lock(prev, 0, false);
+	active = prev->params.active;
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+
+	/* PREV_PAD_SINK */
+	format = &prev->formats[PREV_PAD_SINK];
+
+	preview_adjust_bandwidth(prev);
+
+	preview_config_input_format(prev, format);
+	preview_config_input_size(prev, active);
+
+	if (prev->input == PREVIEW_INPUT_CCDC)
+		preview_config_inlineoffset(prev, 0);
+	else
+		preview_config_inlineoffset(prev,
+				ALIGN(format->width, 0x20) * 2);
+
+	preview_setup_hw(prev, update, active);
+
+	/* PREV_PAD_SOURCE */
+	format = &prev->formats[PREV_PAD_SOURCE];
+
+	if (prev->output & PREVIEW_OUTPUT_MEMORY)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_SDRPORT);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_SDRPORT);
+
+	if (prev->output & PREVIEW_OUTPUT_RESIZER)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_RSZPORT);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_RSZPORT);
+
+	if (prev->output & PREVIEW_OUTPUT_MEMORY)
+		preview_config_outlineoffset(prev,
+				ALIGN(format->width, 0x10) * 2);
+
+	preview_config_averager(prev, 0);
+	preview_config_ycpos(prev, format->code);
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	preview_params_unlock(prev, update, false);
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void preview_enable_oneshot(struct isp_prev_device *prev)
+{
+	struct isp_device *isp = to_isp_device(prev);
+
+	/* The PCR.SOURCE bit is automatically reset to 0 when the PCR.ENABLE
+	 * bit is set. As the preview engine is used in single-shot mode, we
+	 * need to set PCR.SOURCE before enabling the preview engine.
+	 */
+	if (prev->input == PREVIEW_INPUT_MEMORY)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+			    ISPPRV_PCR_SOURCE);
+
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+		    ISPPRV_PCR_EN | ISPPRV_PCR_ONESHOT);
+}
+
+void omap3isp_preview_isr_frame_sync(struct isp_prev_device *prev)
+{
+	/*
+	 * If ISP_VIDEO_DMAQUEUE_QUEUED is set, the DMA queue had an underrun
+	 * condition, the module was paused and now we have a buffer queued
+	 * on the output again. Restart the pipeline if running in continuous
+	 * mode.
+	 */
+	if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+	    prev->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
+		preview_enable_oneshot(prev);
+		isp_video_dmaqueue_flags_clr(&prev->video_out);
+	}
+}
+
+static void preview_isr_buffer(struct isp_prev_device *prev)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
+	struct isp_buffer *buffer;
+	int restart = 0;
+
+	if (prev->input == PREVIEW_INPUT_MEMORY) {
+		buffer = omap3isp_video_buffer_next(&prev->video_in);
+		if (buffer != NULL)
+			preview_set_inaddr(prev, buffer->isp_addr);
+		pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+	}
+
+	if (prev->output & PREVIEW_OUTPUT_MEMORY) {
+		buffer = omap3isp_video_buffer_next(&prev->video_out);
+		if (buffer != NULL) {
+			preview_set_outaddr(prev, buffer->isp_addr);
+			restart = 1;
+		}
+		pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+	}
+
+	switch (prev->state) {
+	case ISP_PIPELINE_STREAM_SINGLESHOT:
+		if (isp_pipeline_ready(pipe))
+			omap3isp_pipeline_set_stream(pipe,
+						ISP_PIPELINE_STREAM_SINGLESHOT);
+		break;
+
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		/* If an underrun occurs, the video queue operation handler will
+		 * restart the preview engine. Otherwise restart it immediately.
+		 */
+		if (restart)
+			preview_enable_oneshot(prev);
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+	default:
+		return;
+	}
+}
+
+/*
+ * omap3isp_preview_isr - ISP preview engine interrupt handler
+ *
+ * Manage the preview engine video buffers and configure shadowed registers.
+ */
+void omap3isp_preview_isr(struct isp_prev_device *prev)
+{
+	unsigned long flags;
+	u32 update;
+	u32 active;
+
+	if (omap3isp_module_sync_is_stopping(&prev->wait, &prev->stopping))
+		return;
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	preview_params_switch(prev);
+	update = preview_params_lock(prev, 0, false);
+	active = prev->params.active;
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+
+	preview_setup_hw(prev, update, active);
+	preview_config_input_size(prev, active);
+
+	if (prev->input == PREVIEW_INPUT_MEMORY ||
+	    prev->output & PREVIEW_OUTPUT_MEMORY)
+		preview_isr_buffer(prev);
+	else if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS)
+		preview_enable_oneshot(prev);
+
+	spin_lock_irqsave(&prev->params.lock, flags);
+	preview_params_unlock(prev, update, false);
+	spin_unlock_irqrestore(&prev->params.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int preview_video_queue(struct isp_video *video,
+			       struct isp_buffer *buffer)
+{
+	struct isp_prev_device *prev = &video->isp->isp_prev;
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		preview_set_inaddr(prev, buffer->isp_addr);
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		preview_set_outaddr(prev, buffer->isp_addr);
+
+	return 0;
+}
+
+static const struct isp_video_operations preview_video_ops = {
+	.queue = preview_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * preview_s_ctrl - Handle set control subdev method
+ * @ctrl: pointer to v4l2 control structure
+ */
+static int preview_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct isp_prev_device *prev =
+		container_of(ctrl->handler, struct isp_prev_device, ctrls);
+
+	switch (ctrl->id) {
+	case V4L2_CID_BRIGHTNESS:
+		preview_update_brightness(prev, ctrl->val);
+		break;
+	case V4L2_CID_CONTRAST:
+		preview_update_contrast(prev, ctrl->val);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops preview_ctrl_ops = {
+	.s_ctrl = preview_s_ctrl,
+};
+
+/*
+ * preview_ioctl - Handle preview module private ioctls
+ * @prev: pointer to preview context structure
+ * @cmd: configuration command
+ * @arg: configuration argument
+ * return -EINVAL or zero on success
+ */
+static long preview_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_OMAP3ISP_PRV_CFG:
+		return preview_config(prev, arg);
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/*
+ * preview_set_stream - Enable/Disable streaming on preview subdev
+ * @sd    : pointer to v4l2 subdev structure
+ * @enable: 1 == Enable, 0 == Disable
+ * return -EINVAL or zero on success
+ */
+static int preview_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+	struct isp_video *video_out = &prev->video_out;
+	struct isp_device *isp = to_isp_device(prev);
+	struct device *dev = to_device(prev);
+
+	if (prev->state == ISP_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISP_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
+		preview_configure(prev);
+		atomic_set(&prev->stopping, 0);
+		preview_print_status(prev);
+	}
+
+	switch (enable) {
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		if (prev->output & PREVIEW_OUTPUT_MEMORY)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
+
+		if (video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED ||
+		    !(prev->output & PREVIEW_OUTPUT_MEMORY))
+			preview_enable_oneshot(prev);
+
+		isp_video_dmaqueue_flags_clr(video_out);
+		break;
+
+	case ISP_PIPELINE_STREAM_SINGLESHOT:
+		if (prev->input == PREVIEW_INPUT_MEMORY)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
+		if (prev->output & PREVIEW_OUTPUT_MEMORY)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
+
+		preview_enable_oneshot(prev);
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+		if (omap3isp_module_sync_idle(&sd->entity, &prev->wait,
+					      &prev->stopping))
+			dev_dbg(dev, "%s: stop timeout.\n", sd->name);
+		omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
+		omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
+		omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
+		isp_video_dmaqueue_flags_clr(video_out);
+		break;
+	}
+
+	prev->state = enable;
+	return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
+		     unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &prev->formats[pad];
+}
+
+static struct v4l2_rect *
+__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
+		   enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_crop(fh, PREV_PAD_SINK);
+	else
+		return &prev->crop;
+}
+
+/* previewer format descriptions */
+static const unsigned int preview_input_fmts[] = {
+	V4L2_MBUS_FMT_Y10_1X10,
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SRGGB10_1X10,
+	V4L2_MBUS_FMT_SBGGR10_1X10,
+	V4L2_MBUS_FMT_SGBRG10_1X10,
+};
+
+static const unsigned int preview_output_fmts[] = {
+	V4L2_MBUS_FMT_UYVY8_1X16,
+	V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * preview_try_format - Validate a format
+ * @prev: ISP preview engine
+ * @fh: V4L2 subdev file handle
+ * @pad: pad number
+ * @fmt: format to be validated
+ * @which: try/active format selector
+ *
+ * Validate and adjust the given format for the given pad based on the preview
+ * engine limits and the format and crop rectangles on other pads.
+ */
+static void preview_try_format(struct isp_prev_device *prev,
+			       struct v4l2_subdev_fh *fh, unsigned int pad,
+			       struct v4l2_mbus_framefmt *fmt,
+			       enum v4l2_subdev_format_whence which)
+{
+	enum v4l2_mbus_pixelcode pixelcode;
+	struct v4l2_rect *crop;
+	unsigned int i;
+
+	switch (pad) {
+	case PREV_PAD_SINK:
+		/* When reading data from the CCDC, the input size has already
+		 * been mangled by the CCDC output pad so it can be accepted
+		 * as-is.
+		 *
+		 * When reading data from memory, clamp the requested width and
+		 * height. The TRM doesn't specify a minimum input height; make
+		 * sure we get enough lines to enable the noise filter and color
+		 * filter array interpolation.
+		 */
+		if (prev->input == PREVIEW_INPUT_MEMORY) {
+			fmt->width = clamp_t(u32, fmt->width, PREV_MIN_IN_WIDTH,
+					     preview_max_out_width(prev));
+			fmt->height = clamp_t(u32, fmt->height,
+					      PREV_MIN_IN_HEIGHT,
+					      PREV_MAX_IN_HEIGHT);
+		}
+
+		fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+		for (i = 0; i < ARRAY_SIZE(preview_input_fmts); i++) {
+			if (fmt->code == preview_input_fmts[i])
+				break;
+		}
+
+		/* If not found, use SGRBG10 as default */
+		if (i >= ARRAY_SIZE(preview_input_fmts))
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+		break;
+
+	case PREV_PAD_SOURCE:
+		pixelcode = fmt->code;
+		*fmt = *__preview_get_format(prev, fh, PREV_PAD_SINK, which);
+
+		switch (pixelcode) {
+		case V4L2_MBUS_FMT_YUYV8_1X16:
+		case V4L2_MBUS_FMT_UYVY8_1X16:
+			fmt->code = pixelcode;
+			break;
+
+		default:
+			fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
+			break;
+		}
+
+		/* The preview module output size is configurable through the
+		 * averager (horizontal scaling by 1/1, 1/2, 1/4 or 1/8). This
+		 * is not supported yet; hardcode the output size to the crop
+		 * rectangle size.
+		 */
+		crop = __preview_get_crop(prev, fh, which);
+		fmt->width = crop->width;
+		fmt->height = crop->height;
+
+		fmt->colorspace = V4L2_COLORSPACE_JPEG;
+		break;
+	}
+
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * preview_try_crop - Validate a crop rectangle
+ * @prev: ISP preview engine
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ *
+ * The preview engine crops lines and columns for its internal operation,
+ * depending on which filters are enabled. Enforce minimum crop margins to
+ * handle that transparently for userspace.
+ *
+ * See the explanation at the PREV_MARGIN_* definitions for more details.
+ */
+static void preview_try_crop(struct isp_prev_device *prev,
+			     const struct v4l2_mbus_framefmt *sink,
+			     struct v4l2_rect *crop)
+{
+	unsigned int left = PREV_MARGIN_LEFT;
+	unsigned int right = sink->width - PREV_MARGIN_RIGHT;
+	unsigned int top = PREV_MARGIN_TOP;
+	unsigned int bottom = sink->height - PREV_MARGIN_BOTTOM;
+
+	/* When processing data on-the-fly from the CCDC, at least 2 pixels must
+	 * be cropped from the left and right sides of the image. As we don't
+	 * know which filters will be enabled, increase the left and right
+	 * margins by two.
+	 */
+	if (prev->input == PREVIEW_INPUT_CCDC) {
+		left += 2;
+		right -= 2;
+	}
+
+	/* Restrict left/top to even values to keep the Bayer pattern. */
+	crop->left &= ~1;
+	crop->top &= ~1;
+
+	crop->left = clamp_t(u32, crop->left, left, right - PREV_MIN_OUT_WIDTH);
+	crop->top = clamp_t(u32, crop->top, top, bottom - PREV_MIN_OUT_HEIGHT);
+	crop->width = clamp_t(u32, crop->width, PREV_MIN_OUT_WIDTH,
+			      right - crop->left);
+	crop->height = clamp_t(u32, crop->height, PREV_MIN_OUT_HEIGHT,
+			       bottom - crop->top);
+}
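
A stand-alone sketch of the clamping above, with placeholder margin and minimum-size values rather than the real PREV_* constants:

#include <stdio.h>

#define DEMO_MARGIN	14	/* hypothetical left/right/top/bottom margin */
#define DEMO_MIN_W	64	/* hypothetical minimum output width */
#define DEMO_MIN_H	32	/* hypothetical minimum output height */

struct demo_rect { int left, top, width, height; };

static int demo_clamp(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static void demo_try_crop(struct demo_rect *c, int sink_w, int sink_h)
{
	int left = DEMO_MARGIN, right = sink_w - DEMO_MARGIN;
	int top = DEMO_MARGIN, bottom = sink_h - DEMO_MARGIN;

	c->left &= ~1;		/* keep the Bayer pattern phase */
	c->top &= ~1;

	c->left = demo_clamp(c->left, left, right - DEMO_MIN_W);
	c->top = demo_clamp(c->top, top, bottom - DEMO_MIN_H);
	c->width = demo_clamp(c->width, DEMO_MIN_W, right - c->left);
	c->height = demo_clamp(c->height, DEMO_MIN_H, bottom - c->top);
}

int main(void)
{
	struct demo_rect r = { 1, 3, 4096, 4096 };

	demo_try_crop(&r, 1280, 720);
	printf("%d,%d %dx%d\n", r.left, r.top, r.width, r.height);
	return 0;
}
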
+
+/*
+ * preview_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh     : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int preview_enum_mbus_code(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	switch (code->pad) {
+	case PREV_PAD_SINK:
+		if (code->index >= ARRAY_SIZE(preview_input_fmts))
+			return -EINVAL;
+
+		code->code = preview_input_fmts[code->index];
+		break;
+	case PREV_PAD_SOURCE:
+		if (code->index >= ARRAY_SIZE(preview_output_fmts))
+			return -EINVAL;
+
+		code->code = preview_output_fmts[code->index];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int preview_enum_frame_size(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_fh *fh,
+				   struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	preview_try_format(prev, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	preview_try_format(prev, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
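
preview_enum_frame_size() derives the supported range by pushing 1x1 and then (u32)-1 x (u32)-1 through the try-format path and letting it clamp. The same trick in a self-contained form, with placeholder limits:

#include <stdio.h>

#define DEMO_MIN_W 35	/* placeholder limits */
#define DEMO_MAX_W 4095
#define DEMO_MIN_H 20
#define DEMO_MAX_H 4095

struct demo_fmt { unsigned int width, height; };

static void demo_try_format(struct demo_fmt *f)
{
	if (f->width < DEMO_MIN_W) f->width = DEMO_MIN_W;
	if (f->width > DEMO_MAX_W) f->width = DEMO_MAX_W;
	if (f->height < DEMO_MIN_H) f->height = DEMO_MIN_H;
	if (f->height > DEMO_MAX_H) f->height = DEMO_MAX_H;
}

int main(void)
{
	struct demo_fmt fmt = { 1, 1 };

	demo_try_format(&fmt);		/* clamps up to the minimum */
	printf("min %ux%u\n", fmt.width, fmt.height);

	fmt.width = -1;			/* UINT_MAX, clamps down to the max */
	fmt.height = -1;
	demo_try_format(&fmt);
	printf("max %ux%u\n", fmt.width, fmt.height);
	return 0;
}
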
+
+/*
+ * preview_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP preview V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int preview_get_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	if (sel->pad != PREV_PAD_SINK)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = INT_MAX;
+		sel->r.height = INT_MAX;
+
+		format = __preview_get_format(prev, fh, PREV_PAD_SINK,
+					      sel->which);
+		preview_try_crop(prev, format, &sel->r);
+		break;
+
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *__preview_get_crop(prev, fh, sel->which);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * preview_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP preview V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int preview_set_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	if (sel->target != V4L2_SEL_TGT_CROP ||
+	    sel->pad != PREV_PAD_SINK)
+		return -EINVAL;
+
+	/* The crop rectangle can't be changed while streaming. */
+	if (prev->state != ISP_PIPELINE_STREAM_STOPPED)
+		return -EBUSY;
+
+	/* Modifying the crop rectangle always changes the format on the source
+	 * pad. If the KEEP_CONFIG flag is set, just return the current crop
+	 * rectangle.
+	 */
+	if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+		sel->r = *__preview_get_crop(prev, fh, sel->which);
+		return 0;
+	}
+
+	format = __preview_get_format(prev, fh, PREV_PAD_SINK, sel->which);
+	preview_try_crop(prev, format, &sel->r);
+	*__preview_get_crop(prev, fh, sel->which) = sel->r;
+
+	/* Update the source format. */
+	format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, sel->which);
+	preview_try_format(prev, fh, PREV_PAD_SOURCE, format, sel->which);
+
+	return 0;
+}
+
+/*
+ * preview_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __preview_get_format(prev, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * preview_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect *crop;
+
+	format = __preview_get_format(prev, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	preview_try_format(prev, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == PREV_PAD_SINK) {
+		/* Reset the crop rectangle. */
+		crop = __preview_get_crop(prev, fh, fmt->which);
+		crop->left = 0;
+		crop->top = 0;
+		crop->width = fmt->format.width;
+		crop->height = fmt->format.height;
+
+		preview_try_crop(prev, &fmt->format, crop);
+
+		/* Update the source format. */
+		format = __preview_get_format(prev, fh, PREV_PAD_SOURCE,
+					      fmt->which);
+		preview_try_format(prev, fh, PREV_PAD_SOURCE, format,
+				   fmt->which);
+	}
+
+	return 0;
+}
+
+/*
+ * preview_init_formats - Initialize formats on all pads
+ * @sd: ISP preview V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int preview_init_formats(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = PREV_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	preview_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops preview_v4l2_core_ops = {
+	.ioctl = preview_ioctl,
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops preview_v4l2_video_ops = {
+	.s_stream = preview_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
+	.enum_mbus_code = preview_enum_mbus_code,
+	.enum_frame_size = preview_enum_frame_size,
+	.get_fmt = preview_get_format,
+	.set_fmt = preview_set_format,
+	.get_selection = preview_get_selection,
+	.set_selection = preview_set_selection,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops preview_v4l2_ops = {
+	.core = &preview_v4l2_core_ops,
+	.video = &preview_v4l2_video_ops,
+	.pad = &preview_v4l2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops preview_v4l2_internal_ops = {
+	.open = preview_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * preview_link_setup - Setup previewer connections.
+ * @entity : Pointer to media entity structure
+ * @local  : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags  : Link flags
+ * return -EINVAL or zero on success
+ */
+static int preview_link_setup(struct media_entity *entity,
+			      const struct media_pad *local,
+			      const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+
+	switch (local->index | media_entity_type(remote->entity)) {
+	case PREV_PAD_SINK | MEDIA_ENT_T_DEVNODE:
+		/* read from memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (prev->input == PREVIEW_INPUT_CCDC)
+				return -EBUSY;
+			prev->input = PREVIEW_INPUT_MEMORY;
+		} else {
+			if (prev->input == PREVIEW_INPUT_MEMORY)
+				prev->input = PREVIEW_INPUT_NONE;
+		}
+		break;
+
+	case PREV_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* read from ccdc */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (prev->input == PREVIEW_INPUT_MEMORY)
+				return -EBUSY;
+			prev->input = PREVIEW_INPUT_CCDC;
+		} else {
+			if (prev->input == PREVIEW_INPUT_CCDC)
+				prev->input = PREVIEW_INPUT_NONE;
+		}
+		break;
+
+	/*
+	 * The ISP core doesn't support pipelines with multiple video outputs.
+	 * Revisit this when it is implemented; return -EBUSY for now.
+	 */
+
+	case PREV_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+		/* write to memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (prev->output & ~PREVIEW_OUTPUT_MEMORY)
+				return -EBUSY;
+			prev->output |= PREVIEW_OUTPUT_MEMORY;
+		} else {
+			prev->output &= ~PREVIEW_OUTPUT_MEMORY;
+		}
+		break;
+
+	case PREV_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* write to resizer */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (prev->output & ~PREVIEW_OUTPUT_RESIZER)
+				return -EBUSY;
+			prev->output |= PREVIEW_OUTPUT_RESIZER;
+		} else {
+			prev->output &= ~PREVIEW_OUTPUT_RESIZER;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
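
The switch above keys on the local pad index OR-ed with the remote entity type, which works because the two value ranges don't overlap. A sketch of the same dispatch pattern with placeholder constants (the real MEDIA_ENT_T_* values differ):

#include <stdio.h>

#define DEMO_PAD_SINK		0
#define DEMO_PAD_SOURCE		1
#define DEMO_ENT_T_DEVNODE	(1 << 16)	/* video device node */
#define DEMO_ENT_T_SUBDEV	(2 << 16)	/* V4L2 sub-device */

static const char *demo_link(unsigned int pad, unsigned int remote_type)
{
	switch (pad | remote_type) {
	case DEMO_PAD_SINK | DEMO_ENT_T_DEVNODE:
		return "read from memory";
	case DEMO_PAD_SINK | DEMO_ENT_T_SUBDEV:
		return "read from the CCDC";
	case DEMO_PAD_SOURCE | DEMO_ENT_T_DEVNODE:
		return "write to memory";
	case DEMO_PAD_SOURCE | DEMO_ENT_T_SUBDEV:
		return "write to the resizer";
	default:
		return "invalid link";
	}
}

int main(void)
{
	printf("%s\n", demo_link(DEMO_PAD_SINK, DEMO_ENT_T_SUBDEV));
	return 0;
}
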
+
+/* media operations */
+static const struct media_entity_operations preview_media_ops = {
+	.link_setup = preview_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_preview_unregister_entities(struct isp_prev_device *prev)
+{
+	v4l2_device_unregister_subdev(&prev->subdev);
+	omap3isp_video_unregister(&prev->video_in);
+	omap3isp_video_unregister(&prev->video_out);
+}
+
+int omap3isp_preview_register_entities(struct isp_prev_device *prev,
+	struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video nodes. */
+	ret = v4l2_device_register_subdev(vdev, &prev->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&prev->video_in, vdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&prev->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap3isp_preview_unregister_entities(prev);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP previewer initialisation and cleanup
+ */
+
+/*
+ * preview_init_entities - Initialize subdev and media entity.
+ * @prev : Pointer to preview structure
+ * return -ENOMEM or zero on success
+ */
+static int preview_init_entities(struct isp_prev_device *prev)
+{
+	struct v4l2_subdev *sd = &prev->subdev;
+	struct media_pad *pads = prev->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	prev->input = PREVIEW_INPUT_NONE;
+
+	v4l2_subdev_init(sd, &preview_v4l2_ops);
+	sd->internal_ops = &preview_v4l2_internal_ops;
+	strlcpy(sd->name, "OMAP3 ISP preview", sizeof(sd->name));
+	sd->grp_id = 1 << 16;	/* group ID for isp subdevs */
+	v4l2_set_subdevdata(sd, prev);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	v4l2_ctrl_handler_init(&prev->ctrls, 2);
+	v4l2_ctrl_new_std(&prev->ctrls, &preview_ctrl_ops, V4L2_CID_BRIGHTNESS,
+			  ISPPRV_BRIGHT_LOW, ISPPRV_BRIGHT_HIGH,
+			  ISPPRV_BRIGHT_STEP, ISPPRV_BRIGHT_DEF);
+	v4l2_ctrl_new_std(&prev->ctrls, &preview_ctrl_ops, V4L2_CID_CONTRAST,
+			  ISPPRV_CONTRAST_LOW, ISPPRV_CONTRAST_HIGH,
+			  ISPPRV_CONTRAST_STEP, ISPPRV_CONTRAST_DEF);
+	v4l2_ctrl_handler_setup(&prev->ctrls);
+	sd->ctrl_handler = &prev->ctrls;
+
+	pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[PREV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &preview_media_ops;
+	ret = media_entity_init(me, PREV_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	preview_init_formats(sd, NULL);
+
+	/* According to the OMAP34xx TRM, video buffers need to be aligned on a
+	 * 32-byte boundary. However, an undocumented hardware bug requires a
+	 * 64-byte boundary at the preview engine input.
+	 */
+	prev->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	prev->video_in.ops = &preview_video_ops;
+	prev->video_in.isp = to_isp_device(prev);
+	prev->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+	prev->video_in.bpl_alignment = 64;
+	prev->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	prev->video_out.ops = &preview_video_ops;
+	prev->video_out.isp = to_isp_device(prev);
+	prev->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+	prev->video_out.bpl_alignment = 32;
+
+	ret = omap3isp_video_init(&prev->video_in, "preview");
+	if (ret < 0)
+		goto error_video_in;
+
+	ret = omap3isp_video_init(&prev->video_out, "preview");
+	if (ret < 0)
+		goto error_video_out;
+
+	/* Connect the video nodes to the previewer subdev. */
+	ret = media_entity_create_link(&prev->video_in.video.entity, 0,
+			&prev->subdev.entity, PREV_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(&prev->subdev.entity, PREV_PAD_SOURCE,
+			&prev->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap3isp_video_cleanup(&prev->video_out);
+error_video_out:
+	omap3isp_video_cleanup(&prev->video_in);
+error_video_in:
+	media_entity_cleanup(&prev->subdev.entity);
+	return ret;
+}
+
+/*
+ * omap3isp_preview_init - Previewer initialization.
+ * @dev : Pointer to ISP device
+ * return -ENOMEM or zero on success
+ */
+int omap3isp_preview_init(struct isp_device *isp)
+{
+	struct isp_prev_device *prev = &isp->isp_prev;
+
+	init_waitqueue_head(&prev->wait);
+
+	preview_init_params(prev);
+
+	return preview_init_entities(prev);
+}
+
+void omap3isp_preview_cleanup(struct isp_device *isp)
+{
+	struct isp_prev_device *prev = &isp->isp_prev;
+
+	v4l2_ctrl_handler_free(&prev->ctrls);
+	omap3isp_video_cleanup(&prev->video_in);
+	omap3isp_video_cleanup(&prev->video_out);
+	media_entity_cleanup(&prev->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/isppreview.h b/drivers/media/platform/omap3isp/isppreview.h
new file mode 100644
index 0000000..f669234
--- /dev/null
+++ b/drivers/media/platform/omap3isp/isppreview.h
@@ -0,0 +1,174 @@
+/*
+ * isppreview.h
+ *
+ * TI OMAP3 ISP - Preview module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_PREVIEW_H
+#define OMAP3_ISP_PREVIEW_H
+
+#include <linux/omap3isp.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+#include "ispvideo.h"
+
+#define ISPPRV_BRIGHT_STEP		0x1
+#define ISPPRV_BRIGHT_DEF		0x0
+#define ISPPRV_BRIGHT_LOW		0x0
+#define ISPPRV_BRIGHT_HIGH		0xFF
+#define ISPPRV_BRIGHT_UNITS		0x1
+
+#define ISPPRV_CONTRAST_STEP		0x1
+#define ISPPRV_CONTRAST_DEF		0x10
+#define ISPPRV_CONTRAST_LOW		0x0
+#define ISPPRV_CONTRAST_HIGH		0xFF
+#define ISPPRV_CONTRAST_UNITS		0x1
+
+/* Additional features not listed in linux/omap3isp.h */
+#define OMAP3ISP_PREV_CONTRAST		(1 << 17)
+#define OMAP3ISP_PREV_BRIGHTNESS	(1 << 18)
+#define OMAP3ISP_PREV_FEATURES_END	(1 << 19)
+
+enum preview_input_entity {
+	PREVIEW_INPUT_NONE,
+	PREVIEW_INPUT_CCDC,
+	PREVIEW_INPUT_MEMORY,
+};
+
+#define PREVIEW_OUTPUT_RESIZER		(1 << 1)
+#define PREVIEW_OUTPUT_MEMORY		(1 << 2)
+
+/* Configure byte layout of YUV image */
+enum preview_ycpos_mode {
+	YCPOS_YCrYCb = 0,
+	YCPOS_YCbYCr = 1,
+	YCPOS_CbYCrY = 2,
+	YCPOS_CrYCbY = 3
+};
+
+/*
+ * struct prev_params - Structure for all configuration
+ * @busy: Bitmask of busy parameters (being updated or used)
+ * @update: Bitmask of the parameters to be updated
+ * @features: Set of features enabled.
+ * @cfa: CFA coefficients.
+ * @csup: Chroma suppression coefficients.
+ * @luma: Luma enhancement coefficients.
+ * @nf: Noise filter coefficients.
+ * @dcor: Defect correction coefficients.
+ * @gamma: Gamma coefficients.
+ * @wbal: White Balance parameters.
+ * @blkadj: Black adjustment parameters.
+ * @rgb2rgb: RGB blending parameters.
+ * @csc: Color space conversion (RGB to YCbCr) parameters.
+ * @hmed: Horizontal median filter.
+ * @yclimit: YC limits parameters.
+ * @contrast: Contrast.
+ * @brightness: Brightness.
+ */
+struct prev_params {
+	u32 busy;
+	u32 update;
+	u32 features;
+	struct omap3isp_prev_cfa cfa;
+	struct omap3isp_prev_csup csup;
+	struct omap3isp_prev_luma luma;
+	struct omap3isp_prev_nf nf;
+	struct omap3isp_prev_dcor dcor;
+	struct omap3isp_prev_gtables gamma;
+	struct omap3isp_prev_wbal wbal;
+	struct omap3isp_prev_blkadj blkadj;
+	struct omap3isp_prev_rgbtorgb rgb2rgb;
+	struct omap3isp_prev_csc csc;
+	struct omap3isp_prev_hmed hmed;
+	struct omap3isp_prev_yclimit yclimit;
+	u8 contrast;
+	u8 brightness;
+};
+
+/* Sink and source previewer pads */
+#define PREV_PAD_SINK			0
+#define PREV_PAD_SOURCE			1
+#define PREV_PADS_NUM			2
+
+/*
+ * struct isp_prev_device - Structure for storing ISP Preview module information
+ * @subdev: V4L2 subdevice
+ * @pads: Media entity pads
+ * @formats: Active formats at the subdev pad
+ * @crop: Active crop rectangle
+ * @input: Module currently connected to the input pad
+ * @output: Bitmask of the active output
+ * @video_in: Input video entity
+ * @video_out: Output video entity
+ * @params.params : Active and shadow parameters sets
+ * @params.active: Bitmask of parameters active in set 0
+ * @params.lock: Parameters lock, protects the parameter sets and params.active
+ * @underrun: Whether the preview entity has queued buffers on the output
+ * @state: Current preview pipeline state
+ *
+ * This structure is used to store the OMAP ISP Preview module Information.
+ */
+struct isp_prev_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[PREV_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[PREV_PADS_NUM];
+	struct v4l2_rect crop;
+
+	struct v4l2_ctrl_handler ctrls;
+
+	enum preview_input_entity input;
+	unsigned int output;
+	struct isp_video video_in;
+	struct isp_video video_out;
+
+	struct {
+		unsigned int cfa_order;
+		struct prev_params params[2];
+		u32 active;
+		spinlock_t lock;
+	} params;
+
+	enum isp_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+struct isp_device;
+
+int omap3isp_preview_init(struct isp_device *isp);
+void omap3isp_preview_cleanup(struct isp_device *isp);
+
+int omap3isp_preview_register_entities(struct isp_prev_device *prv,
+				       struct v4l2_device *vdev);
+void omap3isp_preview_unregister_entities(struct isp_prev_device *prv);
+
+void omap3isp_preview_isr_frame_sync(struct isp_prev_device *prev);
+void omap3isp_preview_isr(struct isp_prev_device *prev);
+
+int omap3isp_preview_busy(struct isp_prev_device *isp_prev);
+
+void omap3isp_preview_restore_context(struct isp_device *isp);
+
+#endif	/* OMAP3_ISP_PREVIEW_H */
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
new file mode 100644
index 0000000..15bf3ea
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -0,0 +1,1158 @@
+/*
+ * ispqueue.c
+ *
+ * TI OMAP3 ISP - Video buffers queue handling
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "ispqueue.h"
+
+/* -----------------------------------------------------------------------------
+ * Video buffers management
+ */
+
+/*
+ * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
+ *
+ * The typical operation required here is Cache Invalidation across
+ * the (user space) buffer address range. And this _must_ be done
+ * at QBUF stage (and *only* at QBUF).
+ *
+ * We try to use optimal cache invalidation function:
+ * - dmac_map_area:
+ *    - used when the number of pages is _low_.
+ *    - it becomes quite slow as the number of pages increases.
+ *       - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
+ *       - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
+ *
+ * - flush_cache_all:
+ *    - used when the number of pages is _high_.
+ *    - time taken in the range of 500-900 us.
+ *    - has a higher penalty, as the whole dcache + icache is invalidated.
+ */
+/*
+ * FIXME: dmac_inv_range crashes randomly on the user space buffer
+ *        address. Fall back to flush_cache_all for now.
+ */
+#define ISP_CACHE_FLUSH_PAGES_MAX       0
+
+static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
+{
+	if (buf->skip_cache)
+		return;
+
+	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
+	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
+		flush_cache_all();
+	else {
+		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
+			      DMA_FROM_DEVICE);
+		outer_inv_range(buf->vbuf.m.userptr,
+				buf->vbuf.m.userptr + buf->vbuf.length);
+	}
+}
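
The comment above describes a size-based choice between range invalidation and a full cache flush, even though the threshold is currently pinned to 0 by the FIXME. A sketch of that selection with a purely hypothetical crossover value:

#include <stddef.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE		4096u
#define DEMO_FLUSH_PAGES_MAX	256u	/* hypothetical crossover point */

static const char *demo_cache_strategy(size_t length)
{
	size_t npages = (length + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;

	/* Small buffers: invalidate just their address range.
	 * Large buffers: flushing everything ends up cheaper.
	 */
	return npages <= DEMO_FLUSH_PAGES_MAX ? "range invalidate"
					      : "flush_cache_all";
}

int main(void)
{
	printf("150 pages  -> %s\n", demo_cache_strategy(150 * DEMO_PAGE_SIZE));
	printf("2491 pages -> %s\n", demo_cache_strategy(2491 * DEMO_PAGE_SIZE));
	return 0;
}
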
+
+/*
+ * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
+ *
+ * Lock the VMAs underlying the given buffer into memory. This prevents the
+ * userspace buffer mapping from being swapped out, making VIPT cache handling
+ * easier.
+ *
+ * Note that the pages will not be freed, as the buffers have been locked into
+ * memory by a call to get_user_pages(), but the userspace mapping could
+ * still disappear if the VMAs are not locked. This is caused by the memory
+ * management code trying to be as lock-less as possible, which results in the
+ * userspace mapping manager not finding out that the pages are locked under
+ * some conditions.
+ */
+static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
+{
+	struct vm_area_struct *vma;
+	unsigned long start;
+	unsigned long end;
+	int ret = 0;
+
+	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
+		return 0;
+
+	/* We can be called from workqueue context if the current task dies to
+	 * unlock the VMAs. In that case there's no current memory management
+	 * context so unlocking can't be performed, but the VMAs have been or
+	 * are getting destroyed anyway so it doesn't really matter.
+	 */
+	if (!current || !current->mm)
+		return lock ? -EINVAL : 0;
+
+	start = buf->vbuf.m.userptr;
+	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
+
+	down_write(&current->mm->mmap_sem);
+	spin_lock(&current->mm->page_table_lock);
+
+	do {
+		vma = find_vma(current->mm, start);
+		if (vma == NULL) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		if (lock)
+			vma->vm_flags |= VM_LOCKED;
+		else
+			vma->vm_flags &= ~VM_LOCKED;
+
+		start = vma->vm_end + 1;
+	} while (vma->vm_end < end);
+
+	if (lock)
+		buf->vm_flags |= VM_LOCKED;
+	else
+		buf->vm_flags &= ~VM_LOCKED;
+
+out:
+	spin_unlock(&current->mm->page_table_lock);
+	up_write(&current->mm->mmap_sem);
+	return ret;
+}
+
+/*
+ * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
+ *
+ * Iterate over the vmalloc'ed area and create a scatter list entry for every
+ * page.
+ */
+static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
+{
+	struct scatterlist *sglist;
+	unsigned int npages;
+	unsigned int i;
+	void *addr;
+
+	addr = buf->vaddr;
+	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
+
+	sglist = vmalloc(npages * sizeof(*sglist));
+	if (sglist == NULL)
+		return -ENOMEM;
+
+	sg_init_table(sglist, npages);
+
+	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+		struct page *page = vmalloc_to_page(addr);
+
+		if (page == NULL || PageHighMem(page)) {
+			vfree(sglist);
+			return -EINVAL;
+		}
+
+		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
+	}
+
+	buf->sglen = npages;
+	buf->sglist = sglist;
+
+	return 0;
+}
+
+/*
+ * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
+ *
+ * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
+ */
+static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
+{
+	struct scatterlist *sglist;
+	unsigned int offset = buf->offset;
+	unsigned int i;
+
+	sglist = vmalloc(buf->npages * sizeof(*sglist));
+	if (sglist == NULL)
+		return -ENOMEM;
+
+	sg_init_table(sglist, buf->npages);
+
+	for (i = 0; i < buf->npages; ++i) {
+		if (PageHighMem(buf->pages[i])) {
+			vfree(sglist);
+			return -EINVAL;
+		}
+
+		sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
+			    offset);
+		offset = 0;
+	}
+
+	buf->sglen = buf->npages;
+	buf->sglist = sglist;
+
+	return 0;
+}
+
+/*
+ * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
+ *
+ * Create a scatter list of physically contiguous pages starting at the buffer
+ * memory physical address.
+ */
+static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
+{
+	struct scatterlist *sglist;
+	unsigned int offset = buf->offset;
+	unsigned long pfn = buf->paddr >> PAGE_SHIFT;
+	unsigned int i;
+
+	sglist = vmalloc(buf->npages * sizeof(*sglist));
+	if (sglist == NULL)
+		return -ENOMEM;
+
+	sg_init_table(sglist, buf->npages);
+
+	for (i = 0; i < buf->npages; ++i, ++pfn) {
+		sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
+			    offset);
+		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
+		 * manually.
+		 */
+		sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
+		offset = 0;
+	}
+
+	buf->sglen = buf->npages;
+	buf->sglist = sglist;
+
+	return 0;
+}
+
+/*
+ * isp_video_buffer_cleanup - Release pages for a userspace VMA.
+ *
+ * Release pages locked by a call to isp_video_buffer_prepare_user and free
+ * the pages table.
+ */
+static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
+{
+	enum dma_data_direction direction;
+	unsigned int i;
+
+	if (buf->queue->ops->buffer_cleanup)
+		buf->queue->ops->buffer_cleanup(buf);
+
+	if (!(buf->vm_flags & VM_PFNMAP)) {
+		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
+			     direction);
+	}
+
+	vfree(buf->sglist);
+	buf->sglist = NULL;
+	buf->sglen = 0;
+
+	if (buf->pages != NULL) {
+		isp_video_buffer_lock_vma(buf, 0);
+
+		for (i = 0; i < buf->npages; ++i)
+			page_cache_release(buf->pages[i]);
+
+		vfree(buf->pages);
+		buf->pages = NULL;
+	}
+
+	buf->npages = 0;
+	buf->skip_cache = false;
+}
+
+/*
+ * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
+ *
+ * This function creates a list of pages for a userspace VMA. The number of
+ * pages is first computed based on the buffer size, and pages are then
+ * retrieved by a call to get_user_pages.
+ *
+ * Pages are pinned to memory by get_user_pages, making them available for DMA
+ * transfers. However, due to memory management optimizations, it seems that
+ * get_user_pages doesn't guarantee that the pinned pages will not be written
+ * to swap and removed from the userspace mapping(s). When this happens, a page
+ * fault can be generated when accessing those unmapped pages.
+ *
+ * If the fault is triggered by a page table walk caused by VIPT cache
+ * management operations, the page fault handler might oops if the MM semaphore
+ * is held, as it can't handle kernel page faults in that case. To fix that, a
+ * fixup entry needs to be added to the cache management code, or the userspace
+ * VMA must be locked to avoid removing pages from the userspace mapping in the
+ * first place.
+ *
+ * If the number of pages retrieved is smaller than the number required by the
+ * buffer size, the function returns -EFAULT.
+ */
+static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
+{
+	unsigned long data;
+	unsigned int first;
+	unsigned int last;
+	int ret;
+
+	data = buf->vbuf.m.userptr;
+	first = (data & PAGE_MASK) >> PAGE_SHIFT;
+	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
+
+	buf->offset = data & ~PAGE_MASK;
+	buf->npages = last - first + 1;
+	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
+	if (buf->pages == NULL)
+		return -ENOMEM;
+
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
+			     buf->npages,
+			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
+			     buf->pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret != buf->npages) {
+		buf->npages = ret < 0 ? 0 : ret;
+		isp_video_buffer_cleanup(buf);
+		return -EFAULT;
+	}
+
+	ret = isp_video_buffer_lock_vma(buf, 1);
+	if (ret < 0)
+		isp_video_buffer_cleanup(buf);
+
+	return ret;
+}
+
+/*
+ * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
+ *
+ * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
+ * memory and if they span a single VMA.
+ *
+ * Return 0 if the buffer is valid, or -EFAULT otherwise.
+ */
+static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
+{
+	struct vm_area_struct *vma;
+	unsigned long prev_pfn;
+	unsigned long this_pfn;
+	unsigned long start;
+	unsigned long end;
+	dma_addr_t pa;
+	int ret = -EFAULT;
+
+	start = buf->vbuf.m.userptr;
+	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
+
+	buf->offset = start & ~PAGE_MASK;
+	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+	buf->pages = NULL;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, start);
+	if (vma == NULL || vma->vm_end < end)
+		goto done;
+
+	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
+		ret = follow_pfn(vma, start, &this_pfn);
+		if (ret)
+			goto done;
+
+		if (prev_pfn == 0)
+			pa = this_pfn << PAGE_SHIFT;
+		else if (this_pfn != prev_pfn + 1) {
+			ret = -EFAULT;
+			goto done;
+		}
+
+		prev_pfn = this_pfn;
+	}
+
+	buf->paddr = pa + buf->offset;
+	ret = 0;
+
+done:
+	up_read(&current->mm->mmap_sem);
+	return ret;
+}
+
+/*
+ * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
+ *
+ * This function locates the VMAs for the buffer's userspace address and checks
+ * that their flags match. The only flag we need to care about at the moment
+ * is VM_PFNMAP.
+ *
+ * The buffer vm_flags field is set to the first VMA flags.
+ *
+ * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
+ * have incompatible flags.
+ */
+static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
+{
+	struct vm_area_struct *vma;
+	pgprot_t vm_page_prot;
+	unsigned long start;
+	unsigned long end;
+	int ret = -EFAULT;
+
+	start = buf->vbuf.m.userptr;
+	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
+
+	down_read(&current->mm->mmap_sem);
+
+	do {
+		vma = find_vma(current->mm, start);
+		if (vma == NULL)
+			goto done;
+
+		if (start == buf->vbuf.m.userptr) {
+			buf->vm_flags = vma->vm_flags;
+			vm_page_prot = vma->vm_page_prot;
+		}
+
+		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
+			goto done;
+
+		if (vm_page_prot != vma->vm_page_prot)
+			goto done;
+
+		start = vma->vm_end + 1;
+	} while (vma->vm_end < end);
+
+	/* Skip cache management to improve performance for non-cached or
+	 * write-combining buffers.
+	 */
+	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
+	    vm_page_prot == pgprot_writecombine(vm_page_prot))
+		buf->skip_cache = true;
+
+	ret = 0;
+
+done:
+	up_read(&current->mm->mmap_sem);
+	return ret;
+}
+
+/*
+ * isp_video_buffer_prepare - Make a buffer ready for operation
+ *
+ * Preparing a buffer involves:
+ *
+ * - validating VMAs (userspace buffers only)
+ * - locking pages and VMAs into memory (userspace buffers only)
+ * - building page and scatter-gather lists
+ * - mapping buffers for DMA operation
+ * - performing driver-specific preparation
+ *
+ * The function must be called in userspace context with a valid mm context
+ * (this excludes cleanup paths such as sys_close when the userspace process
+ * segfaults).
+ */
+static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
+{
+	enum dma_data_direction direction;
+	int ret;
+
+	switch (buf->vbuf.memory) {
+	case V4L2_MEMORY_MMAP:
+		ret = isp_video_buffer_sglist_kernel(buf);
+		break;
+
+	case V4L2_MEMORY_USERPTR:
+		ret = isp_video_buffer_prepare_vm_flags(buf);
+		if (ret < 0)
+			return ret;
+
+		if (buf->vm_flags & VM_PFNMAP) {
+			ret = isp_video_buffer_prepare_pfnmap(buf);
+			if (ret < 0)
+				return ret;
+
+			ret = isp_video_buffer_sglist_pfnmap(buf);
+		} else {
+			ret = isp_video_buffer_prepare_user(buf);
+			if (ret < 0)
+				return ret;
+
+			ret = isp_video_buffer_sglist_user(buf);
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (ret < 0)
+		goto done;
+
+	if (!(buf->vm_flags & VM_PFNMAP)) {
+		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
+			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
+				 direction);
+		if (ret != buf->sglen) {
+			ret = -EFAULT;
+			goto done;
+		}
+	}
+
+	if (buf->queue->ops->buffer_prepare)
+		ret = buf->queue->ops->buffer_prepare(buf);
+
+done:
+	if (ret < 0) {
+		isp_video_buffer_cleanup(buf);
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * isp_video_buffer_query - Query the status of a given buffer
+ *
+ * Locking: must be called with the queue lock held.
+ */
+static void isp_video_buffer_query(struct isp_video_buffer *buf,
+				   struct v4l2_buffer *vbuf)
+{
+	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
+
+	if (buf->vma_use_count)
+		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
+
+	switch (buf->state) {
+	case ISP_BUF_STATE_ERROR:
+		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
+		/* Fall through */
+	case ISP_BUF_STATE_DONE:
+		vbuf->flags |= V4L2_BUF_FLAG_DONE;
+		/* Fall through */
+	case ISP_BUF_STATE_QUEUED:
+	case ISP_BUF_STATE_ACTIVE:
+		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
+		break;
+	case ISP_BUF_STATE_IDLE:
+	default:
+		break;
+	}
+}
+
+/*
+ * isp_video_buffer_wait - Wait for a buffer to be ready
+ *
+ * In non-blocking mode, return immediately with 0 if the buffer is ready or
+ * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
+ *
+ * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
+ * queue using the same condition.
+ */
+static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
+{
+	if (nonblocking) {
+		return (buf->state != ISP_BUF_STATE_QUEUED &&
+			buf->state != ISP_BUF_STATE_ACTIVE)
+			? 0 : -EAGAIN;
+	}
+
+	return wait_event_interruptible(buf->wait,
+		buf->state != ISP_BUF_STATE_QUEUED &&
+		buf->state != ISP_BUF_STATE_ACTIVE);
+}
+
+/* -----------------------------------------------------------------------------
+ * Queue management
+ */
+
+/*
+ * isp_video_queue_free - Free video buffers memory
+ *
+ * Buffers can only be freed if the queue isn't streaming and if no buffer is
+ * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
+ *
+ * This function must be called with the queue lock held.
+ */
+static int isp_video_queue_free(struct isp_video_queue *queue)
+{
+	unsigned int i;
+
+	if (queue->streaming)
+		return -EBUSY;
+
+	for (i = 0; i < queue->count; ++i) {
+		if (queue->buffers[i]->vma_use_count != 0)
+			return -EBUSY;
+	}
+
+	for (i = 0; i < queue->count; ++i) {
+		struct isp_video_buffer *buf = queue->buffers[i];
+
+		isp_video_buffer_cleanup(buf);
+
+		vfree(buf->vaddr);
+		buf->vaddr = NULL;
+
+		kfree(buf);
+		queue->buffers[i] = NULL;
+	}
+
+	INIT_LIST_HEAD(&queue->queue);
+	queue->count = 0;
+	return 0;
+}
+
+/*
+ * isp_video_queue_alloc - Allocate video buffers memory
+ *
+ * This function must be called with the queue lock held.
+ */
+static int isp_video_queue_alloc(struct isp_video_queue *queue,
+				 unsigned int nbuffers,
+				 unsigned int size, enum v4l2_memory memory)
+{
+	struct isp_video_buffer *buf;
+	unsigned int i;
+	void *mem;
+	int ret;
+
+	/* Start by freeing the buffers. */
+	ret = isp_video_queue_free(queue);
+	if (ret < 0)
+		return ret;
+
+	/* Bail out if no buffers should be allocated. */
+	if (nbuffers == 0)
+		return 0;
+
+	/* Initialize the allocated buffers. */
+	for (i = 0; i < nbuffers; ++i) {
+		buf = kzalloc(queue->bufsize, GFP_KERNEL);
+		if (buf == NULL)
+			break;
+
+		if (memory == V4L2_MEMORY_MMAP) {
+			/* Allocate video buffers memory for mmap mode. Align
+			 * the size to the page size.
+			 */
+			mem = vmalloc_32_user(PAGE_ALIGN(size));
+			if (mem == NULL) {
+				kfree(buf);
+				break;
+			}
+
+			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
+			buf->vaddr = mem;
+		}
+
+		buf->vbuf.index = i;
+		buf->vbuf.length = size;
+		buf->vbuf.type = queue->type;
+		buf->vbuf.field = V4L2_FIELD_NONE;
+		buf->vbuf.memory = memory;
+
+		buf->queue = queue;
+		init_waitqueue_head(&buf->wait);
+
+		queue->buffers[i] = buf;
+	}
+
+	if (i == 0)
+		return -ENOMEM;
+
+	queue->count = i;
+	return nbuffers;
+}
+
+/**
+ * omap3isp_video_queue_cleanup - Clean up the video buffers queue
+ * @queue: Video buffers queue
+ *
+ * Free all allocated resources and clean up the video buffers queue. The queue
+ * must not be busy (no ongoing video stream) and buffers must have been
+ * unmapped.
+ *
+ * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
+ * unmapped.
+ */
+int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
+{
+	return isp_video_queue_free(queue);
+}
+
+/**
+ * omap3isp_video_queue_init - Initialize the video buffers queue
+ * @queue: Video buffers queue
+ * @type: V4L2 buffer type (capture or output)
+ * @ops: Driver-specific queue operations
+ * @dev: Device used for DMA operations
+ * @bufsize: Size of the driver-specific buffer structure
+ *
+ * Initialize the video buffers queue with the supplied parameters.
+ *
+ * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
+ * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
+ *
+ * Buffer objects will be allocated using the given buffer size to allow room
+ * for driver-specific fields. Driver-specific buffer structures must start
+ * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
+ * structure must pass the size of the isp_video_buffer structure in the bufsize
+ * parameter.
+ *
+ * Return 0 on success.
+ */
+int omap3isp_video_queue_init(struct isp_video_queue *queue,
+			      enum v4l2_buf_type type,
+			      const struct isp_video_queue_operations *ops,
+			      struct device *dev, unsigned int bufsize)
+{
+	INIT_LIST_HEAD(&queue->queue);
+	mutex_init(&queue->lock);
+	spin_lock_init(&queue->irqlock);
+
+	queue->type = type;
+	queue->ops = ops;
+	queue->dev = dev;
+	queue->bufsize = bufsize;
+
+	return 0;
+}
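
To illustrate the bufsize contract documented above, here is a minimal sketch of how a driver might embed struct isp_video_buffer in its own buffer type and initialize a capture queue. struct my_buffer, my_queue_ops and my_dev are hypothetical names for this illustration, not identifiers from this patch:

struct my_buffer {
	struct isp_video_buffer buffer;	/* must be the first field */
	dma_addr_t isp_addr;		/* driver-specific fields follow */
};

ret = omap3isp_video_queue_init(&queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
				&my_queue_ops, my_dev,
				sizeof(struct my_buffer));

The queue code then allocates buffer objects of sizeof(struct my_buffer) bytes, and the driver converts between the two types with container_of().
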
+
+/* -----------------------------------------------------------------------------
+ * V4L2 operations
+ */
+
+/**
+ * omap3isp_video_queue_reqbufs - Allocate video buffers memory
+ *
+ * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
+ * allocates video buffer objects and, for MMAP buffers, buffer memory.
+ *
+ * If the number of buffers is 0, all buffers are freed and the function returns
+ * without performing any allocation.
+ *
+ * If the number of buffers is not 0, currently allocated buffers (if any) are
+ * freed and the requested number of buffers is allocated. Depending on
+ * driver-specific requirements and on memory availability, a number of buffers
+ * smaller or larger than requested may be allocated. This isn't considered
+ * an error.
+ *
+ * Return 0 on success or one of the following error codes:
+ *
+ * -EINVAL if the buffer type or index are invalid
+ * -EBUSY if the queue is busy (streaming or buffers mapped)
+ * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
+ */
+int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
+				 struct v4l2_requestbuffers *rb)
+{
+	unsigned int nbuffers = rb->count;
+	unsigned int size;
+	int ret;
+
+	if (rb->type != queue->type)
+		return -EINVAL;
+
+	queue->ops->queue_prepare(queue, &nbuffers, &size);
+	if (size == 0)
+		return -EINVAL;
+
+	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
+
+	mutex_lock(&queue->lock);
+
+	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
+	if (ret < 0)
+		goto done;
+
+	rb->count = ret;
+	ret = 0;
+
+done:
+	mutex_unlock(&queue->lock);
+	return ret;
+}
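
From the application side this handler is reached through the VIDIOC_REQBUFS ioctl; a minimal userspace sketch, assuming fd is an open V4L2 capture device node and the usual <linux/videodev2.h> and <sys/ioctl.h> includes:

struct v4l2_requestbuffers rb = {
	.count	= 4,
	.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
	.memory	= V4L2_MEMORY_MMAP,
};

if (ioctl(fd, VIDIOC_REQBUFS, &rb) < 0)
	perror("VIDIOC_REQBUFS");
/* rb.count now reports how many buffers were actually allocated. */
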
+
+/**
+ * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
+ *
+ * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
+ * returns the status of a given video buffer.
+ *
+ * Return 0 on success or -EINVAL if the buffer type or index are invalid.
+ */
+int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
+				  struct v4l2_buffer *vbuf)
+{
+	struct isp_video_buffer *buf;
+	int ret = 0;
+
+	if (vbuf->type != queue->type)
+		return -EINVAL;
+
+	mutex_lock(&queue->lock);
+
+	if (vbuf->index >= queue->count) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	buf = queue->buffers[vbuf->index];
+	isp_video_buffer_query(buf, vbuf);
+
+done:
+	mutex_unlock(&queue->lock);
+	return ret;
+}
+
+/**
+ * omap3isp_video_queue_qbuf - Queue a buffer
+ *
+ * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
+ *
+ * The v4l2_buffer structure passed from userspace is first sanity tested. If
+ * sane, the buffer is then processed and added to the main queue and, if the
+ * queue is streaming, to the IRQ queue.
+ *
+ * Before being enqueued, USERPTR buffers are checked for address changes. If
+ * the buffer has a different userspace address, the old memory area is unlocked
+ * and the new memory area is locked.
+ */
+int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
+			      struct v4l2_buffer *vbuf)
+{
+	struct isp_video_buffer *buf;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (vbuf->type != queue->type)
+		goto done;
+
+	mutex_lock(&queue->lock);
+
+	if (vbuf->index >= queue->count)
+		goto done;
+
+	buf = queue->buffers[vbuf->index];
+
+	if (vbuf->memory != buf->vbuf.memory)
+		goto done;
+
+	if (buf->state != ISP_BUF_STATE_IDLE)
+		goto done;
+
+	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
+	    vbuf->length < buf->vbuf.length)
+		goto done;
+
+	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
+	    vbuf->m.userptr != buf->vbuf.m.userptr) {
+		isp_video_buffer_cleanup(buf);
+		buf->vbuf.m.userptr = vbuf->m.userptr;
+		buf->prepared = 0;
+	}
+
+	if (!buf->prepared) {
+		ret = isp_video_buffer_prepare(buf);
+		if (ret < 0)
+			goto done;
+		buf->prepared = 1;
+	}
+
+	isp_video_buffer_cache_sync(buf);
+
+	buf->state = ISP_BUF_STATE_QUEUED;
+	list_add_tail(&buf->stream, &queue->queue);
+
+	if (queue->streaming) {
+		spin_lock_irqsave(&queue->irqlock, flags);
+		queue->ops->buffer_queue(buf);
+		spin_unlock_irqrestore(&queue->irqlock, flags);
+	}
+
+	ret = 0;
+
+done:
+	mutex_unlock(&queue->lock);
+	return ret;
+}
+
+/**
+ * omap3isp_video_queue_dqbuf - Dequeue a buffer
+ *
+ * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
+ *
+ * Wait until a buffer is ready to be dequeued, remove it from the queue and
+ * copy its information to the v4l2_buffer structure.
+ *
+ * If the nonblocking argument is not zero and no buffer is ready, return
+ * -EAGAIN immediately instead of waiting.
+ *
+ * If no buffer has been enqueued, or if the requested buffer type doesn't match
+ * the queue type, return -EINVAL.
+ */
+int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
+			       struct v4l2_buffer *vbuf, int nonblocking)
+{
+	struct isp_video_buffer *buf;
+	int ret;
+
+	if (vbuf->type != queue->type)
+		return -EINVAL;
+
+	mutex_lock(&queue->lock);
+
+	if (list_empty(&queue->queue)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
+	ret = isp_video_buffer_wait(buf, nonblocking);
+	if (ret < 0)
+		goto done;
+
+	list_del(&buf->stream);
+
+	isp_video_buffer_query(buf, vbuf);
+	buf->state = ISP_BUF_STATE_IDLE;
+	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
+
+done:
+	mutex_unlock(&queue->lock);
+	return ret;
+}
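
A typical userspace capture loop exercising the QBUF/DQBUF handlers above could look roughly like this (error handling omitted; fd, nbuffers and the mapped buffers are assumed to come from the REQBUFS/QUERYBUF/mmap steps):

struct v4l2_buffer vbuf;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
unsigned int i;

/* Queue all buffers, then start streaming. */
for (i = 0; i < nbuffers; ++i) {
	memset(&vbuf, 0, sizeof(vbuf));
	vbuf.index = i;
	vbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vbuf.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_QBUF, &vbuf);
}

ioctl(fd, VIDIOC_STREAMON, &type);

for (;;) {
	memset(&vbuf, 0, sizeof(vbuf));
	vbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vbuf.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_DQBUF, &vbuf);	/* blocks until a buffer is done */
	/* ... process the data mapped for buffer vbuf.index ... */
	ioctl(fd, VIDIOC_QBUF, &vbuf);	/* hand the buffer back */
}
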
+
+/**
+ * omap3isp_video_queue_streamon - Start streaming
+ *
+ * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
+ * starts streaming on the queue and calls the buffer_queue operation for all
+ * queued buffers.
+ *
+ * Return 0 on success.
+ */
+int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
+{
+	struct isp_video_buffer *buf;
+	unsigned long flags;
+
+	mutex_lock(&queue->lock);
+
+	if (queue->streaming)
+		goto done;
+
+	queue->streaming = 1;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+	list_for_each_entry(buf, &queue->queue, stream)
+		queue->ops->buffer_queue(buf);
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+
+done:
+	mutex_unlock(&queue->lock);
+	return 0;
+}
+
+/**
+ * omap3isp_video_queue_streamoff - Stop streaming
+ *
+ * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
+ * stops streaming on the queue and wakes up all the buffers.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed work items before calling this function to make sure no buffer will
+ * be touched by the driver and/or hardware.
+ */
+void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
+{
+	struct isp_video_buffer *buf;
+	unsigned long flags;
+	unsigned int i;
+
+	mutex_lock(&queue->lock);
+
+	if (!queue->streaming)
+		goto done;
+
+	queue->streaming = 0;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+	for (i = 0; i < queue->count; ++i) {
+		buf = queue->buffers[i];
+
+		if (buf->state == ISP_BUF_STATE_ACTIVE)
+			wake_up(&buf->wait);
+
+		buf->state = ISP_BUF_STATE_IDLE;
+	}
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+
+	INIT_LIST_HEAD(&queue->queue);
+
+done:
+	mutex_unlock(&queue->lock);
+}
+
+/**
+ * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
+ *
+ * This function is intended to be used with suspend/resume operations. It
+ * discards all 'done' buffers as they would be too old to be requested after
+ * resume.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed work items before calling this function to make sure no buffer will
+ * be touched by the driver and/or hardware.
+ */
+void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
+{
+	struct isp_video_buffer *buf;
+	unsigned int i;
+
+	mutex_lock(&queue->lock);
+
+	if (!queue->streaming)
+		goto done;
+
+	for (i = 0; i < queue->count; ++i) {
+		buf = queue->buffers[i];
+
+		if (buf->state == ISP_BUF_STATE_DONE)
+			buf->state = ISP_BUF_STATE_ERROR;
+	}
+
+done:
+	mutex_unlock(&queue->lock);
+}
+
+static void isp_video_queue_vm_open(struct vm_area_struct *vma)
+{
+	struct isp_video_buffer *buf = vma->vm_private_data;
+
+	buf->vma_use_count++;
+}
+
+static void isp_video_queue_vm_close(struct vm_area_struct *vma)
+{
+	struct isp_video_buffer *buf = vma->vm_private_data;
+
+	buf->vma_use_count--;
+}
+
+static const struct vm_operations_struct isp_video_queue_vm_ops = {
+	.open = isp_video_queue_vm_open,
+	.close = isp_video_queue_vm_close,
+};
+
+/**
+ * omap3isp_video_queue_mmap - Map buffers to userspace
+ *
+ * This function is intended to be used as an mmap() file operation handler. It
+ * maps a buffer to userspace based on the VMA offset.
+ *
+ * Only buffers of memory type MMAP are supported.
+ */
+int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
+			 struct vm_area_struct *vma)
+{
+	struct isp_video_buffer *uninitialized_var(buf);
+	unsigned long size;
+	unsigned int i;
+	int ret = 0;
+
+	mutex_lock(&queue->lock);
+
+	for (i = 0; i < queue->count; ++i) {
+		buf = queue->buffers[i];
+		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
+			break;
+	}
+
+	if (i == queue->count) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	size = vma->vm_end - vma->vm_start;
+
+	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
+	    size != PAGE_ALIGN(buf->vbuf.length)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
+	if (ret < 0)
+		goto done;
+
+	vma->vm_ops = &isp_video_queue_vm_ops;
+	vma->vm_private_data = buf;
+	isp_video_queue_vm_open(vma);
+
+done:
+	mutex_unlock(&queue->lock);
+	return ret;
+}
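
Userspace obtains the per-buffer offset matched against vma->vm_pgoff above from VIDIOC_QUERYBUF and passes it to mmap(); a minimal sketch for buffer i (assuming <sys/mman.h> and <sys/ioctl.h>):

struct v4l2_buffer vbuf = {
	.index	= i,
	.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
	.memory	= V4L2_MEMORY_MMAP,
};
void *mem;

ioctl(fd, VIDIOC_QUERYBUF, &vbuf);
mem = mmap(NULL, vbuf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
	   fd, vbuf.m.offset);
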
+
+/**
+ * omap3isp_video_queue_poll - Poll video queue state
+ *
+ * This function is intended to be used as a poll() file operation handler. It
+ * polls the state of the video buffer at the front of the queue and returns an
+ * events mask.
+ *
+ * If no buffer is present at the front of the queue, POLLERR is returned.
+ */
+unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
+				       struct file *file, poll_table *wait)
+{
+	struct isp_video_buffer *buf;
+	unsigned int mask = 0;
+
+	mutex_lock(&queue->lock);
+	if (list_empty(&queue->queue)) {
+		mask |= POLLERR;
+		goto done;
+	}
+	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
+
+	poll_wait(file, &buf->wait, wait);
+	if (buf->state == ISP_BUF_STATE_DONE ||
+	    buf->state == ISP_BUF_STATE_ERROR) {
+		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			mask |= POLLIN | POLLRDNORM;
+		else
+			mask |= POLLOUT | POLLWRNORM;
+	}
+
+done:
+	mutex_unlock(&queue->lock);
+	return mask;
+}
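
The poll handler above is driven by a regular poll()/select() call on the video device node; for instance, a sketch using <poll.h>:

struct pollfd pfd = { .fd = fd, .events = POLLIN };

if (poll(&pfd, 1, 2000) > 0 && (pfd.revents & POLLIN))
	;	/* a capture buffer is done (or in error): DQBUF won't block */
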
diff --git a/drivers/media/platform/omap3isp/ispqueue.h b/drivers/media/platform/omap3isp/ispqueue.h
new file mode 100644
index 0000000..908dfd7
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispqueue.h
@@ -0,0 +1,187 @@
+/*
+ * ispqueue.h
+ *
+ * TI OMAP3 ISP - Video buffers queue handling
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_QUEUE_H
+#define OMAP3_ISP_QUEUE_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+
+struct isp_video_queue;
+struct page;
+struct scatterlist;
+
+#define ISP_VIDEO_MAX_BUFFERS		16
+
+/**
+ * enum isp_video_buffer_state - ISP video buffer state
+ * @ISP_BUF_STATE_IDLE:	The buffer is under userspace control (dequeued
+ *	or not queued yet).
+ * @ISP_BUF_STATE_QUEUED: The buffer has been queued but isn't used by the
+ *	device yet.
+ * @ISP_BUF_STATE_ACTIVE: The buffer is in use for an active video transfer.
+ * @ISP_BUF_STATE_ERROR: The device is done with the buffer and an error
+ *	occurred. For capture devices the buffer likely contains corrupted data or
+ *	no data at all.
+ * @ISP_BUF_STATE_DONE: The device is done with the buffer and no error occurred.
+ *	For capture devices the buffer contains valid data.
+ */
+enum isp_video_buffer_state {
+	ISP_BUF_STATE_IDLE,
+	ISP_BUF_STATE_QUEUED,
+	ISP_BUF_STATE_ACTIVE,
+	ISP_BUF_STATE_ERROR,
+	ISP_BUF_STATE_DONE,
+};
+
+/**
+ * struct isp_video_buffer - ISP video buffer
+ * @vma_use_count: Number of times the buffer is mmap'ed to userspace
+ * @stream: List head for insertion into main queue
+ * @queue: ISP buffers queue this buffer belongs to
+ * @prepared: Whether the buffer has been prepared
+ * @skip_cache: Whether to skip cache management operations for this buffer
+ * @vaddr: Memory virtual address (for kernel buffers)
+ * @vm_flags: Buffer VMA flags (for userspace buffers)
+ * @offset: Offset inside the first page (for userspace buffers)
+ * @npages: Number of pages (for userspace buffers)
+ * @pages: Pages table (for userspace non-VM_PFNMAP buffers)
+ * @paddr: Memory physical address (for userspace VM_PFNMAP buffers)
+ * @sglen: Number of elements in the scatter list (for non-VM_PFNMAP buffers)
+ * @sglist: Scatter list (for non-VM_PFNMAP buffers)
+ * @vbuf: V4L2 buffer
+ * @irqlist: List head for insertion into IRQ queue
+ * @state: Current buffer state
+ * @wait: Wait queue to signal buffer completion
+ */
+struct isp_video_buffer {
+	unsigned long vma_use_count;
+	struct list_head stream;
+	struct isp_video_queue *queue;
+	unsigned int prepared:1;
+	bool skip_cache;
+
+	/* For kernel buffers. */
+	void *vaddr;
+
+	/* For userspace buffers. */
+	vm_flags_t vm_flags;
+	unsigned long offset;
+	unsigned int npages;
+	struct page **pages;
+	dma_addr_t paddr;
+
+	/* For all buffers except VM_PFNMAP. */
+	unsigned int sglen;
+	struct scatterlist *sglist;
+
+	/* Touched by the interrupt handler. */
+	struct v4l2_buffer vbuf;
+	struct list_head irqlist;
+	enum isp_video_buffer_state state;
+	wait_queue_head_t wait;
+};
+
+#define to_isp_video_buffer(vb)	container_of(vb, struct isp_video_buffer, vb)
+
+/**
+ * struct isp_video_queue_operations - Driver-specific operations
+ * @queue_prepare: Called before allocating buffers. Drivers should clamp the
+ *	number of buffers according to their requirements, and must return the
+ *	buffer size in bytes through the size argument.
+ * @buffer_prepare: Called the first time a buffer is queued, or after changing
+ *	the userspace memory address for a USERPTR buffer, with the queue lock
+ *	held. Drivers should perform device-specific buffer preparation (such as
+ *	mapping the buffer memory in an IOMMU). This operation is optional.
+ * @buffer_queue: Called when a buffer is being added to the queue with the
+ *	queue irqlock spinlock held.
+ * @buffer_cleanup: Called before freeing buffers, or before changing the
+ *	userspace memory address for a USERPTR buffer, with the queue lock held.
+ *	Drivers must perform cleanup operations required to undo the
+ *	buffer_prepare call. This operation is optional.
+ */
+struct isp_video_queue_operations {
+	void (*queue_prepare)(struct isp_video_queue *queue,
+			      unsigned int *nbuffers, unsigned int *size);
+	int  (*buffer_prepare)(struct isp_video_buffer *buf);
+	void (*buffer_queue)(struct isp_video_buffer *buf);
+	void (*buffer_cleanup)(struct isp_video_buffer *buf);
+};
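
As a rough illustration of the contract described in the comment above, driver callbacks could look along these lines; struct my_video and its fields are hypothetical and not part of this patch:

struct my_video {				/* hypothetical driver state */
	struct isp_video_queue queue;
	struct v4l2_pix_format pix;
	struct list_head dmaqueue;
};

static void my_queue_prepare(struct isp_video_queue *queue,
			     unsigned int *nbuffers, unsigned int *size)
{
	struct my_video *video = container_of(queue, struct my_video, queue);

	*size = video->pix.sizeimage;	/* bytes needed for one buffer */
	*nbuffers = min(*nbuffers, 8U);	/* clamp to what the driver supports */
}

static void my_buffer_queue(struct isp_video_buffer *buf)
{
	struct my_video *video = container_of(buf->queue, struct my_video, queue);

	/* Called with the queue irqlock held: pass the buffer to the hardware
	 * or add it to the driver's list of pending buffers.
	 */
	list_add_tail(&buf->irqlist, &video->dmaqueue);
}

static const struct isp_video_queue_operations my_queue_ops = {
	.queue_prepare	= my_queue_prepare,
	.buffer_queue	= my_buffer_queue,
};
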
+
+/**
+ * struct isp_video_queue - ISP video buffers queue
+ * @type: Type of video buffers handled by this queue
+ * @ops: Queue operations
+ * @dev: Device used for DMA operations
+ * @bufsize: Size of a driver-specific buffer object
+ * @count: Number of currently allocated buffers
+ * @buffers: ISP video buffers
+ * @lock: Mutex to protect access to the buffers, main queue and state
+ * @irqlock: Spinlock to protect access to the IRQ queue
+ * @streaming: Queue state, indicates whether the queue is streaming
+ * @queue: List of all queued buffers
+ */
+struct isp_video_queue {
+	enum v4l2_buf_type type;
+	const struct isp_video_queue_operations *ops;
+	struct device *dev;
+	unsigned int bufsize;
+
+	unsigned int count;
+	struct isp_video_buffer *buffers[ISP_VIDEO_MAX_BUFFERS];
+	struct mutex lock;
+	spinlock_t irqlock;
+
+	unsigned int streaming:1;
+
+	struct list_head queue;
+};
+
+int omap3isp_video_queue_cleanup(struct isp_video_queue *queue);
+int omap3isp_video_queue_init(struct isp_video_queue *queue,
+			      enum v4l2_buf_type type,
+			      const struct isp_video_queue_operations *ops,
+			      struct device *dev, unsigned int bufsize);
+
+int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
+				 struct v4l2_requestbuffers *rb);
+int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
+				  struct v4l2_buffer *vbuf);
+int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
+			      struct v4l2_buffer *vbuf);
+int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
+			       struct v4l2_buffer *vbuf, int nonblocking);
+int omap3isp_video_queue_streamon(struct isp_video_queue *queue);
+void omap3isp_video_queue_streamoff(struct isp_video_queue *queue);
+void omap3isp_video_queue_discard_done(struct isp_video_queue *queue);
+int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
+			      struct vm_area_struct *vma);
+unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
+				       struct file *file, poll_table *wait);
+
+#endif /* OMAP3_ISP_QUEUE_H */
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
new file mode 100644
index 0000000..084ea77
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -0,0 +1,1586 @@
+/*
+ * ispreg.h
+ *
+ * TI OMAP3 ISP - Registers definitions
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_REG_H
+#define OMAP3_ISP_REG_H
+
+#include <plat/omap34xx.h>
+
+
+#define CM_CAM_MCLK_HZ			172800000	/* Hz */
+
+/* ISP Submodules offset */
+
+#define OMAP3ISP_REG_BASE		OMAP3430_ISP_BASE
+#define OMAP3ISP_REG(offset)		(OMAP3ISP_REG_BASE + (offset))
+
+#define OMAP3ISP_CCP2_REG_OFFSET	0x0400
+#define OMAP3ISP_CCP2_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_CCP2_REG_OFFSET)
+#define OMAP3ISP_CCP2_REG(offset)	(OMAP3ISP_CCP2_REG_BASE + (offset))
+
+#define OMAP3ISP_CCDC_REG_OFFSET	0x0600
+#define OMAP3ISP_CCDC_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_CCDC_REG_OFFSET)
+#define OMAP3ISP_CCDC_REG(offset)	(OMAP3ISP_CCDC_REG_BASE + (offset))
+
+#define OMAP3ISP_HIST_REG_OFFSET	0x0A00
+#define OMAP3ISP_HIST_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_HIST_REG_OFFSET)
+#define OMAP3ISP_HIST_REG(offset)	(OMAP3ISP_HIST_REG_BASE + (offset))
+
+#define OMAP3ISP_H3A_REG_OFFSET		0x0C00
+#define OMAP3ISP_H3A_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_H3A_REG_OFFSET)
+#define OMAP3ISP_H3A_REG(offset)	(OMAP3ISP_H3A_REG_BASE + (offset))
+
+#define OMAP3ISP_PREV_REG_OFFSET	0x0E00
+#define OMAP3ISP_PREV_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_PREV_REG_OFFSET)
+#define OMAP3ISP_PREV_REG(offset)	(OMAP3ISP_PREV_REG_BASE + (offset))
+
+#define OMAP3ISP_RESZ_REG_OFFSET	0x1000
+#define OMAP3ISP_RESZ_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_RESZ_REG_OFFSET)
+#define OMAP3ISP_RESZ_REG(offset)	(OMAP3ISP_RESZ_REG_BASE + (offset))
+
+#define OMAP3ISP_SBL_REG_OFFSET		0x1200
+#define OMAP3ISP_SBL_REG_BASE		(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_SBL_REG_OFFSET)
+#define OMAP3ISP_SBL_REG(offset)	(OMAP3ISP_SBL_REG_BASE + (offset))
+
+#define OMAP3ISP_CSI2A_REGS1_REG_OFFSET	0x1800
+#define OMAP3ISP_CSI2A_REGS1_REG_BASE	(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_CSI2A_REGS1_REG_OFFSET)
+#define OMAP3ISP_CSI2A_REGS1_REG(offset)				\
+				(OMAP3ISP_CSI2A_REGS1_REG_BASE + (offset))
+
+#define OMAP3ISP_CSIPHY2_REG_OFFSET	0x1970
+#define OMAP3ISP_CSIPHY2_REG_BASE	(OMAP3ISP_REG_BASE +	\
+					 OMAP3ISP_CSIPHY2_REG_OFFSET)
+#define OMAP3ISP_CSIPHY2_REG(offset)	(OMAP3ISP_CSIPHY2_REG_BASE + (offset))
+
+#define OMAP3ISP_CSI2A_REGS2_REG_OFFSET	0x19C0
+#define OMAP3ISP_CSI2A_REGS2_REG_BASE	(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_CSI2A_REGS2_REG_OFFSET)
+#define OMAP3ISP_CSI2A_REGS2_REG(offset)				\
+				(OMAP3ISP_CSI2A_REGS2_REG_BASE + (offset))
+
+#define OMAP3ISP_CSI2C_REGS1_REG_OFFSET	0x1C00
+#define OMAP3ISP_CSI2C_REGS1_REG_BASE	(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_CSI2C_REGS1_REG_OFFSET)
+#define OMAP3ISP_CSI2C_REGS1_REG(offset)				\
+				(OMAP3ISP_CSI2C_REGS1_REG_BASE + (offset))
+
+#define OMAP3ISP_CSIPHY1_REG_OFFSET	0x1D70
+#define OMAP3ISP_CSIPHY1_REG_BASE	(OMAP3ISP_REG_BASE +	\
+					 OMAP3ISP_CSIPHY1_REG_OFFSET)
+#define OMAP3ISP_CSIPHY1_REG(offset)	(OMAP3ISP_CSIPHY1_REG_BASE + (offset))
+
+#define OMAP3ISP_CSI2C_REGS2_REG_OFFSET	0x1DC0
+#define OMAP3ISP_CSI2C_REGS2_REG_BASE	(OMAP3ISP_REG_BASE +		\
+					 OMAP3ISP_CSI2C_REGS2_REG_OFFSET)
+#define OMAP3ISP_CSI2C_REGS2_REG(offset)				\
+				(OMAP3ISP_CSI2C_REGS2_REG_BASE + (offset))
+
+/* ISP module register offset */
+
+#define ISP_REVISION			(0x000)
+#define ISP_SYSCONFIG			(0x004)
+#define ISP_SYSSTATUS			(0x008)
+#define ISP_IRQ0ENABLE			(0x00C)
+#define ISP_IRQ0STATUS			(0x010)
+#define ISP_IRQ1ENABLE			(0x014)
+#define ISP_IRQ1STATUS			(0x018)
+#define ISP_TCTRL_GRESET_LENGTH		(0x030)
+#define ISP_TCTRL_PSTRB_REPLAY		(0x034)
+#define ISP_CTRL			(0x040)
+#define ISP_SECURE			(0x044)
+#define ISP_TCTRL_CTRL			(0x050)
+#define ISP_TCTRL_FRAME			(0x054)
+#define ISP_TCTRL_PSTRB_DELAY		(0x058)
+#define ISP_TCTRL_STRB_DELAY		(0x05C)
+#define ISP_TCTRL_SHUT_DELAY		(0x060)
+#define ISP_TCTRL_PSTRB_LENGTH		(0x064)
+#define ISP_TCTRL_STRB_LENGTH		(0x068)
+#define ISP_TCTRL_SHUT_LENGTH		(0x06C)
+#define ISP_PING_PONG_ADDR		(0x070)
+#define ISP_PING_PONG_MEM_RANGE		(0x074)
+#define ISP_PING_PONG_BUF_SIZE		(0x078)
+
+/* CCP2 receiver registers */
+
+#define ISPCCP2_REVISION		(0x000)
+#define ISPCCP2_SYSCONFIG		(0x004)
+#define ISPCCP2_SYSCONFIG_SOFT_RESET	(1 << 1)
+#define ISPCCP2_SYSCONFIG_AUTO_IDLE		0x1
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT	12
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_FORCE	\
+	(0x0 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_NO	\
+	(0x1 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART	\
+	(0x2 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCCP2_SYSSTATUS		(0x008)
+#define ISPCCP2_SYSSTATUS_RESET_DONE	(1 << 0)
+#define ISPCCP2_LC01_IRQENABLE		(0x00C)
+#define ISPCCP2_LC01_IRQSTATUS		(0x010)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ	(1 << 11)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_LE_IRQ	(1 << 10)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_LS_IRQ	(1 << 9)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FE_IRQ	(1 << 8)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_COUNT_IRQ	(1 << 7)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ	(1 << 5)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ	(1 << 4)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ	(1 << 3)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ	(1 << 2)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ	(1 << 1)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ	(1 << 0)
+
+#define ISPCCP2_LC23_IRQENABLE		(0x014)
+#define ISPCCP2_LC23_IRQSTATUS		(0x018)
+#define ISPCCP2_LCM_IRQENABLE		(0x02C)
+#define ISPCCP2_LCM_IRQSTATUS_EOF_IRQ		(1 << 0)
+#define ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ	(1 << 1)
+#define ISPCCP2_LCM_IRQSTATUS		(0x030)
+#define ISPCCP2_CTRL			(0x040)
+#define ISPCCP2_CTRL_IF_EN		(1 << 0)
+#define ISPCCP2_CTRL_PHY_SEL		(1 << 1)
+#define ISPCCP2_CTRL_PHY_SEL_CLOCK	(0 << 1)
+#define ISPCCP2_CTRL_PHY_SEL_STROBE	(1 << 1)
+#define ISPCCP2_CTRL_PHY_SEL_MASK	0x1
+#define ISPCCP2_CTRL_PHY_SEL_SHIFT	1
+#define ISPCCP2_CTRL_IO_OUT_SEL		(1 << 2)
+#define ISPCCP2_CTRL_MODE		(1 << 4)
+#define ISPCCP2_CTRL_VP_CLK_FORCE_ON	(1 << 9)
+#define ISPCCP2_CTRL_INV		(1 << 10)
+#define ISPCCP2_CTRL_INV_MASK		0x1
+#define ISPCCP2_CTRL_INV_SHIFT		10
+#define ISPCCP2_CTRL_VP_ONLY_EN		(1 << 11)
+#define ISPCCP2_CTRL_VP_CLK_POL		(1 << 12)
+#define ISPCCP2_CTRL_VPCLK_DIV_SHIFT	15
+#define ISPCCP2_CTRL_VPCLK_DIV_MASK	0x1ffff /* [31:15] */
+#define ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT	8 /* 3430 bits */
+#define ISPCCP2_CTRL_VP_OUT_CTRL_MASK	0x3 /* 3430 bits */
+#define ISPCCP2_DBG			(0x044)
+#define ISPCCP2_GNQ			(0x048)
+#define ISPCCP2_LCx_CTRL(x)			((0x050)+0x30*(x))
+#define ISPCCP2_LCx_CTRL_CHAN_EN		(1 << 0)
+#define ISPCCP2_LCx_CTRL_CRC_EN			(1 << 19)
+#define ISPCCP2_LCx_CTRL_CRC_MASK		0x1
+#define ISPCCP2_LCx_CTRL_CRC_SHIFT		2
+#define ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0		19
+#define ISPCCP2_LCx_CTRL_REGION_EN		(1 << 1)
+#define ISPCCP2_LCx_CTRL_REGION_MASK		0x1
+#define ISPCCP2_LCx_CTRL_REGION_SHIFT		1
+#define ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0	0x3f
+#define ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0	0x2
+#define ISPCCP2_LCx_CTRL_FORMAT_MASK		0x1f
+#define ISPCCP2_LCx_CTRL_FORMAT_SHIFT		0x3
+#define ISPCCP2_LCx_CODE(x)		((0x054)+0x30*(x))
+#define ISPCCP2_LCx_STAT_START(x)	((0x058)+0x30*(x))
+#define ISPCCP2_LCx_STAT_SIZE(x)	((0x05C)+0x30*(x))
+#define ISPCCP2_LCx_SOF_ADDR(x)		((0x060)+0x30*(x))
+#define ISPCCP2_LCx_EOF_ADDR(x)		((0x064)+0x30*(x))
+#define ISPCCP2_LCx_DAT_START(x)	((0x068)+0x30*(x))
+#define ISPCCP2_LCx_DAT_SIZE(x)		((0x06C)+0x30*(x))
+#define ISPCCP2_LCx_DAT_MASK		0xFFF
+#define ISPCCP2_LCx_DAT_SHIFT		16
+#define ISPCCP2_LCx_DAT_PING_ADDR(x)	((0x070)+0x30*(x))
+#define ISPCCP2_LCx_DAT_PONG_ADDR(x)	((0x074)+0x30*(x))
+#define ISPCCP2_LCx_DAT_OFST(x)		((0x078)+0x30*(x))
+#define ISPCCP2_LCM_CTRL		(0x1D0)
+#define ISPCCP2_LCM_CTRL_CHAN_EN               (1 << 0)
+#define ISPCCP2_LCM_CTRL_DST_PORT              (1 << 2)
+#define ISPCCP2_LCM_CTRL_DST_PORT_SHIFT		2
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_SHIFT	3
+#define ISPCCP2_LCM_CTRL_READ_THROTTLE_MASK	0x11
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT	5
+#define ISPCCP2_LCM_CTRL_BURST_SIZE_MASK	0x7
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT	16
+#define ISPCCP2_LCM_CTRL_SRC_FORMAT_MASK	0x7
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT	20
+#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_MASK	0x3
+#define ISPCCP2_LCM_CTRL_SRC_DPCM_PRED		(1 << 22)
+#define ISPCCP2_LCM_CTRL_SRC_PACK		(1 << 23)
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT	24
+#define ISPCCP2_LCM_CTRL_DST_FORMAT_MASK	0x7
+#define ISPCCP2_LCM_VSIZE		(0x1D4)
+#define ISPCCP2_LCM_VSIZE_SHIFT		16
+#define ISPCCP2_LCM_HSIZE		(0x1D8)
+#define ISPCCP2_LCM_HSIZE_SHIFT		16
+#define ISPCCP2_LCM_PREFETCH		(0x1DC)
+#define ISPCCP2_LCM_PREFETCH_SHIFT	3
+#define ISPCCP2_LCM_SRC_ADDR		(0x1E0)
+#define ISPCCP2_LCM_SRC_OFST		(0x1E4)
+#define ISPCCP2_LCM_DST_ADDR		(0x1E8)
+#define ISPCCP2_LCM_DST_OFST		(0x1EC)
+
+/* CCDC module register offset */
+
+#define ISPCCDC_PID			(0x000)
+#define ISPCCDC_PCR			(0x004)
+#define ISPCCDC_SYN_MODE		(0x008)
+#define ISPCCDC_HD_VD_WID		(0x00C)
+#define ISPCCDC_PIX_LINES		(0x010)
+#define ISPCCDC_HORZ_INFO		(0x014)
+#define ISPCCDC_VERT_START		(0x018)
+#define ISPCCDC_VERT_LINES		(0x01C)
+#define ISPCCDC_CULLING			(0x020)
+#define ISPCCDC_HSIZE_OFF		(0x024)
+#define ISPCCDC_SDOFST			(0x028)
+#define ISPCCDC_SDR_ADDR		(0x02C)
+#define ISPCCDC_CLAMP			(0x030)
+#define ISPCCDC_DCSUB			(0x034)
+#define ISPCCDC_COLPTN			(0x038)
+#define ISPCCDC_BLKCMP			(0x03C)
+#define ISPCCDC_FPC			(0x040)
+#define ISPCCDC_FPC_ADDR		(0x044)
+#define ISPCCDC_VDINT			(0x048)
+#define ISPCCDC_ALAW			(0x04C)
+#define ISPCCDC_REC656IF		(0x050)
+#define ISPCCDC_CFG			(0x054)
+#define ISPCCDC_FMTCFG			(0x058)
+#define ISPCCDC_FMT_HORZ		(0x05C)
+#define ISPCCDC_FMT_VERT		(0x060)
+#define ISPCCDC_FMT_ADDR0		(0x064)
+#define ISPCCDC_FMT_ADDR1		(0x068)
+#define ISPCCDC_FMT_ADDR2		(0x06C)
+#define ISPCCDC_FMT_ADDR3		(0x070)
+#define ISPCCDC_FMT_ADDR4		(0x074)
+#define ISPCCDC_FMT_ADDR5		(0x078)
+#define ISPCCDC_FMT_ADDR6		(0x07C)
+#define ISPCCDC_FMT_ADDR7		(0x080)
+#define ISPCCDC_PRGEVEN0		(0x084)
+#define ISPCCDC_PRGEVEN1		(0x088)
+#define ISPCCDC_PRGODD0			(0x08C)
+#define ISPCCDC_PRGODD1			(0x090)
+#define ISPCCDC_VP_OUT			(0x094)
+
+#define ISPCCDC_LSC_CONFIG		(0x098)
+#define ISPCCDC_LSC_INITIAL		(0x09C)
+#define ISPCCDC_LSC_TABLE_BASE		(0x0A0)
+#define ISPCCDC_LSC_TABLE_OFFSET	(0x0A4)
+
+/* SBL */
+#define ISPSBL_PCR			0x4
+#define ISPSBL_PCR_H3A_AEAWB_WBL_OVF	(1 << 16)
+#define ISPSBL_PCR_H3A_AF_WBL_OVF	(1 << 17)
+#define ISPSBL_PCR_RSZ4_WBL_OVF		(1 << 18)
+#define ISPSBL_PCR_RSZ3_WBL_OVF		(1 << 19)
+#define ISPSBL_PCR_RSZ2_WBL_OVF		(1 << 20)
+#define ISPSBL_PCR_RSZ1_WBL_OVF		(1 << 21)
+#define ISPSBL_PCR_PRV_WBL_OVF		(1 << 22)
+#define ISPSBL_PCR_CCDC_WBL_OVF		(1 << 23)
+#define ISPSBL_PCR_CCDCPRV_2_RSZ_OVF	(1 << 24)
+#define ISPSBL_PCR_CSIA_WBL_OVF		(1 << 25)
+#define ISPSBL_PCR_CSIB_WBL_OVF		(1 << 26)
+#define ISPSBL_CCDC_WR_0		(0x028)
+#define ISPSBL_CCDC_WR_0_DATA_READY	(1 << 21)
+#define ISPSBL_CCDC_WR_1		(0x02C)
+#define ISPSBL_CCDC_WR_2		(0x030)
+#define ISPSBL_CCDC_WR_3		(0x034)
+
+#define ISPSBL_SDR_REQ_EXP		0xF8
+#define ISPSBL_SDR_REQ_HIST_EXP_SHIFT	0
+#define ISPSBL_SDR_REQ_HIST_EXP_MASK	(0x3FF)
+#define ISPSBL_SDR_REQ_RSZ_EXP_SHIFT	10
+#define ISPSBL_SDR_REQ_RSZ_EXP_MASK	(0x3FF << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT)
+#define ISPSBL_SDR_REQ_PRV_EXP_SHIFT	20
+#define ISPSBL_SDR_REQ_PRV_EXP_MASK	(0x3FF << ISPSBL_SDR_REQ_PRV_EXP_SHIFT)
+
+/* Histogram registers */
+#define ISPHIST_PID			(0x000)
+#define ISPHIST_PCR			(0x004)
+#define ISPHIST_CNT			(0x008)
+#define ISPHIST_WB_GAIN			(0x00C)
+#define ISPHIST_R0_HORZ			(0x010)
+#define ISPHIST_R0_VERT			(0x014)
+#define ISPHIST_R1_HORZ			(0x018)
+#define ISPHIST_R1_VERT			(0x01C)
+#define ISPHIST_R2_HORZ			(0x020)
+#define ISPHIST_R2_VERT			(0x024)
+#define ISPHIST_R3_HORZ			(0x028)
+#define ISPHIST_R3_VERT			(0x02C)
+#define ISPHIST_ADDR			(0x030)
+#define ISPHIST_DATA			(0x034)
+#define ISPHIST_RADD			(0x038)
+#define ISPHIST_RADD_OFF		(0x03C)
+#define ISPHIST_H_V_INFO		(0x040)
+
+/* H3A module registers */
+#define ISPH3A_PID			(0x000)
+#define ISPH3A_PCR			(0x004)
+#define ISPH3A_AEWWIN1			(0x04C)
+#define ISPH3A_AEWINSTART		(0x050)
+#define ISPH3A_AEWINBLK			(0x054)
+#define ISPH3A_AEWSUBWIN		(0x058)
+#define ISPH3A_AEWBUFST			(0x05C)
+#define ISPH3A_AFPAX1			(0x008)
+#define ISPH3A_AFPAX2			(0x00C)
+#define ISPH3A_AFPAXSTART		(0x010)
+#define ISPH3A_AFIIRSH			(0x014)
+#define ISPH3A_AFBUFST			(0x018)
+#define ISPH3A_AFCOEF010		(0x01C)
+#define ISPH3A_AFCOEF032		(0x020)
+#define ISPH3A_AFCOEF054		(0x024)
+#define ISPH3A_AFCOEF076		(0x028)
+#define ISPH3A_AFCOEF098		(0x02C)
+#define ISPH3A_AFCOEF0010		(0x030)
+#define ISPH3A_AFCOEF110		(0x034)
+#define ISPH3A_AFCOEF132		(0x038)
+#define ISPH3A_AFCOEF154		(0x03C)
+#define ISPH3A_AFCOEF176		(0x040)
+#define ISPH3A_AFCOEF198		(0x044)
+#define ISPH3A_AFCOEF1010		(0x048)
+
+#define ISPPRV_PCR			(0x004)
+#define ISPPRV_HORZ_INFO		(0x008)
+#define ISPPRV_VERT_INFO		(0x00C)
+#define ISPPRV_RSDR_ADDR		(0x010)
+#define ISPPRV_RADR_OFFSET		(0x014)
+#define ISPPRV_DSDR_ADDR		(0x018)
+#define ISPPRV_DRKF_OFFSET		(0x01C)
+#define ISPPRV_WSDR_ADDR		(0x020)
+#define ISPPRV_WADD_OFFSET		(0x024)
+#define ISPPRV_AVE			(0x028)
+#define ISPPRV_HMED			(0x02C)
+#define ISPPRV_NF			(0x030)
+#define ISPPRV_WB_DGAIN			(0x034)
+#define ISPPRV_WBGAIN			(0x038)
+#define ISPPRV_WBSEL			(0x03C)
+#define ISPPRV_CFA			(0x040)
+#define ISPPRV_BLKADJOFF		(0x044)
+#define ISPPRV_RGB_MAT1			(0x048)
+#define ISPPRV_RGB_MAT2			(0x04C)
+#define ISPPRV_RGB_MAT3			(0x050)
+#define ISPPRV_RGB_MAT4			(0x054)
+#define ISPPRV_RGB_MAT5			(0x058)
+#define ISPPRV_RGB_OFF1			(0x05C)
+#define ISPPRV_RGB_OFF2			(0x060)
+#define ISPPRV_CSC0			(0x064)
+#define ISPPRV_CSC1			(0x068)
+#define ISPPRV_CSC2			(0x06C)
+#define ISPPRV_CSC_OFFSET		(0x070)
+#define ISPPRV_CNT_BRT			(0x074)
+#define ISPPRV_CSUP			(0x078)
+#define ISPPRV_SETUP_YC			(0x07C)
+#define ISPPRV_SET_TBL_ADDR		(0x080)
+#define ISPPRV_SET_TBL_DATA		(0x084)
+#define ISPPRV_CDC_THR0			(0x090)
+#define ISPPRV_CDC_THR1			(ISPPRV_CDC_THR0 + (0x4))
+#define ISPPRV_CDC_THR2			(ISPPRV_CDC_THR0 + (0x4) * 2)
+#define ISPPRV_CDC_THR3			(ISPPRV_CDC_THR0 + (0x4) * 3)
+
+#define ISPPRV_REDGAMMA_TABLE_ADDR	0x0000
+#define ISPPRV_GREENGAMMA_TABLE_ADDR	0x0400
+#define ISPPRV_BLUEGAMMA_TABLE_ADDR	0x0800
+#define ISPPRV_NF_TABLE_ADDR		0x0C00
+#define ISPPRV_YENH_TABLE_ADDR		0x1000
+#define ISPPRV_CFA_TABLE_ADDR		0x1400
+
+#define ISPRSZ_MIN_OUTPUT		64
+#define ISPRSZ_MAX_OUTPUT		3312
+
+/* Resizer module register offset */
+#define ISPRSZ_PID			(0x000)
+#define ISPRSZ_PCR			(0x004)
+#define ISPRSZ_CNT			(0x008)
+#define ISPRSZ_OUT_SIZE			(0x00C)
+#define ISPRSZ_IN_START			(0x010)
+#define ISPRSZ_IN_SIZE			(0x014)
+#define ISPRSZ_SDR_INADD		(0x018)
+#define ISPRSZ_SDR_INOFF		(0x01C)
+#define ISPRSZ_SDR_OUTADD		(0x020)
+#define ISPRSZ_SDR_OUTOFF		(0x024)
+#define ISPRSZ_HFILT10			(0x028)
+#define ISPRSZ_HFILT32			(0x02C)
+#define ISPRSZ_HFILT54			(0x030)
+#define ISPRSZ_HFILT76			(0x034)
+#define ISPRSZ_HFILT98			(0x038)
+#define ISPRSZ_HFILT1110		(0x03C)
+#define ISPRSZ_HFILT1312		(0x040)
+#define ISPRSZ_HFILT1514		(0x044)
+#define ISPRSZ_HFILT1716		(0x048)
+#define ISPRSZ_HFILT1918		(0x04C)
+#define ISPRSZ_HFILT2120		(0x050)
+#define ISPRSZ_HFILT2322		(0x054)
+#define ISPRSZ_HFILT2524		(0x058)
+#define ISPRSZ_HFILT2726		(0x05C)
+#define ISPRSZ_HFILT2928		(0x060)
+#define ISPRSZ_HFILT3130		(0x064)
+#define ISPRSZ_VFILT10			(0x068)
+#define ISPRSZ_VFILT32			(0x06C)
+#define ISPRSZ_VFILT54			(0x070)
+#define ISPRSZ_VFILT76			(0x074)
+#define ISPRSZ_VFILT98			(0x078)
+#define ISPRSZ_VFILT1110		(0x07C)
+#define ISPRSZ_VFILT1312		(0x080)
+#define ISPRSZ_VFILT1514		(0x084)
+#define ISPRSZ_VFILT1716		(0x088)
+#define ISPRSZ_VFILT1918		(0x08C)
+#define ISPRSZ_VFILT2120		(0x090)
+#define ISPRSZ_VFILT2322		(0x094)
+#define ISPRSZ_VFILT2524		(0x098)
+#define ISPRSZ_VFILT2726		(0x09C)
+#define ISPRSZ_VFILT2928		(0x0A0)
+#define ISPRSZ_VFILT3130		(0x0A4)
+#define ISPRSZ_YENH			(0x0A8)
+
+#define ISP_INT_CLR			0xFF113F11
+#define ISPPRV_PCR_EN			1
+#define ISPPRV_PCR_BUSY			(1 << 1)
+#define ISPPRV_PCR_SOURCE		(1 << 2)
+#define ISPPRV_PCR_ONESHOT		(1 << 3)
+#define ISPPRV_PCR_WIDTH		(1 << 4)
+#define ISPPRV_PCR_INVALAW		(1 << 5)
+#define ISPPRV_PCR_DRKFEN		(1 << 6)
+#define ISPPRV_PCR_DRKFCAP		(1 << 7)
+#define ISPPRV_PCR_HMEDEN		(1 << 8)
+#define ISPPRV_PCR_NFEN			(1 << 9)
+#define ISPPRV_PCR_CFAEN		(1 << 10)
+#define ISPPRV_PCR_CFAFMT_SHIFT		11
+#define ISPPRV_PCR_CFAFMT_MASK		0x7800
+#define ISPPRV_PCR_CFAFMT_BAYER		(0 << 11)
+#define ISPPRV_PCR_CFAFMT_SONYVGA	(1 << 11)
+#define ISPPRV_PCR_CFAFMT_RGBFOVEON	(2 << 11)
+#define ISPPRV_PCR_CFAFMT_DNSPL		(3 << 11)
+#define ISPPRV_PCR_CFAFMT_HONEYCOMB	(4 << 11)
+#define ISPPRV_PCR_CFAFMT_RRGGBBFOVEON	(5 << 11)
+#define ISPPRV_PCR_YNENHEN		(1 << 15)
+#define ISPPRV_PCR_SUPEN		(1 << 16)
+#define ISPPRV_PCR_YCPOS_SHIFT		17
+#define ISPPRV_PCR_YCPOS_YCrYCb		(0 << 17)
+#define ISPPRV_PCR_YCPOS_YCbYCr		(1 << 17)
+#define ISPPRV_PCR_YCPOS_CbYCrY		(2 << 17)
+#define ISPPRV_PCR_YCPOS_CrYCbY		(3 << 17)
+#define ISPPRV_PCR_RSZPORT		(1 << 19)
+#define ISPPRV_PCR_SDRPORT		(1 << 20)
+#define ISPPRV_PCR_SCOMP_EN		(1 << 21)
+#define ISPPRV_PCR_SCOMP_SFT_SHIFT	(22)
+#define ISPPRV_PCR_SCOMP_SFT_MASK	(7 << 22)
+#define ISPPRV_PCR_GAMMA_BYPASS		(1 << 26)
+#define ISPPRV_PCR_DCOREN		(1 << 27)
+#define ISPPRV_PCR_DCCOUP		(1 << 28)
+#define ISPPRV_PCR_DRK_FAIL		(1 << 31)
+
+#define ISPPRV_HORZ_INFO_EPH_SHIFT	0
+#define ISPPRV_HORZ_INFO_EPH_MASK	0x3fff
+#define ISPPRV_HORZ_INFO_SPH_SHIFT	16
+#define ISPPRV_HORZ_INFO_SPH_MASK	0x3fff0
+
+#define ISPPRV_VERT_INFO_ELV_SHIFT	0
+#define ISPPRV_VERT_INFO_ELV_MASK	0x3fff
+#define ISPPRV_VERT_INFO_SLV_SHIFT	16
+#define ISPPRV_VERT_INFO_SLV_MASK	0x3fff0
+
+#define ISPPRV_AVE_EVENDIST_SHIFT	2
+#define ISPPRV_AVE_EVENDIST_1		0x0
+#define ISPPRV_AVE_EVENDIST_2		0x1
+#define ISPPRV_AVE_EVENDIST_3		0x2
+#define ISPPRV_AVE_EVENDIST_4		0x3
+#define ISPPRV_AVE_ODDDIST_SHIFT	4
+#define ISPPRV_AVE_ODDDIST_1		0x0
+#define ISPPRV_AVE_ODDDIST_2		0x1
+#define ISPPRV_AVE_ODDDIST_3		0x2
+#define ISPPRV_AVE_ODDDIST_4		0x3
+
+#define ISPPRV_HMED_THRESHOLD_SHIFT	0
+#define ISPPRV_HMED_EVENDIST		(1 << 8)
+#define ISPPRV_HMED_ODDDIST		(1 << 9)
+
+#define ISPPRV_WBGAIN_COEF0_SHIFT	0
+#define ISPPRV_WBGAIN_COEF1_SHIFT	8
+#define ISPPRV_WBGAIN_COEF2_SHIFT	16
+#define ISPPRV_WBGAIN_COEF3_SHIFT	24
+
+#define ISPPRV_WBSEL_COEF0		0x0
+#define ISPPRV_WBSEL_COEF1		0x1
+#define ISPPRV_WBSEL_COEF2		0x2
+#define ISPPRV_WBSEL_COEF3		0x3
+
+#define ISPPRV_WBSEL_N0_0_SHIFT		0
+#define ISPPRV_WBSEL_N0_1_SHIFT		2
+#define ISPPRV_WBSEL_N0_2_SHIFT		4
+#define ISPPRV_WBSEL_N0_3_SHIFT		6
+#define ISPPRV_WBSEL_N1_0_SHIFT		8
+#define ISPPRV_WBSEL_N1_1_SHIFT		10
+#define ISPPRV_WBSEL_N1_2_SHIFT		12
+#define ISPPRV_WBSEL_N1_3_SHIFT		14
+#define ISPPRV_WBSEL_N2_0_SHIFT		16
+#define ISPPRV_WBSEL_N2_1_SHIFT		18
+#define ISPPRV_WBSEL_N2_2_SHIFT		20
+#define ISPPRV_WBSEL_N2_3_SHIFT		22
+#define ISPPRV_WBSEL_N3_0_SHIFT		24
+#define ISPPRV_WBSEL_N3_1_SHIFT		26
+#define ISPPRV_WBSEL_N3_2_SHIFT		28
+#define ISPPRV_WBSEL_N3_3_SHIFT		30
+
+#define ISPPRV_CFA_GRADTH_HOR_SHIFT	0
+#define ISPPRV_CFA_GRADTH_VER_SHIFT	8
+
+#define ISPPRV_BLKADJOFF_B_SHIFT	0
+#define ISPPRV_BLKADJOFF_G_SHIFT	8
+#define ISPPRV_BLKADJOFF_R_SHIFT	16
+
+#define ISPPRV_RGB_MAT1_MTX_RR_SHIFT	0
+#define ISPPRV_RGB_MAT1_MTX_GR_SHIFT	16
+
+#define ISPPRV_RGB_MAT2_MTX_BR_SHIFT	0
+#define ISPPRV_RGB_MAT2_MTX_RG_SHIFT	16
+
+#define ISPPRV_RGB_MAT3_MTX_GG_SHIFT	0
+#define ISPPRV_RGB_MAT3_MTX_BG_SHIFT	16
+
+#define ISPPRV_RGB_MAT4_MTX_RB_SHIFT	0
+#define ISPPRV_RGB_MAT4_MTX_GB_SHIFT	16
+
+#define ISPPRV_RGB_MAT5_MTX_BB_SHIFT	0
+
+#define ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT	0
+#define ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT	16
+
+#define ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT	0
+
+#define ISPPRV_CSC0_RY_SHIFT		0
+#define ISPPRV_CSC0_GY_SHIFT		10
+#define ISPPRV_CSC0_BY_SHIFT		20
+
+#define ISPPRV_CSC1_RCB_SHIFT		0
+#define ISPPRV_CSC1_GCB_SHIFT		10
+#define ISPPRV_CSC1_BCB_SHIFT		20
+
+#define ISPPRV_CSC2_RCR_SHIFT		0
+#define ISPPRV_CSC2_GCR_SHIFT		10
+#define ISPPRV_CSC2_BCR_SHIFT		20
+
+#define ISPPRV_CSC_OFFSET_CR_SHIFT	0
+#define ISPPRV_CSC_OFFSET_CB_SHIFT	8
+#define ISPPRV_CSC_OFFSET_Y_SHIFT	16
+
+#define ISPPRV_CNT_BRT_BRT_SHIFT	0
+#define ISPPRV_CNT_BRT_CNT_SHIFT	8
+
+#define ISPPRV_CONTRAST_MAX		0x10
+#define ISPPRV_CONTRAST_MIN		0xFF
+#define ISPPRV_BRIGHT_MIN		0x00
+#define ISPPRV_BRIGHT_MAX		0xFF
+
+#define ISPPRV_CSUP_CSUPG_SHIFT		0
+#define ISPPRV_CSUP_THRES_SHIFT		8
+#define ISPPRV_CSUP_HPYF_SHIFT		16
+
+#define ISPPRV_SETUP_YC_MINC_SHIFT	0
+#define ISPPRV_SETUP_YC_MAXC_SHIFT	8
+#define ISPPRV_SETUP_YC_MINY_SHIFT	16
+#define ISPPRV_SETUP_YC_MAXY_SHIFT	24
+#define ISPPRV_YC_MAX			0xFF
+#define ISPPRV_YC_MIN			0x0
+
+/* Define bit fields within selected registers */
+#define ISP_REVISION_SHIFT			0
+
+#define ISP_SYSCONFIG_AUTOIDLE			(1 << 0)
+#define ISP_SYSCONFIG_SOFTRESET			(1 << 1)
+#define ISP_SYSCONFIG_MIDLEMODE_SHIFT		12
+#define ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY	0x0
+#define ISP_SYSCONFIG_MIDLEMODE_NOSTANBY	0x1
+#define ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY	0x2
+
+#define ISP_SYSSTATUS_RESETDONE			0
+
+#define IRQ0ENABLE_CSIA_IRQ			(1 << 0)
+#define IRQ0ENABLE_CSIC_IRQ			(1 << 1)
+#define IRQ0ENABLE_CCP2_LCM_IRQ			(1 << 3)
+#define IRQ0ENABLE_CCP2_LC0_IRQ			(1 << 4)
+#define IRQ0ENABLE_CCP2_LC1_IRQ			(1 << 5)
+#define IRQ0ENABLE_CCP2_LC2_IRQ			(1 << 6)
+#define IRQ0ENABLE_CCP2_LC3_IRQ			(1 << 7)
+#define IRQ0ENABLE_CSIB_IRQ			(IRQ0ENABLE_CCP2_LCM_IRQ | \
+						IRQ0ENABLE_CCP2_LC0_IRQ | \
+						IRQ0ENABLE_CCP2_LC1_IRQ | \
+						IRQ0ENABLE_CCP2_LC2_IRQ | \
+						IRQ0ENABLE_CCP2_LC3_IRQ)
+
+#define IRQ0ENABLE_CCDC_VD0_IRQ			(1 << 8)
+#define IRQ0ENABLE_CCDC_VD1_IRQ			(1 << 9)
+#define IRQ0ENABLE_CCDC_VD2_IRQ			(1 << 10)
+#define IRQ0ENABLE_CCDC_ERR_IRQ			(1 << 11)
+#define IRQ0ENABLE_H3A_AF_DONE_IRQ		(1 << 12)
+#define IRQ0ENABLE_H3A_AWB_DONE_IRQ		(1 << 13)
+#define IRQ0ENABLE_HIST_DONE_IRQ		(1 << 16)
+#define IRQ0ENABLE_CCDC_LSC_DONE_IRQ		(1 << 17)
+#define IRQ0ENABLE_CCDC_LSC_PREF_COMP_IRQ	(1 << 18)
+#define IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ	(1 << 19)
+#define IRQ0ENABLE_PRV_DONE_IRQ			(1 << 20)
+#define IRQ0ENABLE_RSZ_DONE_IRQ			(1 << 24)
+#define IRQ0ENABLE_OVF_IRQ			(1 << 25)
+#define IRQ0ENABLE_PING_IRQ			(1 << 26)
+#define IRQ0ENABLE_PONG_IRQ			(1 << 27)
+#define IRQ0ENABLE_MMU_ERR_IRQ			(1 << 28)
+#define IRQ0ENABLE_OCP_ERR_IRQ			(1 << 29)
+#define IRQ0ENABLE_SEC_ERR_IRQ			(1 << 30)
+#define IRQ0ENABLE_HS_VS_IRQ			(1 << 31)
+
+#define IRQ0STATUS_CSIA_IRQ			(1 << 0)
+#define IRQ0STATUS_CSI2C_IRQ			(1 << 1)
+#define IRQ0STATUS_CCP2_LCM_IRQ			(1 << 3)
+#define IRQ0STATUS_CCP2_LC0_IRQ			(1 << 4)
+#define IRQ0STATUS_CSIB_IRQ			(IRQ0STATUS_CCP2_LCM_IRQ | \
+						IRQ0STATUS_CCP2_LC0_IRQ)
+
+#define IRQ0STATUS_CSIB_LC1_IRQ			(1 << 5)
+#define IRQ0STATUS_CSIB_LC2_IRQ			(1 << 6)
+#define IRQ0STATUS_CSIB_LC3_IRQ			(1 << 7)
+#define IRQ0STATUS_CCDC_VD0_IRQ			(1 << 8)
+#define IRQ0STATUS_CCDC_VD1_IRQ			(1 << 9)
+#define IRQ0STATUS_CCDC_VD2_IRQ			(1 << 10)
+#define IRQ0STATUS_CCDC_ERR_IRQ			(1 << 11)
+#define IRQ0STATUS_H3A_AF_DONE_IRQ		(1 << 12)
+#define IRQ0STATUS_H3A_AWB_DONE_IRQ		(1 << 13)
+#define IRQ0STATUS_HIST_DONE_IRQ		(1 << 16)
+#define IRQ0STATUS_CCDC_LSC_DONE_IRQ		(1 << 17)
+#define IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ	(1 << 18)
+#define IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ	(1 << 19)
+#define IRQ0STATUS_PRV_DONE_IRQ			(1 << 20)
+#define IRQ0STATUS_RSZ_DONE_IRQ			(1 << 24)
+#define IRQ0STATUS_OVF_IRQ			(1 << 25)
+#define IRQ0STATUS_PING_IRQ			(1 << 26)
+#define IRQ0STATUS_PONG_IRQ			(1 << 27)
+#define IRQ0STATUS_MMU_ERR_IRQ			(1 << 28)
+#define IRQ0STATUS_OCP_ERR_IRQ			(1 << 29)
+#define IRQ0STATUS_SEC_ERR_IRQ			(1 << 30)
+#define IRQ0STATUS_HS_VS_IRQ			(1 << 31)
+
+#define TCTRL_GRESET_LEN			0
+
+#define TCTRL_PSTRB_REPLAY_DELAY		0
+#define TCTRL_PSTRB_REPLAY_COUNTER_SHIFT	25
+
+#define ISPCTRL_PAR_SER_CLK_SEL_PARALLEL	0x0
+#define ISPCTRL_PAR_SER_CLK_SEL_CSIA		0x1
+#define ISPCTRL_PAR_SER_CLK_SEL_CSIB		0x2
+#define ISPCTRL_PAR_SER_CLK_SEL_CSIC		0x3
+#define ISPCTRL_PAR_SER_CLK_SEL_MASK		0x3
+
+#define ISPCTRL_PAR_BRIDGE_SHIFT		2
+#define ISPCTRL_PAR_BRIDGE_DISABLE		(0x0 << 2)
+#define ISPCTRL_PAR_BRIDGE_LENDIAN		(0x2 << 2)
+#define ISPCTRL_PAR_BRIDGE_BENDIAN		(0x3 << 2)
+#define ISPCTRL_PAR_BRIDGE_MASK			(0x3 << 2)
+
+#define ISPCTRL_PAR_CLK_POL_SHIFT		4
+#define ISPCTRL_PAR_CLK_POL_INV			(1 << 4)
+#define ISPCTRL_PING_PONG_EN			(1 << 5)
+#define ISPCTRL_SHIFT_SHIFT			6
+#define ISPCTRL_SHIFT_0				(0x0 << 6)
+#define ISPCTRL_SHIFT_2				(0x1 << 6)
+#define ISPCTRL_SHIFT_4				(0x2 << 6)
+#define ISPCTRL_SHIFT_MASK			(0x3 << 6)
+
+#define ISPCTRL_CCDC_CLK_EN			(1 << 8)
+#define ISPCTRL_SCMP_CLK_EN			(1 << 9)
+#define ISPCTRL_H3A_CLK_EN			(1 << 10)
+#define ISPCTRL_HIST_CLK_EN			(1 << 11)
+#define ISPCTRL_PREV_CLK_EN			(1 << 12)
+#define ISPCTRL_RSZ_CLK_EN			(1 << 13)
+#define ISPCTRL_SYNC_DETECT_SHIFT		14
+#define ISPCTRL_SYNC_DETECT_HSFALL	(0x0 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_HSRISE	(0x1 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_VSFALL	(0x2 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_VSRISE	(0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
+#define ISPCTRL_SYNC_DETECT_MASK	(0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
+
+#define ISPCTRL_CCDC_RAM_EN		(1 << 16)
+#define ISPCTRL_PREV_RAM_EN		(1 << 17)
+#define ISPCTRL_SBL_RD_RAM_EN		(1 << 18)
+#define ISPCTRL_SBL_WR1_RAM_EN		(1 << 19)
+#define ISPCTRL_SBL_WR0_RAM_EN		(1 << 20)
+#define ISPCTRL_SBL_AUTOIDLE		(1 << 21)
+#define ISPCTRL_SBL_SHARED_WPORTC	(1 << 26)
+#define ISPCTRL_SBL_SHARED_RPORTA	(1 << 27)
+#define ISPCTRL_SBL_SHARED_RPORTB	(1 << 28)
+#define ISPCTRL_JPEG_FLUSH		(1 << 30)
+#define ISPCTRL_CCDC_FLUSH		(1 << 31)
+
+#define ISPSECURE_SECUREMODE		0
+
+#define ISPTCTRL_CTRL_DIV_LOW		0x0
+#define ISPTCTRL_CTRL_DIV_HIGH		0x1
+#define ISPTCTRL_CTRL_DIV_BYPASS	0x1F
+
+#define ISPTCTRL_CTRL_DIVA_SHIFT	0
+#define ISPTCTRL_CTRL_DIVA_MASK		(0x1F << ISPTCTRL_CTRL_DIVA_SHIFT)
+
+#define ISPTCTRL_CTRL_DIVB_SHIFT	5
+#define ISPTCTRL_CTRL_DIVB_MASK		(0x1F << ISPTCTRL_CTRL_DIVB_SHIFT)
+
+#define ISPTCTRL_CTRL_DIVC_SHIFT	10
+#define ISPTCTRL_CTRL_DIVC_NOCLOCK	(0x0 << 10)
+
+#define ISPTCTRL_CTRL_SHUTEN		(1 << 21)
+#define ISPTCTRL_CTRL_PSTRBEN		(1 << 22)
+#define ISPTCTRL_CTRL_STRBEN		(1 << 23)
+#define ISPTCTRL_CTRL_SHUTPOL		(1 << 24)
+#define ISPTCTRL_CTRL_STRBPSTRBPOL	(1 << 26)
+
+#define ISPTCTRL_CTRL_INSEL_SHIFT	27
+#define ISPTCTRL_CTRL_INSEL_PARALLEL	(0x0 << 27)
+#define ISPTCTRL_CTRL_INSEL_CSIA	(0x1 << 27)
+#define ISPTCTRL_CTRL_INSEL_CSIB	(0x2 << 27)
+
+#define ISPTCTRL_CTRL_GRESETEn		(1 << 29)
+#define ISPTCTRL_CTRL_GRESETPOL		(1 << 30)
+#define ISPTCTRL_CTRL_GRESETDIR		(1 << 31)
+
+#define ISPTCTRL_FRAME_SHUT_SHIFT		0
+#define ISPTCTRL_FRAME_PSTRB_SHIFT		6
+#define ISPTCTRL_FRAME_STRB_SHIFT		12
+
+#define ISPCCDC_PID_PREV_SHIFT			0
+#define ISPCCDC_PID_CID_SHIFT			8
+#define ISPCCDC_PID_TID_SHIFT			16
+
+#define ISPCCDC_PCR_EN				1
+#define ISPCCDC_PCR_BUSY			(1 << 1)
+
+#define ISPCCDC_SYN_MODE_VDHDOUT		0x1
+#define ISPCCDC_SYN_MODE_FLDOUT			(1 << 1)
+#define ISPCCDC_SYN_MODE_VDPOL			(1 << 2)
+#define ISPCCDC_SYN_MODE_HDPOL			(1 << 3)
+#define ISPCCDC_SYN_MODE_FLDPOL			(1 << 4)
+#define ISPCCDC_SYN_MODE_EXWEN			(1 << 5)
+#define ISPCCDC_SYN_MODE_DATAPOL		(1 << 6)
+#define ISPCCDC_SYN_MODE_FLDMODE		(1 << 7)
+#define ISPCCDC_SYN_MODE_DATSIZ_MASK		(0x7 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_8_16		(0x0 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_12		(0x4 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_11		(0x5 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_10		(0x6 << 8)
+#define ISPCCDC_SYN_MODE_DATSIZ_8		(0x7 << 8)
+#define ISPCCDC_SYN_MODE_PACK8			(1 << 11)
+#define ISPCCDC_SYN_MODE_INPMOD_MASK		(3 << 12)
+#define ISPCCDC_SYN_MODE_INPMOD_RAW		(0 << 12)
+#define ISPCCDC_SYN_MODE_INPMOD_YCBCR16		(1 << 12)
+#define ISPCCDC_SYN_MODE_INPMOD_YCBCR8		(2 << 12)
+#define ISPCCDC_SYN_MODE_LPF			(1 << 14)
+#define ISPCCDC_SYN_MODE_FLDSTAT		(1 << 15)
+#define ISPCCDC_SYN_MODE_VDHDEN			(1 << 16)
+#define ISPCCDC_SYN_MODE_WEN			(1 << 17)
+#define ISPCCDC_SYN_MODE_VP2SDR			(1 << 18)
+#define ISPCCDC_SYN_MODE_SDR2RSZ		(1 << 19)
+
+#define ISPCCDC_HD_VD_WID_VDW_SHIFT		0
+#define ISPCCDC_HD_VD_WID_HDW_SHIFT		16
+
+#define ISPCCDC_PIX_LINES_HLPRF_SHIFT		0
+#define ISPCCDC_PIX_LINES_PPLN_SHIFT		16
+
+#define ISPCCDC_HORZ_INFO_NPH_SHIFT		0
+#define ISPCCDC_HORZ_INFO_NPH_MASK		0x00007fff
+#define ISPCCDC_HORZ_INFO_SPH_SHIFT		16
+#define ISPCCDC_HORZ_INFO_SPH_MASK		0x7fff0000
+
+#define ISPCCDC_VERT_START_SLV1_SHIFT		0
+#define ISPCCDC_VERT_START_SLV0_SHIFT		16
+#define ISPCCDC_VERT_START_SLV0_MASK		0x7fff0000
+
+#define ISPCCDC_VERT_LINES_NLV_SHIFT		0
+#define ISPCCDC_VERT_LINES_NLV_MASK		0x00007fff
+
+#define ISPCCDC_CULLING_CULV_SHIFT		0
+#define ISPCCDC_CULLING_CULHODD_SHIFT		16
+#define ISPCCDC_CULLING_CULHEVN_SHIFT		24
+
+#define ISPCCDC_HSIZE_OFF_SHIFT			0
+
+#define ISPCCDC_SDOFST_FINV			(1 << 14)
+#define ISPCCDC_SDOFST_FOFST_1L			0
+#define ISPCCDC_SDOFST_FOFST_4L			(3 << 12)
+#define ISPCCDC_SDOFST_LOFST3_SHIFT		0
+#define ISPCCDC_SDOFST_LOFST2_SHIFT		3
+#define ISPCCDC_SDOFST_LOFST1_SHIFT		6
+#define ISPCCDC_SDOFST_LOFST0_SHIFT		9
+#define EVENEVEN				1
+#define ODDEVEN					2
+#define EVENODD					3
+#define ODDODD					4
+
+#define ISPCCDC_CLAMP_OBGAIN_SHIFT		0
+#define ISPCCDC_CLAMP_OBST_SHIFT		10
+#define ISPCCDC_CLAMP_OBSLN_SHIFT		25
+#define ISPCCDC_CLAMP_OBSLEN_SHIFT		28
+#define ISPCCDC_CLAMP_CLAMPEN			(1 << 31)
+
+#define ISPCCDC_COLPTN_R_Ye			0x0
+#define ISPCCDC_COLPTN_Gr_Cy			0x1
+#define ISPCCDC_COLPTN_Gb_G			0x2
+#define ISPCCDC_COLPTN_B_Mg			0x3
+#define ISPCCDC_COLPTN_CP0PLC0_SHIFT		0
+#define ISPCCDC_COLPTN_CP0PLC1_SHIFT		2
+#define ISPCCDC_COLPTN_CP0PLC2_SHIFT		4
+#define ISPCCDC_COLPTN_CP0PLC3_SHIFT		6
+#define ISPCCDC_COLPTN_CP1PLC0_SHIFT		8
+#define ISPCCDC_COLPTN_CP1PLC1_SHIFT		10
+#define ISPCCDC_COLPTN_CP1PLC2_SHIFT		12
+#define ISPCCDC_COLPTN_CP1PLC3_SHIFT		14
+#define ISPCCDC_COLPTN_CP2PLC0_SHIFT		16
+#define ISPCCDC_COLPTN_CP2PLC1_SHIFT		18
+#define ISPCCDC_COLPTN_CP2PLC2_SHIFT		20
+#define ISPCCDC_COLPTN_CP2PLC3_SHIFT		22
+#define ISPCCDC_COLPTN_CP3PLC0_SHIFT		24
+#define ISPCCDC_COLPTN_CP3PLC1_SHIFT		26
+#define ISPCCDC_COLPTN_CP3PLC2_SHIFT		28
+#define ISPCCDC_COLPTN_CP3PLC3_SHIFT		30
+
+#define ISPCCDC_BLKCMP_B_MG_SHIFT		0
+#define ISPCCDC_BLKCMP_GB_G_SHIFT		8
+#define ISPCCDC_BLKCMP_GR_CY_SHIFT		16
+#define ISPCCDC_BLKCMP_R_YE_SHIFT		24
+
+#define ISPCCDC_FPC_FPNUM_SHIFT			0
+#define ISPCCDC_FPC_FPCEN			(1 << 15)
+#define ISPCCDC_FPC_FPERR			(1 << 16)
+
+#define ISPCCDC_VDINT_1_SHIFT			0
+#define ISPCCDC_VDINT_1_MASK			0x00007fff
+#define ISPCCDC_VDINT_0_SHIFT			16
+#define ISPCCDC_VDINT_0_MASK			0x7fff0000
+
+#define ISPCCDC_ALAW_GWDI_12_3			(0x3 << 0)
+#define ISPCCDC_ALAW_GWDI_11_2			(0x4 << 0)
+#define ISPCCDC_ALAW_GWDI_10_1			(0x5 << 0)
+#define ISPCCDC_ALAW_GWDI_9_0			(0x6 << 0)
+#define ISPCCDC_ALAW_CCDTBL			(1 << 3)
+
+#define ISPCCDC_REC656IF_R656ON			1
+#define ISPCCDC_REC656IF_ECCFVH			(1 << 1)
+
+#define ISPCCDC_CFG_BW656			(1 << 5)
+#define ISPCCDC_CFG_FIDMD_SHIFT			6
+#define ISPCCDC_CFG_WENLOG			(1 << 8)
+#define ISPCCDC_CFG_WENLOG_AND			(0 << 8)
+#define ISPCCDC_CFG_WENLOG_OR			(1 << 8)
+#define ISPCCDC_CFG_Y8POS			(1 << 11)
+#define ISPCCDC_CFG_BSWD			(1 << 12)
+#define ISPCCDC_CFG_MSBINVI			(1 << 13)
+#define ISPCCDC_CFG_VDLC			(1 << 15)
+
+#define ISPCCDC_FMTCFG_FMTEN			0x1
+#define ISPCCDC_FMTCFG_LNALT			(1 << 1)
+#define ISPCCDC_FMTCFG_LNUM_SHIFT		2
+#define ISPCCDC_FMTCFG_PLEN_ODD_SHIFT		4
+#define ISPCCDC_FMTCFG_PLEN_EVEN_SHIFT		8
+#define ISPCCDC_FMTCFG_VPIN_MASK		0x00007000
+#define ISPCCDC_FMTCFG_VPIN_12_3		(0x3 << 12)
+#define ISPCCDC_FMTCFG_VPIN_11_2		(0x4 << 12)
+#define ISPCCDC_FMTCFG_VPIN_10_1		(0x5 << 12)
+#define ISPCCDC_FMTCFG_VPIN_9_0			(0x6 << 12)
+#define ISPCCDC_FMTCFG_VPEN			(1 << 15)
+
+#define ISPCCDC_FMTCFG_VPIF_FRQ_MASK		0x003f0000
+#define ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT		16
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY2		(0x0 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY3		(0x1 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY4		(0x2 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY5		(0x3 << 16)
+#define ISPCCDC_FMTCFG_VPIF_FRQ_BY6		(0x4 << 16)
+
+#define ISPCCDC_FMT_HORZ_FMTLNH_SHIFT		0
+#define ISPCCDC_FMT_HORZ_FMTSPH_SHIFT		16
+
+#define ISPCCDC_FMT_VERT_FMTLNV_SHIFT		0
+#define ISPCCDC_FMT_VERT_FMTSLV_SHIFT		16
+
+#define ISPCCDC_FMT_HORZ_FMTSPH_MASK		0x1fff0000
+#define ISPCCDC_FMT_HORZ_FMTLNH_MASK		0x00001fff
+
+#define ISPCCDC_FMT_VERT_FMTSLV_MASK		0x1fff0000
+#define ISPCCDC_FMT_VERT_FMTLNV_MASK		0x00001fff
+
+#define ISPCCDC_VP_OUT_HORZ_ST_SHIFT		0
+#define ISPCCDC_VP_OUT_HORZ_NUM_SHIFT		4
+#define ISPCCDC_VP_OUT_VERT_NUM_SHIFT		17
+
+#define ISPRSZ_PID_PREV_SHIFT			0
+#define ISPRSZ_PID_CID_SHIFT			8
+#define ISPRSZ_PID_TID_SHIFT			16
+
+#define ISPRSZ_PCR_ENABLE			(1 << 0)
+#define ISPRSZ_PCR_BUSY				(1 << 1)
+#define ISPRSZ_PCR_ONESHOT			(1 << 2)
+
+#define ISPRSZ_CNT_HRSZ_SHIFT			0
+#define ISPRSZ_CNT_HRSZ_MASK			\
+	(0x3FF << ISPRSZ_CNT_HRSZ_SHIFT)
+#define ISPRSZ_CNT_VRSZ_SHIFT			10
+#define ISPRSZ_CNT_VRSZ_MASK			\
+	(0x3FF << ISPRSZ_CNT_VRSZ_SHIFT)
+#define ISPRSZ_CNT_HSTPH_SHIFT			20
+#define ISPRSZ_CNT_HSTPH_MASK			(0x7 << ISPRSZ_CNT_HSTPH_SHIFT)
+#define ISPRSZ_CNT_VSTPH_SHIFT			23
+#define ISPRSZ_CNT_VSTPH_MASK			(0x7 << ISPRSZ_CNT_VSTPH_SHIFT)
+#define ISPRSZ_CNT_YCPOS			(1 << 26)
+#define ISPRSZ_CNT_INPTYP			(1 << 27)
+#define ISPRSZ_CNT_INPSRC			(1 << 28)
+#define ISPRSZ_CNT_CBILIN			(1 << 29)
+
+#define ISPRSZ_OUT_SIZE_HORZ_SHIFT		0
+#define ISPRSZ_OUT_SIZE_HORZ_MASK		\
+	(0xFFF << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
+#define ISPRSZ_OUT_SIZE_VERT_SHIFT		16
+#define ISPRSZ_OUT_SIZE_VERT_MASK		\
+	(0xFFF << ISPRSZ_OUT_SIZE_VERT_SHIFT)
+
+#define ISPRSZ_IN_START_HORZ_ST_SHIFT		0
+#define ISPRSZ_IN_START_HORZ_ST_MASK		\
+	(0x1FFF << ISPRSZ_IN_START_HORZ_ST_SHIFT)
+#define ISPRSZ_IN_START_VERT_ST_SHIFT		16
+#define ISPRSZ_IN_START_VERT_ST_MASK		\
+	(0x1FFF << ISPRSZ_IN_START_VERT_ST_SHIFT)
+
+#define ISPRSZ_IN_SIZE_HORZ_SHIFT		0
+#define ISPRSZ_IN_SIZE_HORZ_MASK		\
+	(0x1FFF << ISPRSZ_IN_SIZE_HORZ_SHIFT)
+#define ISPRSZ_IN_SIZE_VERT_SHIFT		16
+#define ISPRSZ_IN_SIZE_VERT_MASK		\
+	(0x1FFF << ISPRSZ_IN_SIZE_VERT_SHIFT)
+
+#define ISPRSZ_SDR_INADD_ADDR_SHIFT		0
+#define ISPRSZ_SDR_INADD_ADDR_MASK		0xFFFFFFFF
+
+#define ISPRSZ_SDR_INOFF_OFFSET_SHIFT		0
+#define ISPRSZ_SDR_INOFF_OFFSET_MASK		\
+	(0xFFFF << ISPRSZ_SDR_INOFF_OFFSET_SHIFT)
+
+#define ISPRSZ_SDR_OUTADD_ADDR_SHIFT		0
+#define ISPRSZ_SDR_OUTADD_ADDR_MASK		0xFFFFFFFF
+
+#define ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT		0
+#define ISPRSZ_SDR_OUTOFF_OFFSET_MASK		\
+	(0xFFFF << ISPRSZ_SDR_OUTOFF_OFFSET_SHIFT)
+
+#define ISPRSZ_HFILT_COEF0_SHIFT		0
+#define ISPRSZ_HFILT_COEF0_MASK			\
+	(0x3FF << ISPRSZ_HFILT_COEF0_SHIFT)
+#define ISPRSZ_HFILT_COEF1_SHIFT		16
+#define ISPRSZ_HFILT_COEF1_MASK			\
+	(0x3FF << ISPRSZ_HFILT_COEF1_SHIFT)
+
+#define ISPRSZ_HFILT32_COEF2_SHIFT		0
+#define ISPRSZ_HFILT32_COEF2_MASK		0x3FF
+#define ISPRSZ_HFILT32_COEF3_SHIFT		16
+#define ISPRSZ_HFILT32_COEF3_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT54_COEF4_SHIFT		0
+#define ISPRSZ_HFILT54_COEF4_MASK		0x3FF
+#define ISPRSZ_HFILT54_COEF5_SHIFT		16
+#define ISPRSZ_HFILT54_COEF5_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT76_COEFF6_SHIFT		0
+#define ISPRSZ_HFILT76_COEFF6_MASK		0x3FF
+#define ISPRSZ_HFILT76_COEFF7_SHIFT		16
+#define ISPRSZ_HFILT76_COEFF7_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT98_COEFF8_SHIFT		0
+#define ISPRSZ_HFILT98_COEFF8_MASK		0x3FF
+#define ISPRSZ_HFILT98_COEFF9_SHIFT		16
+#define ISPRSZ_HFILT98_COEFF9_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT1110_COEF10_SHIFT		0
+#define ISPRSZ_HFILT1110_COEF10_MASK		0x3FF
+#define ISPRSZ_HFILT1110_COEF11_SHIFT		16
+#define ISPRSZ_HFILT1110_COEF11_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT1312_COEFF12_SHIFT		0
+#define ISPRSZ_HFILT1312_COEFF12_MASK		0x3FF
+#define ISPRSZ_HFILT1312_COEFF13_SHIFT		16
+#define ISPRSZ_HFILT1312_COEFF13_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT1514_COEFF14_SHIFT		0
+#define ISPRSZ_HFILT1514_COEFF14_MASK		0x3FF
+#define ISPRSZ_HFILT1514_COEFF15_SHIFT		16
+#define ISPRSZ_HFILT1514_COEFF15_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT1716_COEF16_SHIFT		0
+#define ISPRSZ_HFILT1716_COEF16_MASK		0x3FF
+#define ISPRSZ_HFILT1716_COEF17_SHIFT		16
+#define ISPRSZ_HFILT1716_COEF17_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT1918_COEF18_SHIFT		0
+#define ISPRSZ_HFILT1918_COEF18_MASK		0x3FF
+#define ISPRSZ_HFILT1918_COEF19_SHIFT		16
+#define ISPRSZ_HFILT1918_COEF19_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT2120_COEF20_SHIFT		0
+#define ISPRSZ_HFILT2120_COEF20_MASK		0x3FF
+#define ISPRSZ_HFILT2120_COEF21_SHIFT		16
+#define ISPRSZ_HFILT2120_COEF21_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT2322_COEF22_SHIFT		0
+#define ISPRSZ_HFILT2322_COEF22_MASK		0x3FF
+#define ISPRSZ_HFILT2322_COEF23_SHIFT		16
+#define ISPRSZ_HFILT2322_COEF23_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT2524_COEF24_SHIFT		0
+#define ISPRSZ_HFILT2524_COEF24_MASK		0x3FF
+#define ISPRSZ_HFILT2524_COEF25_SHIFT		16
+#define ISPRSZ_HFILT2524_COEF25_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT2726_COEF26_SHIFT		0
+#define ISPRSZ_HFILT2726_COEF26_MASK		0x3FF
+#define ISPRSZ_HFILT2726_COEF27_SHIFT		16
+#define ISPRSZ_HFILT2726_COEF27_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT2928_COEF28_SHIFT		0
+#define ISPRSZ_HFILT2928_COEF28_MASK		0x3FF
+#define ISPRSZ_HFILT2928_COEF29_SHIFT		16
+#define ISPRSZ_HFILT2928_COEF29_MASK		0x3FF0000
+
+#define ISPRSZ_HFILT3130_COEF30_SHIFT		0
+#define ISPRSZ_HFILT3130_COEF30_MASK		0x3FF
+#define ISPRSZ_HFILT3130_COEF31_SHIFT		16
+#define ISPRSZ_HFILT3130_COEF31_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT_COEF0_SHIFT		0
+#define ISPRSZ_VFILT_COEF0_MASK			\
+	(0x3FF << ISPRSZ_VFILT_COEF0_SHIFT)
+#define ISPRSZ_VFILT_COEF1_SHIFT		16
+#define ISPRSZ_VFILT_COEF1_MASK			\
+	(0x3FF << ISPRSZ_VFILT_COEF1_SHIFT)
+
+#define ISPRSZ_VFILT10_COEF0_SHIFT		0
+#define ISPRSZ_VFILT10_COEF0_MASK		0x3FF
+#define ISPRSZ_VFILT10_COEF1_SHIFT		16
+#define ISPRSZ_VFILT10_COEF1_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT32_COEF2_SHIFT		0
+#define ISPRSZ_VFILT32_COEF2_MASK		0x3FF
+#define ISPRSZ_VFILT32_COEF3_SHIFT		16
+#define ISPRSZ_VFILT32_COEF3_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT54_COEF4_SHIFT		0
+#define ISPRSZ_VFILT54_COEF4_MASK		0x3FF
+#define ISPRSZ_VFILT54_COEF5_SHIFT		16
+#define ISPRSZ_VFILT54_COEF5_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT76_COEFF6_SHIFT		0
+#define ISPRSZ_VFILT76_COEFF6_MASK		0x3FF
+#define ISPRSZ_VFILT76_COEFF7_SHIFT		16
+#define ISPRSZ_VFILT76_COEFF7_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT98_COEFF8_SHIFT		0
+#define ISPRSZ_VFILT98_COEFF8_MASK		0x3FF
+#define ISPRSZ_VFILT98_COEFF9_SHIFT		16
+#define ISPRSZ_VFILT98_COEFF9_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT1110_COEF10_SHIFT		0
+#define ISPRSZ_VFILT1110_COEF10_MASK		0x3FF
+#define ISPRSZ_VFILT1110_COEF11_SHIFT		16
+#define ISPRSZ_VFILT1110_COEF11_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT1312_COEFF12_SHIFT		0
+#define ISPRSZ_VFILT1312_COEFF12_MASK		0x3FF
+#define ISPRSZ_VFILT1312_COEFF13_SHIFT		16
+#define ISPRSZ_VFILT1312_COEFF13_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT1514_COEFF14_SHIFT		0
+#define ISPRSZ_VFILT1514_COEFF14_MASK		0x3FF
+#define ISPRSZ_VFILT1514_COEFF15_SHIFT		16
+#define ISPRSZ_VFILT1514_COEFF15_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT1716_COEF16_SHIFT		0
+#define ISPRSZ_VFILT1716_COEF16_MASK		0x3FF
+#define ISPRSZ_VFILT1716_COEF17_SHIFT		16
+#define ISPRSZ_VFILT1716_COEF17_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT1918_COEF18_SHIFT		0
+#define ISPRSZ_VFILT1918_COEF18_MASK		0x3FF
+#define ISPRSZ_VFILT1918_COEF19_SHIFT		16
+#define ISPRSZ_VFILT1918_COEF19_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT2120_COEF20_SHIFT		0
+#define ISPRSZ_VFILT2120_COEF20_MASK		0x3FF
+#define ISPRSZ_VFILT2120_COEF21_SHIFT		16
+#define ISPRSZ_VFILT2120_COEF21_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT2322_COEF22_SHIFT		0
+#define ISPRSZ_VFILT2322_COEF22_MASK		0x3FF
+#define ISPRSZ_VFILT2322_COEF23_SHIFT		16
+#define ISPRSZ_VFILT2322_COEF23_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT2524_COEF24_SHIFT		0
+#define ISPRSZ_VFILT2524_COEF24_MASK		0x3FF
+#define ISPRSZ_VFILT2524_COEF25_SHIFT		16
+#define ISPRSZ_VFILT2524_COEF25_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT2726_COEF26_SHIFT		0
+#define ISPRSZ_VFILT2726_COEF26_MASK		0x3FF
+#define ISPRSZ_VFILT2726_COEF27_SHIFT		16
+#define ISPRSZ_VFILT2726_COEF27_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT2928_COEF28_SHIFT		0
+#define ISPRSZ_VFILT2928_COEF28_MASK		0x3FF
+#define ISPRSZ_VFILT2928_COEF29_SHIFT		16
+#define ISPRSZ_VFILT2928_COEF29_MASK		0x3FF0000
+
+#define ISPRSZ_VFILT3130_COEF30_SHIFT		0
+#define ISPRSZ_VFILT3130_COEF30_MASK		0x3FF
+#define ISPRSZ_VFILT3130_COEF31_SHIFT		16
+#define ISPRSZ_VFILT3130_COEF31_MASK		0x3FF0000
+
+#define ISPRSZ_YENH_CORE_SHIFT			0
+#define ISPRSZ_YENH_CORE_MASK			\
+	(0xFF << ISPRSZ_YENH_CORE_SHIFT)
+#define ISPRSZ_YENH_SLOP_SHIFT			8
+#define ISPRSZ_YENH_SLOP_MASK			\
+	(0xF << ISPRSZ_YENH_SLOP_SHIFT)
+#define ISPRSZ_YENH_GAIN_SHIFT			12
+#define ISPRSZ_YENH_GAIN_MASK			\
+	(0xF << ISPRSZ_YENH_GAIN_SHIFT)
+#define ISPRSZ_YENH_ALGO_SHIFT			16
+#define ISPRSZ_YENH_ALGO_MASK			\
+	(0x3 << ISPRSZ_YENH_ALGO_SHIFT)
+
+#define ISPH3A_PCR_AEW_ALAW_EN_SHIFT		1
+#define ISPH3A_PCR_AF_MED_TH_SHIFT		3
+#define ISPH3A_PCR_AF_RGBPOS_SHIFT		11
+#define ISPH3A_PCR_AEW_AVE2LMT_SHIFT		22
+#define ISPH3A_PCR_AEW_AVE2LMT_MASK		0xFFC00000
+#define ISPH3A_PCR_BUSYAF			(1 << 15)
+#define ISPH3A_PCR_BUSYAEAWB			(1 << 18)
+
+#define ISPH3A_AEWWIN1_WINHC_SHIFT		0
+#define ISPH3A_AEWWIN1_WINHC_MASK		0x3F
+#define ISPH3A_AEWWIN1_WINVC_SHIFT		6
+#define ISPH3A_AEWWIN1_WINVC_MASK		0x1FC0
+#define ISPH3A_AEWWIN1_WINW_SHIFT		13
+#define ISPH3A_AEWWIN1_WINW_MASK		0xFE000
+#define ISPH3A_AEWWIN1_WINH_SHIFT		24
+#define ISPH3A_AEWWIN1_WINH_MASK		0x7F000000
+
+#define ISPH3A_AEWINSTART_WINSH_SHIFT		0
+#define ISPH3A_AEWINSTART_WINSH_MASK		0x0FFF
+#define ISPH3A_AEWINSTART_WINSV_SHIFT		16
+#define ISPH3A_AEWINSTART_WINSV_MASK		0x0FFF0000
+
+#define ISPH3A_AEWINBLK_WINH_SHIFT		0
+#define ISPH3A_AEWINBLK_WINH_MASK		0x7F
+#define ISPH3A_AEWINBLK_WINSV_SHIFT		16
+#define ISPH3A_AEWINBLK_WINSV_MASK		0x0FFF0000
+
+#define ISPH3A_AEWSUBWIN_AEWINCH_SHIFT		0
+#define ISPH3A_AEWSUBWIN_AEWINCH_MASK		0x0F
+#define ISPH3A_AEWSUBWIN_AEWINCV_SHIFT		8
+#define ISPH3A_AEWSUBWIN_AEWINCV_MASK		0x0F00
+
+#define ISPHIST_PCR_ENABLE_SHIFT	0
+#define ISPHIST_PCR_ENABLE_MASK		0x01
+#define ISPHIST_PCR_ENABLE		(1 << ISPHIST_PCR_ENABLE_SHIFT)
+#define ISPHIST_PCR_BUSY		0x02
+
+#define ISPHIST_CNT_DATASIZE_SHIFT	8
+#define ISPHIST_CNT_DATASIZE_MASK	0x0100
+#define ISPHIST_CNT_CLEAR_SHIFT		7
+#define ISPHIST_CNT_CLEAR_MASK		0x080
+#define ISPHIST_CNT_CLEAR		(1 << ISPHIST_CNT_CLEAR_SHIFT)
+#define ISPHIST_CNT_CFA_SHIFT		6
+#define ISPHIST_CNT_CFA_MASK		0x040
+#define ISPHIST_CNT_BINS_SHIFT		4
+#define ISPHIST_CNT_BINS_MASK		0x030
+#define ISPHIST_CNT_SOURCE_SHIFT	3
+#define ISPHIST_CNT_SOURCE_MASK		0x08
+#define ISPHIST_CNT_SHIFT_SHIFT		0
+#define ISPHIST_CNT_SHIFT_MASK		0x07
+
+#define ISPHIST_WB_GAIN_WG00_SHIFT	24
+#define ISPHIST_WB_GAIN_WG00_MASK	0xFF000000
+#define ISPHIST_WB_GAIN_WG01_SHIFT	16
+#define ISPHIST_WB_GAIN_WG01_MASK	0xFF0000
+#define ISPHIST_WB_GAIN_WG02_SHIFT	8
+#define ISPHIST_WB_GAIN_WG02_MASK	0xFF00
+#define ISPHIST_WB_GAIN_WG03_SHIFT	0
+#define ISPHIST_WB_GAIN_WG03_MASK	0xFF
+
+#define ISPHIST_REG_START_END_MASK		0x3FFF
+#define ISPHIST_REG_START_SHIFT			16
+#define ISPHIST_REG_END_SHIFT			0
+#define ISPHIST_REG_START_MASK			(ISPHIST_REG_START_END_MASK << \
+						 ISPHIST_REG_START_SHIFT)
+#define ISPHIST_REG_END_MASK			(ISPHIST_REG_START_END_MASK << \
+						 ISPHIST_REG_END_SHIFT)
+
+#define ISPHIST_REG_MASK			(ISPHIST_REG_START_MASK | \
+						 ISPHIST_REG_END_MASK)
+
+#define ISPHIST_ADDR_SHIFT			0
+#define ISPHIST_ADDR_MASK			0x3FF
+
+#define ISPHIST_DATA_SHIFT			0
+#define ISPHIST_DATA_MASK			0xFFFFF
+
+#define ISPHIST_RADD_SHIFT			0
+#define ISPHIST_RADD_MASK			0xFFFFFFFF
+
+#define ISPHIST_RADD_OFF_SHIFT			0
+#define ISPHIST_RADD_OFF_MASK			0xFFFF
+
+#define ISPHIST_HV_INFO_HSIZE_SHIFT		16
+#define ISPHIST_HV_INFO_HSIZE_MASK		0x3FFF0000
+#define ISPHIST_HV_INFO_VSIZE_SHIFT		0
+#define ISPHIST_HV_INFO_VSIZE_MASK		0x3FFF
+
+#define ISPHIST_HV_INFO_MASK			0x3FFF3FFF
+
+#define ISPCCDC_LSC_ENABLE			1
+#define ISPCCDC_LSC_BUSY			(1 << 7)
+#define ISPCCDC_LSC_GAIN_MODE_N_MASK		0x700
+#define ISPCCDC_LSC_GAIN_MODE_N_SHIFT		8
+#define ISPCCDC_LSC_GAIN_MODE_M_MASK		0x3800
+#define ISPCCDC_LSC_GAIN_MODE_M_SHIFT		12
+#define ISPCCDC_LSC_GAIN_FORMAT_MASK		0xE
+#define ISPCCDC_LSC_GAIN_FORMAT_SHIFT		1
+#define ISPCCDC_LSC_AFTER_REFORMATTER_MASK	(1<<6)
+
+#define ISPCCDC_LSC_INITIAL_X_MASK		0x3F
+#define ISPCCDC_LSC_INITIAL_X_SHIFT		0
+#define ISPCCDC_LSC_INITIAL_Y_MASK		0x3F0000
+#define ISPCCDC_LSC_INITIAL_Y_SHIFT		16
+
+/* -----------------------------------------------------------------------------
+ * CSI2 receiver registers (ES2.0)
+ */
+
+#define ISPCSI2_REVISION			(0x000)
+#define ISPCSI2_SYSCONFIG			(0x010)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT	12
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK	\
+	(0x3 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_FORCE	\
+	(0x0 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO	\
+	(0x1 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART	\
+	(0x2 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
+#define ISPCSI2_SYSCONFIG_SOFT_RESET		(1 << 1)
+#define ISPCSI2_SYSCONFIG_AUTO_IDLE		(1 << 0)
+
+#define ISPCSI2_SYSSTATUS			(0x014)
+#define ISPCSI2_SYSSTATUS_RESET_DONE		(1 << 0)
+
+#define ISPCSI2_IRQSTATUS			(0x018)
+#define ISPCSI2_IRQSTATUS_OCP_ERR_IRQ		(1 << 14)
+#define ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ	(1 << 13)
+#define ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ	(1 << 12)
+#define ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ	(1 << 11)
+#define ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ	(1 << 10)
+#define ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ	(1 << 9)
+#define ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ		(1 << 8)
+#define ISPCSI2_IRQSTATUS_CONTEXT(n)		(1 << (n))
+
+#define ISPCSI2_IRQENABLE			(0x01c)
+#define ISPCSI2_CTRL				(0x040)
+#define ISPCSI2_CTRL_VP_CLK_EN			(1 << 15)
+#define ISPCSI2_CTRL_VP_ONLY_EN			(1 << 11)
+#define ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT		8
+#define ISPCSI2_CTRL_VP_OUT_CTRL_MASK		\
+	(3 << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
+#define ISPCSI2_CTRL_DBG_EN			(1 << 7)
+#define ISPCSI2_CTRL_BURST_SIZE_SHIFT		5
+#define ISPCSI2_CTRL_BURST_SIZE_MASK		\
+	(3 << ISPCSI2_CTRL_BURST_SIZE_SHIFT)
+#define ISPCSI2_CTRL_FRAME			(1 << 3)
+#define ISPCSI2_CTRL_ECC_EN			(1 << 2)
+#define ISPCSI2_CTRL_SECURE			(1 << 1)
+#define ISPCSI2_CTRL_IF_EN			(1 << 0)
+
+#define ISPCSI2_DBG_H				(0x044)
+#define ISPCSI2_GNQ				(0x048)
+#define ISPCSI2_PHY_CFG				(0x050)
+#define ISPCSI2_PHY_CFG_RESET_CTRL		(1 << 30)
+#define ISPCSI2_PHY_CFG_RESET_DONE		(1 << 29)
+#define ISPCSI2_PHY_CFG_PWR_CMD_SHIFT		27
+#define ISPCSI2_PHY_CFG_PWR_CMD_MASK		\
+	(0x3 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_CMD_OFF		\
+	(0x0 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_CMD_ON		\
+	(0x1 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_CMD_ULPW		\
+	(0x2 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT	25
+#define ISPCSI2_PHY_CFG_PWR_STATUS_MASK		\
+	(0x3 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_OFF		\
+	(0x0 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_ON		\
+	(0x1 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_STATUS_ULPW		\
+	(0x2 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
+#define ISPCSI2_PHY_CFG_PWR_AUTO		(1 << 24)
+
+#define ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n)	(3 + ((n) * 4))
+#define ISPCSI2_PHY_CFG_DATA_POL_MASK(n)	\
+	(0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POL_PN(n)		\
+	(0x0 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POL_NP(n)		\
+	(0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n))
+
+#define ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)	((n) * 4)
+#define ISPCSI2_PHY_CFG_DATA_POSITION_MASK(n)	\
+	(0x7 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_NC(n)	\
+	(0x0 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_1(n)	\
+	(0x1 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_2(n)	\
+	(0x2 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_3(n)	\
+	(0x3 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_4(n)	\
+	(0x4 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+#define ISPCSI2_PHY_CFG_DATA_POSITION_5(n)	\
+	(0x5 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n))
+
+#define ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT		3
+#define ISPCSI2_PHY_CFG_CLOCK_POL_MASK		\
+	(0x1 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POL_PN		\
+	(0x0 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POL_NP		\
+	(0x1 << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT)
+
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT	0
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK	\
+	(0x7 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_1	\
+	(0x1 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_2	\
+	(0x2 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_3	\
+	(0x3 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_4	\
+	(0x4 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+#define ISPCSI2_PHY_CFG_CLOCK_POSITION_5	\
+	(0x5 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
+
+#define ISPCSI2_PHY_IRQSTATUS			(0x054)
+#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMEXIT	(1 << 26)
+#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMENTER	(1 << 25)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM5	(1 << 24)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM4	(1 << 23)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM3	(1 << 22)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM2	(1 << 21)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM1	(1 << 20)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL5	(1 << 19)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL4	(1 << 18)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL3	(1 << 17)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL2	(1 << 16)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL1	(1 << 15)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC5		(1 << 14)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC4		(1 << 13)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC3		(1 << 12)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC2		(1 << 11)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC1		(1 << 10)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS5	(1 << 9)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS4	(1 << 8)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS3	(1 << 7)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS2	(1 << 6)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS1	(1 << 5)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS5		(1 << 4)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS4		(1 << 3)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS3		(1 << 2)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS2		(1 << 1)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS1		1
+
+#define ISPCSI2_SHORT_PACKET			(0x05c)
+#define ISPCSI2_PHY_IRQENABLE			(0x060)
+#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT	(1 << 26)
+#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER	(1 << 25)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM5	(1 << 24)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM4	(1 << 23)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM3	(1 << 22)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM2	(1 << 21)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM1	(1 << 20)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL5	(1 << 19)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL4	(1 << 18)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL3	(1 << 17)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL2	(1 << 16)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL1	(1 << 15)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC5		(1 << 14)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC4		(1 << 13)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC3		(1 << 12)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC2		(1 << 11)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC1		(1 << 10)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5	(1 << 9)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4	(1 << 8)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3	(1 << 7)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2	(1 << 6)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1	(1 << 5)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS5		(1 << 4)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS4		(1 << 3)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS3		(1 << 2)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS2		(1 << 1)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS1		(1 << 0)
+
+#define ISPCSI2_DBG_P				(0x068)
+#define ISPCSI2_TIMING				(0x06c)
+#define ISPCSI2_TIMING_FORCE_RX_MODE_IO(n)	(1 << ((16 * ((n) - 1)) + 15))
+#define ISPCSI2_TIMING_STOP_STATE_X16_IO(n)	(1 << ((16 * ((n) - 1)) + 14))
+#define ISPCSI2_TIMING_STOP_STATE_X4_IO(n)	(1 << ((16 * ((n) - 1)) + 13))
+#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n)	(16 * ((n) - 1))
+#define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(n)	\
+	(0x1fff << ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n))
+
+#define ISPCSI2_CTX_CTRL1(n)			((0x070) + 0x20 * (n))
+#define ISPCSI2_CTX_CTRL1_COUNT_SHIFT		8
+#define ISPCSI2_CTX_CTRL1_COUNT_MASK		\
+	(0xff << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
+#define ISPCSI2_CTX_CTRL1_EOF_EN		(1 << 7)
+#define ISPCSI2_CTX_CTRL1_EOL_EN		(1 << 6)
+#define ISPCSI2_CTX_CTRL1_CS_EN			(1 << 5)
+#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK		(1 << 4)
+#define ISPCSI2_CTX_CTRL1_PING_PONG		(1 << 3)
+#define ISPCSI2_CTX_CTRL1_CTX_EN		(1 << 0)
+
+#define ISPCSI2_CTX_CTRL2(n)			((0x074) + 0x20 * (n))
+#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT	13
+#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK	\
+	(0x3 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
+#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT	11
+#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK	\
+	(0x3 <<	ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT)
+#define ISPCSI2_CTX_CTRL2_DPCM_PRED		(1 << 10)
+#define ISPCSI2_CTX_CTRL2_FORMAT_SHIFT		0
+#define ISPCSI2_CTX_CTRL2_FORMAT_MASK		\
+	(0x3ff << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT)
+#define ISPCSI2_CTX_CTRL2_FRAME_SHIFT		16
+#define ISPCSI2_CTX_CTRL2_FRAME_MASK		\
+	(0xffff << ISPCSI2_CTX_CTRL2_FRAME_SHIFT)
+
+#define ISPCSI2_CTX_DAT_OFST(n)			((0x078) + 0x20 * (n))
+#define ISPCSI2_CTX_DAT_OFST_OFST_SHIFT		0
+#define ISPCSI2_CTX_DAT_OFST_OFST_MASK		\
+	(0x1ffe0 << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT)
+
+#define ISPCSI2_CTX_DAT_PING_ADDR(n)		((0x07c) + 0x20 * (n))
+#define ISPCSI2_CTX_DAT_PONG_ADDR(n)		((0x080) + 0x20 * (n))
+#define ISPCSI2_CTX_IRQENABLE(n)		((0x084) + 0x20 * (n))
+#define ISPCSI2_CTX_IRQENABLE_ECC_CORRECTION_IRQ	(1 << 8)
+#define ISPCSI2_CTX_IRQENABLE_LINE_NUMBER_IRQ	(1 << 7)
+#define ISPCSI2_CTX_IRQENABLE_FRAME_NUMBER_IRQ	(1 << 6)
+#define ISPCSI2_CTX_IRQENABLE_CS_IRQ		(1 << 5)
+#define ISPCSI2_CTX_IRQENABLE_LE_IRQ		(1 << 3)
+#define ISPCSI2_CTX_IRQENABLE_LS_IRQ		(1 << 2)
+#define ISPCSI2_CTX_IRQENABLE_FE_IRQ		(1 << 1)
+#define ISPCSI2_CTX_IRQENABLE_FS_IRQ		(1 << 0)
+
+#define ISPCSI2_CTX_IRQSTATUS(n)		((0x088) + 0x20 * (n))
+#define ISPCSI2_CTX_IRQSTATUS_ECC_CORRECTION_IRQ	(1 << 8)
+#define ISPCSI2_CTX_IRQSTATUS_LINE_NUMBER_IRQ	(1 << 7)
+#define ISPCSI2_CTX_IRQSTATUS_FRAME_NUMBER_IRQ	(1 << 6)
+#define ISPCSI2_CTX_IRQSTATUS_CS_IRQ		(1 << 5)
+#define ISPCSI2_CTX_IRQSTATUS_LE_IRQ		(1 << 3)
+#define ISPCSI2_CTX_IRQSTATUS_LS_IRQ		(1 << 2)
+#define ISPCSI2_CTX_IRQSTATUS_FE_IRQ		(1 << 1)
+#define ISPCSI2_CTX_IRQSTATUS_FS_IRQ		(1 << 0)
+
+#define ISPCSI2_CTX_CTRL3(n)			((0x08c) + 0x20 * (n))
+#define ISPCSI2_CTX_CTRL3_ALPHA_SHIFT		5
+#define ISPCSI2_CTX_CTRL3_ALPHA_MASK		\
+	(0x3fff << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT)
+
+/* This instance is for OMAP3630 only */
+#define ISPCSI2_CTX_TRANSCODEH(n)		(0x000 + 0x8 * (n))
+#define ISPCSI2_CTX_TRANSCODEH_HCOUNT_SHIFT	16
+#define ISPCSI2_CTX_TRANSCODEH_HCOUNT_MASK	\
+	(0x1fff << ISPCSI2_CTX_TRANSCODEH_HCOUNT_SHIFT)
+#define ISPCSI2_CTX_TRANSCODEH_HSKIP_SHIFT	0
+#define ISPCSI2_CTX_TRANSCODEH_HSKIP_MASK	\
+	(0x1fff << ISPCSI2_CTX_TRANSCODEH_HSKIP_SHIFT)
+#define ISPCSI2_CTX_TRANSCODEV(n)		(0x004 + 0x8 * (n))
+#define ISPCSI2_CTX_TRANSCODEV_VCOUNT_SHIFT	16
+#define ISPCSI2_CTX_TRANSCODEV_VCOUNT_MASK	\
+	(0x1fff << ISPCSI2_CTX_TRANSCODEV_VCOUNT_SHIFT)
+#define ISPCSI2_CTX_TRANSCODEV_VSKIP_SHIFT	0
+#define ISPCSI2_CTX_TRANSCODEV_VSKIP_MASK	\
+	(0x1fff << ISPCSI2_CTX_TRANSCODEV_VSKIP_SHIFT)
+
+/* -----------------------------------------------------------------------------
+ * CSI PHY registers
+ */
+
+#define ISPCSIPHY_REG0				(0x000)
+#define ISPCSIPHY_REG0_THS_TERM_SHIFT		8
+#define ISPCSIPHY_REG0_THS_TERM_MASK		\
+	(0xff << ISPCSIPHY_REG0_THS_TERM_SHIFT)
+#define ISPCSIPHY_REG0_THS_SETTLE_SHIFT		0
+#define ISPCSIPHY_REG0_THS_SETTLE_MASK		\
+	(0xff << ISPCSIPHY_REG0_THS_SETTLE_SHIFT)
+
+#define ISPCSIPHY_REG1					(0x004)
+#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK		(1 << 29)
+/* This field is for OMAP3630 only */
+#define ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS	(1 << 25)
+#define ISPCSIPHY_REG1_TCLK_TERM_SHIFT			18
+#define ISPCSIPHY_REG1_TCLK_TERM_MASK			\
+	(0x7f << ISPCSIPHY_REG1_TCLK_TERM_SHIFT)
+#define ISPCSIPHY_REG1_DPHY_HS_SYNC_PATTERN_SHIFT	10
+#define ISPCSIPHY_REG1_DPHY_HS_SYNC_PATTERN_MASK	\
+	(0xff << ISPCSIPHY_REG1_DPHY_HS_SYNC_PATTERN_SHIFT)
+/* This field is for OMAP3430 only */
+#define ISPCSIPHY_REG1_TCLK_MISS_SHIFT			8
+#define ISPCSIPHY_REG1_TCLK_MISS_MASK			\
+	(0x3 << ISPCSIPHY_REG1_TCLK_MISS_SHIFT)
+/* This field is for OMAP3630 only */
+#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT		8
+#define ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_MASK		\
+	(0x3 << ISPCSIPHY_REG1_CTRLCLK_DIV_FACTOR_SHIFT)
+#define ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT		0
+#define ISPCSIPHY_REG1_TCLK_SETTLE_MASK			\
+	(0xff << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT)
+
+/* This register is for OMAP3630 only */
+#define ISPCSIPHY_REG2					(0x008)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_SHIFT	30
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK	\
+	(0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC0_SHIFT)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_SHIFT	28
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK	\
+	(0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC1_SHIFT)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_SHIFT	26
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK	\
+	(0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC2_SHIFT)
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_SHIFT	24
+#define ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK	\
+	(0x3 << ISPCSIPHY_REG2_TRIGGER_CMD_RXTRIGESC3_SHIFT)
+#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT		0
+#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_MASK		\
+	(0x7fffff << ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT)
+
+#endif	/* OMAP3_ISP_REG_H */
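Editor's note: the header above is almost entirely SHIFT/MASK pairs, and the driver code composes register values from them with a read-modify-write idiom. A minimal, hypothetical sketch of that idiom using the ISPRSZ_CNT ratio fields defined earlier (the helper name and values are illustrative only, not part of this patch):

static inline u32 isprsz_cnt_set_ratios(u32 cnt, u32 hrsz, u32 vrsz)
{
	/* Clear both ratio fields, then insert the new values in place. */
	cnt &= ~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK);
	cnt |= (hrsz << ISPRSZ_CNT_HRSZ_SHIFT) & ISPRSZ_CNT_HRSZ_MASK;
	cnt |= (vrsz << ISPRSZ_CNT_VRSZ_SHIFT) & ISPRSZ_CNT_VRSZ_MASK;
	return cnt;
}

This is the same pattern resizer_set_ratio() in the file below applies to the live register.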
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
new file mode 100644
index 0000000..d11fb26
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -0,0 +1,1778 @@
+/*
+ * ispresizer.c
+ *
+ * TI OMAP3 ISP - Resizer module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include "isp.h"
+#include "ispreg.h"
+#include "ispresizer.h"
+
+/*
+ * Resizer Constants
+ */
+#define MIN_RESIZE_VALUE		64
+#define MID_RESIZE_VALUE		512
+#define MAX_RESIZE_VALUE		1024
+
+#define MIN_IN_WIDTH			32
+#define MIN_IN_HEIGHT			32
+#define MAX_IN_WIDTH_MEMORY_MODE	4095
+#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES1	1280
+#define MAX_IN_WIDTH_ONTHEFLY_MODE_ES2	4095
+#define MAX_IN_HEIGHT			4095
+
+#define MIN_OUT_WIDTH			16
+#define MIN_OUT_HEIGHT			2
+#define MAX_OUT_HEIGHT			4095
+
+/*
+ * Resizer Use Constraints
+ * "TRM ES3.1, table 12-46"
+ */
+#define MAX_4TAP_OUT_WIDTH_ES1		1280
+#define MAX_7TAP_OUT_WIDTH_ES1		640
+#define MAX_4TAP_OUT_WIDTH_ES2		3312
+#define MAX_7TAP_OUT_WIDTH_ES2		1650
+#define MAX_4TAP_OUT_WIDTH_3630		4096
+#define MAX_7TAP_OUT_WIDTH_3630		2048
+
+/*
+ * Constants for ratio calculation
+ */
+#define RESIZE_DIVISOR			256
+#define DEFAULT_PHASE			1
+
+/*
+ * Default (and only) configuration of filter coefficients.
+ * 7-tap mode is for scale factors 0.25x to 0.5x.
+ * 4-tap mode is for scale factors 0.5x to 4.0x.
+ * There shouldn't be any reason to recalculate these, EVER.
+ */
+static const struct isprsz_coef filter_coefs = {
+	/* For 8-phase 4-tap horizontal filter: */
+	{
+		0x0000, 0x0100, 0x0000, 0x0000,
+		0x03FA, 0x00F6, 0x0010, 0x0000,
+		0x03F9, 0x00DB, 0x002C, 0x0000,
+		0x03FB, 0x00B3, 0x0053, 0x03FF,
+		0x03FD, 0x0082, 0x0084, 0x03FD,
+		0x03FF, 0x0053, 0x00B3, 0x03FB,
+		0x0000, 0x002C, 0x00DB, 0x03F9,
+		0x0000, 0x0010, 0x00F6, 0x03FA
+	},
+	/* For 8-phase 4-tap vertical filter: */
+	{
+		0x0000, 0x0100, 0x0000, 0x0000,
+		0x03FA, 0x00F6, 0x0010, 0x0000,
+		0x03F9, 0x00DB, 0x002C, 0x0000,
+		0x03FB, 0x00B3, 0x0053, 0x03FF,
+		0x03FD, 0x0082, 0x0084, 0x03FD,
+		0x03FF, 0x0053, 0x00B3, 0x03FB,
+		0x0000, 0x002C, 0x00DB, 0x03F9,
+		0x0000, 0x0010, 0x00F6, 0x03FA
+	},
+	/* For 4-phase 7-tap horizontal filter: */
+	#define DUMMY 0
+	{
+		0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY,
+		0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY,
+		0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY,
+		0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY
+	},
+	/* For 4-phase 7-tap vertical filter: */
+	{
+		0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY,
+		0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY,
+		0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY,
+		0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY
+	}
+	/*
+	 * The dummy padding is required in 7-tap mode because of how the
+	 * registers are arranged physically.
+	 */
+	#undef DUMMY
+};
+
+/*
+ * __resizer_get_format - helper function for getting resizer format
+ * @res   : pointer to resizer private structure
+ * @fh    : V4L2 subdev file handle
+ * @pad   : pad number
+ * @which : wanted subdev format
+ * return the requested format (TRY or active) for the pad
+ */
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
+		     unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &res->formats[pad];
+}
+
+/*
+ * __resizer_get_crop - helper function for getting resizer crop rectangle
+ * @res   : pointer to resizer private structure
+ * @fh    : V4L2 subdev file handle
+ * @which : wanted subdev crop rectangle
+ */
+static struct v4l2_rect *
+__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_fh *fh,
+		   enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_crop(fh, RESZ_PAD_SINK);
+	else
+		return &res->crop.request;
+}
+
+/*
+ * resizer_set_filters - Set resizer filters
+ * @res: Device context.
+ * @h_coeff: horizontal coefficient
+ * @v_coeff: vertical coefficient
+ * Return none
+ */
+static void resizer_set_filters(struct isp_res_device *res, const u16 *h_coeff,
+				const u16 *v_coeff)
+{
+	struct isp_device *isp = to_isp_device(res);
+	u32 startaddr_h, startaddr_v, tmp_h, tmp_v;
+	int i;
+
+	startaddr_h = ISPRSZ_HFILT10;
+	startaddr_v = ISPRSZ_VFILT10;
+
+	for (i = 0; i < COEFF_CNT; i += 2) {
+		tmp_h = h_coeff[i] |
+			(h_coeff[i + 1] << ISPRSZ_HFILT_COEF1_SHIFT);
+		tmp_v = v_coeff[i] |
+			(v_coeff[i + 1] << ISPRSZ_VFILT_COEF1_SHIFT);
+		isp_reg_writel(isp, tmp_h, OMAP3_ISP_IOMEM_RESZ, startaddr_h);
+		isp_reg_writel(isp, tmp_v, OMAP3_ISP_IOMEM_RESZ, startaddr_v);
+		startaddr_h += 4;
+		startaddr_v += 4;
+	}
+}
+
+/*
+ * resizer_set_bilinear - Chrominance horizontal algorithm select
+ * @res: Device context.
+ * @type: Filtering interpolation type.
+ *
+ * Filtering that is the same as the luminance processing is
+ * intended only for downsampling, while bilinear interpolation
+ * is intended only for upsampling.
+ */
+static void resizer_set_bilinear(struct isp_res_device *res,
+				 enum resizer_chroma_algo type)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	if (type == RSZ_BILINEAR)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_CBILIN);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_CBILIN);
+}
+
+/*
+ * resizer_set_ycpos - Luminance and chrominance order
+ * @res: Device context.
+ * @pixelcode: media bus pixel code, which selects the Y/C ordering.
+ */
+static void resizer_set_ycpos(struct isp_res_device *res,
+			      enum v4l2_mbus_pixelcode pixelcode)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	switch (pixelcode) {
+	case V4L2_MBUS_FMT_YUYV8_1X16:
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_YCPOS);
+		break;
+	case V4L2_MBUS_FMT_UYVY8_1X16:
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_YCPOS);
+		break;
+	default:
+		return;
+	}
+}
+
+/*
+ * resizer_set_phase - Setup horizontal and vertical starting phase
+ * @res: Device context.
+ * @h_phase: horizontal phase parameters.
+ * @v_phase: vertical phase parameters.
+ *
+ * Horizontal and vertical phase range is 0 to 7
+ */
+static void resizer_set_phase(struct isp_res_device *res, u32 h_phase,
+			      u32 v_phase)
+{
+	struct isp_device *isp = to_isp_device(res);
+	u32 rgval = 0;
+
+	rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
+	      ~(ISPRSZ_CNT_HSTPH_MASK | ISPRSZ_CNT_VSTPH_MASK);
+	rgval |= (h_phase << ISPRSZ_CNT_HSTPH_SHIFT) & ISPRSZ_CNT_HSTPH_MASK;
+	rgval |= (v_phase << ISPRSZ_CNT_VSTPH_SHIFT) & ISPRSZ_CNT_VSTPH_MASK;
+
+	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
+}
+
+/*
+ * resizer_set_luma - Setup luminance enhancer parameters
+ * @res: Device context.
+ * @luma: Structure for luminance enhancer parameters.
+ *
+ * Algorithm select:
+ *  0x0: Disable
+ *  0x1: [-1  2 -1]/2 high-pass filter
+ *  0x2: [-1 -2  6 -2 -1]/4 high-pass filter
+ *
+ * Maximum gain:
+ *  The data is coded in U4Q4 representation.
+ *
+ * Slope:
+ *  The data is coded in U4Q4 representation.
+ *
+ * Coring offset:
+ *  The data is coded in U8Q0 representation.
+ *
+ * The new luminance value is computed as:
+ *  Y += HPF(Y) x max(GAIN, (HPF(Y) - CORE) x SLOP + 8) >> 4.
+ */
+static void resizer_set_luma(struct isp_res_device *res,
+			     struct resizer_luma_yenh *luma)
+{
+	struct isp_device *isp = to_isp_device(res);
+	u32 rgval = 0;
+
+	rgval  = (luma->algo << ISPRSZ_YENH_ALGO_SHIFT)
+		  & ISPRSZ_YENH_ALGO_MASK;
+	rgval |= (luma->gain << ISPRSZ_YENH_GAIN_SHIFT)
+		  & ISPRSZ_YENH_GAIN_MASK;
+	rgval |= (luma->slope << ISPRSZ_YENH_SLOP_SHIFT)
+		  & ISPRSZ_YENH_SLOP_MASK;
+	rgval |= (luma->core << ISPRSZ_YENH_CORE_SHIFT)
+		  & ISPRSZ_YENH_CORE_MASK;
+
+	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH);
+}
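Editor's note: a minimal usage sketch for the enhancer, with purely hypothetical field values chosen to fit the 4-bit gain/slope and 8-bit coring fields; struct resizer_luma_yenh and resizer_set_luma() are the ones used in this file, and res stands for an already-initialised resizer context:

	struct resizer_luma_yenh luma = {
		.algo  = 0x1,	/* [-1 2 -1]/2 high-pass filter */
		.gain  = 0x8,	/* hypothetical maximum gain */
		.slope = 0x4,	/* hypothetical slope */
		.core  = 0x10,	/* hypothetical coring offset */
	};

	resizer_set_luma(res, &luma);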
+
+/*
+ * resizer_set_source - Input source select
+ * @res: Device context.
+ * @source: Input source type
+ *
+ * If this field is set to RESIZER_INPUT_VP, the resizer input is fed from
+ * Preview/CCDC engine, otherwise from memory.
+ */
+static void resizer_set_source(struct isp_res_device *res,
+			       enum resizer_input_entity source)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	if (source == RESIZER_INPUT_MEMORY)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_INPSRC);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_INPSRC);
+}
+
+/*
+ * resizer_set_ratio - Setup horizontal and vertical resizing value
+ * @res: Device context.
+ * @ratio: Structure for ratio parameters.
+ *
+ * Resizing range from 64 to 1024
+ */
+static void resizer_set_ratio(struct isp_res_device *res,
+			      const struct resizer_ratio *ratio)
+{
+	struct isp_device *isp = to_isp_device(res);
+	const u16 *h_filter, *v_filter;
+	u32 rgval = 0;
+
+	rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
+			      ~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK);
+	rgval |= ((ratio->horz - 1) << ISPRSZ_CNT_HRSZ_SHIFT)
+		  & ISPRSZ_CNT_HRSZ_MASK;
+	rgval |= ((ratio->vert - 1) << ISPRSZ_CNT_VRSZ_SHIFT)
+		  & ISPRSZ_CNT_VRSZ_MASK;
+	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
+
+	/* prepare horizontal filter coefficients */
+	if (ratio->horz > MID_RESIZE_VALUE)
+		h_filter = &filter_coefs.h_filter_coef_7tap[0];
+	else
+		h_filter = &filter_coefs.h_filter_coef_4tap[0];
+
+	/* prepare vertical filter coefficients */
+	if (ratio->vert > MID_RESIZE_VALUE)
+		v_filter = &filter_coefs.v_filter_coef_7tap[0];
+	else
+		v_filter = &filter_coefs.v_filter_coef_4tap[0];
+
+	resizer_set_filters(res, h_filter, v_filter);
+}
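Editor's note: for orientation, the programmed ratio is roughly 256 * input size / output size (the exact relation is given by the equations documented before resizer_calc_ratios() further down), so values above MID_RESIZE_VALUE (512) correspond to more than about 2x downscaling and select the 7-tap coefficients. A hypothetical call for a ~2.5x horizontal, ~1.5x vertical downscale:

	struct resizer_ratio ratio = {
		.horz = 640,	/* > 512, selects the 4-phase 7-tap filter */
		.vert = 384,	/* <= 512, selects the 8-phase 4-tap filter */
	};

	resizer_set_ratio(res, &ratio);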
+
+/*
+ * resizer_set_output_size - Setup the output height and width
+ * @res: Device context.
+ * @width: Output width.
+ * @height: Output height.
+ *
+ * Width :
+ *  The value must be EVEN.
+ *
+ * Height:
+ *  The number of bytes written to SDRAM must be
+ *  a multiple of 16 bytes if the vertical resizing factor
+ *  is greater than 1x (upsizing).
+ */
+static void resizer_set_output_size(struct isp_res_device *res,
+				    u32 width, u32 height)
+{
+	struct isp_device *isp = to_isp_device(res);
+	u32 rgval = 0;
+
+	dev_dbg(isp->dev, "Output size[w/h]: %dx%d\n", width, height);
+	rgval  = (width << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
+		 & ISPRSZ_OUT_SIZE_HORZ_MASK;
+	rgval |= (height << ISPRSZ_OUT_SIZE_VERT_SHIFT)
+		 & ISPRSZ_OUT_SIZE_VERT_MASK;
+	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE);
+}
+
+/*
+ * resizer_set_output_offset - Setup memory offset for the output lines.
+ * @res: Device context.
+ * @offset: Memory offset.
+ *
+ * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte
+ * boundary; the 5 LSBs are read-only. For optimal use of SDRAM bandwidth,
+ * the SDRAM line offset must be set on a 256-byte boundary
+ */
+static void resizer_set_output_offset(struct isp_res_device *res, u32 offset)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF);
+}
+
+/*
+ * resizer_set_start - Setup vertical and horizontal start position
+ * @res: Device context.
+ * @left: Horizontal start position.
+ * @top: Vertical start position.
+ *
+ * Vertical start line:
+ *  This field makes sense only when the resizer obtains its input
+ *  from the preview engine/CCDC
+ *
+ * Horizontal start pixel:
+ *  Pixels are coded on 16 bits for YUV and 8 bits for color separate data.
+ *  When the resizer gets its input from SDRAM, this field must be set
+ *  to <= 15 for YUV 16-bit data and <= 31 for 8-bit color separate data
+ */
+static void resizer_set_start(struct isp_res_device *res, u32 left, u32 top)
+{
+	struct isp_device *isp = to_isp_device(res);
+	u32 rgval = 0;
+
+	rgval = (left << ISPRSZ_IN_START_HORZ_ST_SHIFT)
+		& ISPRSZ_IN_START_HORZ_ST_MASK;
+	rgval |= (top << ISPRSZ_IN_START_VERT_ST_SHIFT)
+		 & ISPRSZ_IN_START_VERT_ST_MASK;
+
+	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START);
+}
+
+/*
+ * resizer_set_input_size - Setup the input size
+ * @res: Device context.
+ * @width: The range is 0 to 4095 pixels
+ * @height: The range is 0 to 4095 lines
+ */
+static void resizer_set_input_size(struct isp_res_device *res,
+				   u32 width, u32 height)
+{
+	struct isp_device *isp = to_isp_device(res);
+	u32 rgval = 0;
+
+	dev_dbg(isp->dev, "Input size[w/h]: %dx%d\n", width, height);
+
+	rgval = (width << ISPRSZ_IN_SIZE_HORZ_SHIFT)
+		& ISPRSZ_IN_SIZE_HORZ_MASK;
+	rgval |= (height << ISPRSZ_IN_SIZE_VERT_SHIFT)
+		 & ISPRSZ_IN_SIZE_VERT_MASK;
+
+	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE);
+}
+
+/*
+ * resizer_set_input_offset - Setup the memory offset for the input lines
+ * @res: Device context.
+ * @offset: Memory offset.
+ *
+ * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte
+ * boundary; the 5 LSBs are read-only. This field must be programmed to be
+ * 0x0 if the resizer input is from preview engine/CCDC.
+ */
+static void resizer_set_input_offset(struct isp_res_device *res, u32 offset)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF);
+}
+
+/*
+ * resizer_set_intype - Input type select
+ * @res: Device context.
+ * @type: Pixel format type.
+ */
+static void resizer_set_intype(struct isp_res_device *res,
+			       enum resizer_colors_type type)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	if (type == RSZ_COLOR8)
+		isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_INPTYP);
+	else
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
+			    ISPRSZ_CNT_INPTYP);
+}
+
+/*
+ * __resizer_set_inaddr - Helper function for set input address
+ * @res : pointer to resizer private data structure
+ * @addr: input address
+ * return none
+ */
+static void __resizer_set_inaddr(struct isp_res_device *res, u32 addr)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD);
+}
+
+/*
+ * The data rate at the horizontal resizer output must not exceed half the
+ * functional clock or 100 MP/s, whichever is lower. According to the TRM
+ * there's no similar requirement for the vertical resizer output. However
+ * experience showed that vertical upscaling by 4 leads to SBL overflows (with
+ * data rates at the resizer output exceeding 300 MP/s). Limiting the resizer
+ * output data rate to the functional clock or 200 MP/s, whichever is lower,
+ * seems to get rid of SBL overflows.
+ *
+ * The maximum data rate at the output of the horizontal resizer can thus be
+ * computed with
+ *
+ * max intermediate rate <= L3 clock * input height / output height
+ * max intermediate rate <= L3 clock / 2
+ *
+ * The maximum data rate at the resizer input is then
+ *
+ * max input rate <= max intermediate rate * input width / output width
+ *
+ * where the input width and height are the resizer input crop rectangle size.
+ * The TRM doesn't clearly explain if that's a maximum instant data rate or a
+ * maximum average data rate.
+ */
+void omap3isp_resizer_max_rate(struct isp_res_device *res,
+			       unsigned int *max_rate)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
+	const struct v4l2_mbus_framefmt *ofmt = &res->formats[RESZ_PAD_SOURCE];
+	unsigned long limit = min(pipe->l3_ick, 200000000UL);
+	unsigned long clock;
+
+	clock = div_u64((u64)limit * res->crop.active.height, ofmt->height);
+	clock = min(clock, limit / 2);
+	*max_rate = div_u64((u64)clock * res->crop.active.width, ofmt->width);
+}
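Editor's note: a hedged numeric walk-through of the two bounds above, with purely hypothetical figures (l3_ick = 200 MHz, input crop 2048x1536, output 1024x768):

	limit     = min(200 MHz, 200 MP/s)              = 200 MP/s
	clock     = min(200 M * 1536 / 768, 200 M / 2)  = 100 MP/s
	*max_rate = 100 M * 2048 / 1024                 = 200 MP/s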
+
+/*
+ * When the resizer processes images from memory, the driver must slow down read
+ * requests on the input to at least comply with the internal data rate
+ * requirements. If the application's real-time requirements can cope with slower
+ * processing, the resizer can be slowed down even more to put less pressure on
+ * the overall system.
+ *
+ * When the resizer processes images on the fly (either from the CCDC or the
+ * preview module), the same data rate requirements apply but they can't be
+ * enforced at the resizer level. The image input module (sensor, CCP2 or
+ * preview module) must not provide image data faster than the resizer can
+ * process.
+ *
+ * For live image pipelines, the data rate is set by the frame format, size and
+ * rate. The sensor output frame rate must not exceed the maximum resizer data
+ * rate.
+ *
+ * The resizer slows down read requests by inserting wait cycles in the SBL
+ * requests. The maximum number of 256-byte requests per second can be computed
+ * as (the data rate is multiplied by 2 to convert from pixels per second to
+ * bytes per second)
+ *
+ * request per second = data rate * 2 / 256
+ * cycles per request = cycles per second / requests per second
+ *
+ * The number of cycles per second is controlled by the L3 clock, leading to
+ *
+ * cycles per request = L3 frequency / 2 * 256 / data rate
+ */
+static void resizer_adjust_bandwidth(struct isp_res_device *res)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
+	struct isp_device *isp = to_isp_device(res);
+	unsigned long l3_ick = pipe->l3_ick;
+	struct v4l2_fract *timeperframe;
+	unsigned int cycles_per_frame;
+	unsigned int requests_per_frame;
+	unsigned int cycles_per_request;
+	unsigned int granularity;
+	unsigned int minimum;
+	unsigned int maximum;
+	unsigned int value;
+
+	if (res->input != RESIZER_INPUT_MEMORY) {
+		isp_reg_clr(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+			    ISPSBL_SDR_REQ_RSZ_EXP_MASK);
+		return;
+	}
+
+	switch (isp->revision) {
+	case ISP_REVISION_1_0:
+	case ISP_REVISION_2_0:
+	default:
+		granularity = 1024;
+		break;
+
+	case ISP_REVISION_15_0:
+		granularity = 32;
+		break;
+	}
+
+	/* Compute the minimum number of cycles per request, based on the
+	 * pipeline maximum data rate. This is an absolute lower bound if we
+	 * don't want SBL overflows, so round the value up.
+	 */
+	cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
+				     pipe->max_rate);
+	minimum = DIV_ROUND_UP(cycles_per_request, granularity);
+
+	/* Compute the maximum number of cycles per request, based on the
+	 * requested frame rate. This is a soft upper bound to achieve a frame
+	 * rate equal or higher than the requested value, so round the value
+	 * down.
+	 */
+	timeperframe = &pipe->max_timeperframe;
+
+	requests_per_frame = DIV_ROUND_UP(res->crop.active.width * 2, 256)
+			   * res->crop.active.height;
+	cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
+				   timeperframe->denominator);
+	cycles_per_request = cycles_per_frame / requests_per_frame;
+
+	maximum = cycles_per_request / granularity;
+
+	value = max(minimum, maximum);
+
+	dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
+	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
+			ISPSBL_SDR_REQ_RSZ_EXP_MASK,
+			value << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT);
+}
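Editor's note: a hedged example of the arithmetic above, with all numbers hypothetical. With l3_ick = 200 MHz, pipe->max_rate = 100 MP/s and an ISP revision using a granularity of 32, the overflow bound gives cycles_per_request = ceil(200 M / 2 * 256 / 100 M) = 256, hence minimum = 8. For a 1024x768 crop at 15 fps, requests_per_frame = (1024 * 2 / 256) * 768 = 6144 and cycles_per_frame = 200 M / 15 ~= 13333333, so cycles_per_request ~= 2170 and maximum = 2170 / 32 = 67. The programmed value is then max(8, 67) = 67.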
+
+/*
+ * omap3isp_resizer_busy - Checks if ISP resizer is busy.
+ *
+ * Returns busy field from ISPRSZ_PCR register.
+ */
+int omap3isp_resizer_busy(struct isp_res_device *res)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) &
+			     ISPRSZ_PCR_BUSY;
+}
+
+/*
+ * resizer_set_inaddr - Sets the memory address of the input frame.
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ */
+static void resizer_set_inaddr(struct isp_res_device *res, u32 addr)
+{
+	res->addr_base = addr;
+
+	/* This will handle crop settings in stream off state */
+	if (res->crop_offset)
+		addr += res->crop_offset & ~0x1f;
+
+	__resizer_set_inaddr(res, addr);
+}
+
+/*
+ * resizer_set_outaddr - Configures the memory address to which the output
+ * frame is written.
+ * @addr: 32-bit memory address aligned on a 32-byte boundary.
+ * Note: For SBL efficiency reasons the address should be on a 256-byte
+ * boundary.
+ */
+static void resizer_set_outaddr(struct isp_res_device *res, u32 addr)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	/*
+	 * Set output address. This needs to be in its own function
+	 * because it changes often.
+	 */
+	isp_reg_writel(isp, addr << ISPRSZ_SDR_OUTADD_ADDR_SHIFT,
+		       OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD);
+}
+
+/*
+ * resizer_print_status - Prints the values of the resizer module registers.
+ */
+#define RSZ_PRINT_REGISTER(isp, name)\
+	dev_dbg(isp->dev, "###RSZ " #name "=0x%08x\n", \
+		isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_##name))
+
+static void resizer_print_status(struct isp_res_device *res)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	dev_dbg(isp->dev, "-------------Resizer Register dump----------\n");
+
+	RSZ_PRINT_REGISTER(isp, PCR);
+	RSZ_PRINT_REGISTER(isp, CNT);
+	RSZ_PRINT_REGISTER(isp, OUT_SIZE);
+	RSZ_PRINT_REGISTER(isp, IN_START);
+	RSZ_PRINT_REGISTER(isp, IN_SIZE);
+	RSZ_PRINT_REGISTER(isp, SDR_INADD);
+	RSZ_PRINT_REGISTER(isp, SDR_INOFF);
+	RSZ_PRINT_REGISTER(isp, SDR_OUTADD);
+	RSZ_PRINT_REGISTER(isp, SDR_OUTOFF);
+	RSZ_PRINT_REGISTER(isp, YENH);
+
+	dev_dbg(isp->dev, "--------------------------------------------\n");
+}
+
+/*
+ * resizer_calc_ratios - Helper function for calculating resizer ratios
+ * @res: pointer to resizer private data structure
+ * @input: input frame size
+ * @output: output frame size
+ * @ratio : return calculated ratios
+ * return none
+ *
+ * The resizer uses a polyphase sample rate converter. The upsampling filter
+ * has a fixed number of phases that depends on the resizing ratio. As the ratio
+ * computation depends on the number of phases, we need to compute a first
+ * approximation and then refine it.
+ *
+ * The input/output/ratio relationship is given by the OMAP34xx TRM:
+ *
+ * - 8-phase, 4-tap mode (RSZ = 64 ~ 512)
+ *	iw = (32 * sph + (ow - 1) * hrsz + 16) >> 8 + 7
+ *	ih = (32 * spv + (oh - 1) * vrsz + 16) >> 8 + 4
+ * - 4-phase, 7-tap mode (RSZ = 513 ~ 1024)
+ *	iw = (64 * sph + (ow - 1) * hrsz + 32) >> 8 + 7
+ *	ih = (64 * spv + (oh - 1) * vrsz + 32) >> 8 + 7
+ *
+ * iw and ih are the input width and height after cropping. Those equations need
+ * to be satisfied exactly for the resizer to work correctly.
+ *
+ * The equations can't be easily reverted, as the >> 8 operation is not linear.
+ * In addition, not all input sizes can be achieved for a given output size. To
+ * get the highest input size lower than or equal to the requested input size,
+ * we need to compute the highest resizing ratio that satisfies the following
+ * inequality (taking the 4-tap mode width equation as an example)
+ *
+ *	iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 + 7
+ *
+ * (where iw is the requested input width) which can be rewritten as
+ *
+ *	  iw - 7            >= (32 * sph + (ow - 1) * hrsz + 16) >> 8
+ *	 (iw - 7) << 8      >=  32 * sph + (ow - 1) * hrsz + 16 - b
+ *	((iw - 7) << 8) + b >=  32 * sph + (ow - 1) * hrsz + 16
+ *
+ * where b is the value of the 8 least significant bits of the right hand side
+ * expression of the last inequality. The highest resizing ratio value will be
+ * achieved when b is equal to its maximum value of 255. That resizing ratio
+ * value will still satisfy the original inequality, as b disappears when the
+ * expression is shifted right by 8.
+ *
+ * The reverted equations thus become
+ *
+ * - 8-phase, 4-tap mode
+ *	hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1)
+ *	vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1)
+ * - 4-phase, 7-tap mode
+ *	hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1)
+ *	vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1)
+ *
+ * The ratios are integer values, and are rounded down to ensure that the
+ * cropped input size is not bigger than the uncropped input size.
+ *
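+ * As a worked example (assuming a phase value of sph = 1): for a requested
+ * input width iw = 1024 and an output width ow = 640, the 4-tap mode equation
+ * gives hrsz = ((1024 - 7) * 256 + 255 - 16 - 32) / 639 = 407. Feeding 407
+ * back into the TRM equation yields a cropped input width of
+ * ((32 + 639 * 407 + 16) >> 8) + 7 = 1023, which doesn't exceed the requested
+ * 1024 pixels.
+ *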
+ * As the number of phases/taps, used to select the correct equations to compute
+ * the ratio, depends on the ratio, we start with the 4-tap mode equations to
+ * compute an approximation of the ratio, and switch to the 7-tap mode equations
+ * if the approximation is higher than the ratio threshold.
+ *
+ * As the 7-tap mode equations will return a ratio smaller than or equal to the
+ * 4-tap mode equations, the resulting ratio could become lower than or equal to
+ * the ratio threshold. This 'equations loop' isn't an issue as long as the
+ * correct equations are used to compute the final input size. Starting with the
+ * 4-tap mode equations ensures that, in case of values resulting in a 'ratio
+ * loop', the smallest of the ratio values will be used, never exceeding the
+ * requested input size.
+ *
+ * We first clamp the output size according to the hardware capability to avoid
+ * auto-cropping the input more than required to satisfy the TRM equations. The
+ * minimum output size is achieved with a scaling factor of 1024. It is thus
+ * computed using the 7-tap equations.
+ *
+ *	min ow = ((iw - 7) * 256 - 32 - 64 * sph) / 1024 + 1
+ *	min oh = ((ih - 7) * 256 - 32 - 64 * spv) / 1024 + 1
+ *
+ * Similarly, the maximum output size is achieved with a scaling factor of 64
+ * and computed using the 4-tap equations.
+ *
+ *	max ow = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / 64 + 1
+ *	max oh = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / 64 + 1
+ *
+ * The additional +255 term compensates for the round down operation performed
+ * by the TRM equations when shifting the value right by 8 bits.
+ *
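+ * With the same hypothetical numbers as above (iw = 1024, sph = spv = 1), the
+ * output width would be limited to the range [255, 4072] by these equations,
+ * i.e. roughly a quarter to four times the cropped input width, before the
+ * hardware limits are applied.
+ *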
+ * We then compute and clamp the ratios (x1/4 ~ x4). Clamping the output size to
+ * the maximum value guarantees that the ratio value will never be smaller than
+ * the minimum, but it could still slightly exceed the maximum. Clamping the
+ * ratio will thus result in a resizing factor slightly larger than the
+ * requested value.
+ *
+ * To accommodate that, and make sure the TRM equations are satisfied exactly, we
+ * compute the input crop rectangle as the last step.
+ *
+ * As if the situation wasn't complex enough, the maximum output width depends
+ * on the vertical resizing ratio.  Fortunately, the output height doesn't
+ * depend on the horizontal resizing ratio. We can then start by computing the
+ * output height and the vertical ratio, and then move to computing the output
+ * width and the horizontal ratio.
+ */
+static void resizer_calc_ratios(struct isp_res_device *res,
+				struct v4l2_rect *input,
+				struct v4l2_mbus_framefmt *output,
+				struct resizer_ratio *ratio)
+{
+	struct isp_device *isp = to_isp_device(res);
+	const unsigned int spv = DEFAULT_PHASE;
+	const unsigned int sph = DEFAULT_PHASE;
+	unsigned int upscaled_width;
+	unsigned int upscaled_height;
+	unsigned int min_width;
+	unsigned int min_height;
+	unsigned int max_width;
+	unsigned int max_height;
+	unsigned int width_alignment;
+	unsigned int width;
+	unsigned int height;
+
+	/*
+	 * Clamp the output height based on the hardware capabilities and
+	 * compute the vertical resizing ratio.
+	 */
+	min_height = ((input->height - 7) * 256 - 32 - 64 * spv) / 1024 + 1;
+	min_height = max_t(unsigned int, min_height, MIN_OUT_HEIGHT);
+	max_height = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) / 64 + 1;
+	max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT);
+	output->height = clamp(output->height, min_height, max_height);
+
+	ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv)
+		    / (output->height - 1);
+	if (ratio->vert > MID_RESIZE_VALUE)
+		ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv)
+			    / (output->height - 1);
+	ratio->vert = clamp_t(unsigned int, ratio->vert,
+			      MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
+
+	if (ratio->vert <= MID_RESIZE_VALUE) {
+		upscaled_height = (output->height - 1) * ratio->vert
+				+ 32 * spv + 16;
+		height = (upscaled_height >> 8) + 4;
+	} else {
+		upscaled_height = (output->height - 1) * ratio->vert
+				+ 64 * spv + 32;
+		height = (upscaled_height >> 8) + 7;
+	}
+
+	/*
+	 * Compute the minimum and maximum output widths based on the hardware
+	 * capabilities. The maximum depends on the vertical resizing ratio.
+	 */
+	min_width = ((input->width - 7) * 256 - 32 - 64 * sph) / 1024 + 1;
+	min_width = max_t(unsigned int, min_width, MIN_OUT_WIDTH);
+
+	if (ratio->vert <= MID_RESIZE_VALUE) {
+		switch (isp->revision) {
+		case ISP_REVISION_1_0:
+			max_width = MAX_4TAP_OUT_WIDTH_ES1;
+			break;
+
+		case ISP_REVISION_2_0:
+		default:
+			max_width = MAX_4TAP_OUT_WIDTH_ES2;
+			break;
+
+		case ISP_REVISION_15_0:
+			max_width = MAX_4TAP_OUT_WIDTH_3630;
+			break;
+		}
+	} else {
+		switch (isp->revision) {
+		case ISP_REVISION_1_0:
+			max_width = MAX_7TAP_OUT_WIDTH_ES1;
+			break;
+
+		case ISP_REVISION_2_0:
+		default:
+			max_width = MAX_7TAP_OUT_WIDTH_ES2;
+			break;
+
+		case ISP_REVISION_15_0:
+			max_width = MAX_7TAP_OUT_WIDTH_3630;
+			break;
+		}
+	}
+	max_width = min(((input->width - 7) * 256 + 255 - 16 - 32 * sph) / 64
+			+ 1, max_width);
+
+	/*
+	 * The output width must be even, and must be a multiple of 16 bytes
+	 * when upscaling vertically. Clamp the output width to the valid range.
+	 * Take the alignment into account (the maximum width in 7-tap mode on
+	 * ES2 isn't a multiple of 8) and align the result up to make sure it
+	 * won't be smaller than the minimum.
+	 */
+	width_alignment = ratio->vert < 256 ? 8 : 2;
+	output->width = clamp(output->width, min_width,
+			      max_width & ~(width_alignment - 1));
+	output->width = ALIGN(output->width, width_alignment);
+
+	ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph)
+		    / (output->width - 1);
+	if (ratio->horz > MID_RESIZE_VALUE)
+		ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph)
+			    / (output->width - 1);
+	ratio->horz = clamp_t(unsigned int, ratio->horz,
+			      MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
+
+	if (ratio->horz <= MID_RESIZE_VALUE) {
+		upscaled_width = (output->width - 1) * ratio->horz
+			       + 32 * sph + 16;
+		width = (upscaled_width >> 8) + 7;
+	} else {
+		upscaled_width = (output->width - 1) * ratio->horz
+			       + 64 * sph + 32;
+		width = (upscaled_width >> 8) + 7;
+	}
+
+	/* Center the new crop rectangle. */
+	input->left += (input->width - width) / 2;
+	input->top += (input->height - height) / 2;
+	input->width = width;
+	input->height = height;
+}
+
+/*
+ * resizer_set_crop_params - Setup hardware with cropping parameters
+ * @res : resizer private structure
+ * @input : mbus frame format on the sink pad
+ * @output : mbus frame format on the source pad
+ * return none
+ */
+static void resizer_set_crop_params(struct isp_res_device *res,
+				    const struct v4l2_mbus_framefmt *input,
+				    const struct v4l2_mbus_framefmt *output)
+{
+	resizer_set_ratio(res, &res->ratio);
+
+	/* Set chrominance horizontal algorithm */
+	if (res->ratio.horz >= RESIZE_DIVISOR)
+		resizer_set_bilinear(res, RSZ_THE_SAME);
+	else
+		resizer_set_bilinear(res, RSZ_BILINEAR);
+
+	resizer_adjust_bandwidth(res);
+
+	if (res->input == RESIZER_INPUT_MEMORY) {
+		/* Calculate additional offset for crop */
+		res->crop_offset = (res->crop.active.top * input->width +
+				    res->crop.active.left) * 2;
+		/*
+		 * Write lowest 4 bits of horizontal pixel offset (in pixels),
+		 * vertical start must be 0.
+		 */
+		resizer_set_start(res, (res->crop_offset / 2) & 0xf, 0);
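+		/*
+		 * Example (hypothetical values): top = 2, left = 5 and an
+		 * input width of 640 pixels give crop_offset = 2570 bytes.
+		 * The 5 remaining pixels are programmed above as the
+		 * horizontal start, and the read address set below is rounded
+		 * down to 2570 & ~0x1f = 2560 bytes.
+		 */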
+
+		/*
+		 * Set start (read) address for cropping, in bytes.
+		 * Lowest 5 bits must be zero.
+		 */
+		__resizer_set_inaddr(res,
+				res->addr_base + (res->crop_offset & ~0x1f));
+	} else {
+		/*
+		 * Set vertical start line and horizontal starting pixel.
+		 * If the input is from CCDC/PREV, the horizontal start field
+		 * is in bytes (twice the number of pixels).
+		 */
+		resizer_set_start(res, res->crop.active.left * 2,
+				  res->crop.active.top);
+		/* Input address and offset must be 0 for preview/ccdc input */
+		__resizer_set_inaddr(res, 0);
+		resizer_set_input_offset(res, 0);
+	}
+
+	/* Set the input size */
+	resizer_set_input_size(res, res->crop.active.width,
+			       res->crop.active.height);
+}
+
+static void resizer_configure(struct isp_res_device *res)
+{
+	struct v4l2_mbus_framefmt *informat, *outformat;
+	struct resizer_luma_yenh luma = {0, 0, 0, 0};
+
+	resizer_set_source(res, res->input);
+
+	informat = &res->formats[RESZ_PAD_SINK];
+	outformat = &res->formats[RESZ_PAD_SOURCE];
+
+	/* RESZ_PAD_SINK */
+	if (res->input == RESIZER_INPUT_VP)
+		resizer_set_input_offset(res, 0);
+	else
+		resizer_set_input_offset(res, ALIGN(informat->width, 0x10) * 2);
+
+	/* YUV422 interleaved, default phase, no luma enhancement */
+	resizer_set_intype(res, RSZ_YUV422);
+	resizer_set_ycpos(res, informat->code);
+	resizer_set_phase(res, DEFAULT_PHASE, DEFAULT_PHASE);
+	resizer_set_luma(res, &luma);
+
+	/* RESZ_PAD_SOURCE */
+	resizer_set_output_offset(res, ALIGN(outformat->width * 2, 32));
+	resizer_set_output_size(res, outformat->width, outformat->height);
+
+	resizer_set_crop_params(res, informat, outformat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void resizer_enable_oneshot(struct isp_res_device *res)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR,
+		    ISPRSZ_PCR_ENABLE | ISPRSZ_PCR_ONESHOT);
+}
+
+void omap3isp_resizer_isr_frame_sync(struct isp_res_device *res)
+{
+	/*
+	 * If ISP_VIDEO_DMAQUEUE_QUEUED is set, DMA queue had an underrun
+	 * condition, the module was paused and now we have a buffer queued
+	 * on the output again. Restart the pipeline if running in continuous
+	 * mode.
+	 */
+	if (res->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+	    res->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
+		resizer_enable_oneshot(res);
+		isp_video_dmaqueue_flags_clr(&res->video_out);
+	}
+}
+
+static void resizer_isr_buffer(struct isp_res_device *res)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
+	struct isp_buffer *buffer;
+	int restart = 0;
+
+	if (res->state == ISP_PIPELINE_STREAM_STOPPED)
+		return;
+
+	/* Complete the output buffer and, if reading from memory, the input
+	 * buffer.
+	 */
+	buffer = omap3isp_video_buffer_next(&res->video_out);
+	if (buffer != NULL) {
+		resizer_set_outaddr(res, buffer->isp_addr);
+		restart = 1;
+	}
+
+	pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
+
+	if (res->input == RESIZER_INPUT_MEMORY) {
+		buffer = omap3isp_video_buffer_next(&res->video_in);
+		if (buffer != NULL)
+			resizer_set_inaddr(res, buffer->isp_addr);
+		pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+	}
+
+	if (res->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
+		if (isp_pipeline_ready(pipe))
+			omap3isp_pipeline_set_stream(pipe,
+						ISP_PIPELINE_STREAM_SINGLESHOT);
+	} else {
+		/* If an underrun occurs, the video queue operation handler will
+		 * restart the resizer. Otherwise restart it immediately.
+		 */
+		if (restart)
+			resizer_enable_oneshot(res);
+	}
+}
+
+/*
+ * omap3isp_resizer_isr - ISP resizer interrupt handler
+ *
+ * Manage the resizer video buffers and configure shadowed and busy-locked
+ * registers.
+ */
+void omap3isp_resizer_isr(struct isp_res_device *res)
+{
+	struct v4l2_mbus_framefmt *informat, *outformat;
+
+	if (omap3isp_module_sync_is_stopping(&res->wait, &res->stopping))
+		return;
+
+	if (res->applycrop) {
+		outformat = __resizer_get_format(res, NULL, RESZ_PAD_SOURCE,
+					      V4L2_SUBDEV_FORMAT_ACTIVE);
+		informat = __resizer_get_format(res, NULL, RESZ_PAD_SINK,
+					      V4L2_SUBDEV_FORMAT_ACTIVE);
+		resizer_set_crop_params(res, informat, outformat);
+		res->applycrop = 0;
+	}
+
+	resizer_isr_buffer(res);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int resizer_video_queue(struct isp_video *video,
+			       struct isp_buffer *buffer)
+{
+	struct isp_res_device *res = &video->isp->isp_res;
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		resizer_set_inaddr(res, buffer->isp_addr);
+
+	/*
+	 * We now have a buffer queued on the output. Despite what the
+	 * TRM says, the resizer can't be restarted immediately.
+	 * Enabling it in one shot mode in the middle of a frame (or at
+	 * least asynchronously to the frame) results in the output
+	 * being shifted randomly left/right and up/down, as if the
+	 * hardware didn't synchronize itself to the beginning of the
+	 * frame correctly.
+	 *
+	 * Restart the resizer on the next sync interrupt if running in
+	 * continuous mode or when starting the stream.
+	 */
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		resizer_set_outaddr(res, buffer->isp_addr);
+
+	return 0;
+}
+
+static const struct isp_video_operations resizer_video_ops = {
+	.queue = resizer_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_set_stream - Enable/Disable streaming on resizer subdev
+ * @sd: ISP resizer V4L2 subdev
+ * @enable: requested pipeline stream state (ISP_PIPELINE_STREAM_*)
+ *
+ * The resizer hardware can't be enabled without a memory buffer to write to.
+ * As the s_stream operation is called in response to a STREAMON call without
+ * any buffer queued yet, just update the state field and return immediately.
+ * The resizer will be enabled in resizer_video_queue().
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct isp_video *video_out = &res->video_out;
+	struct isp_device *isp = to_isp_device(res);
+	struct device *dev = to_device(res);
+
+	if (res->state == ISP_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISP_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_RESIZER);
+		resizer_configure(res);
+		resizer_print_status(res);
+	}
+
+	switch (enable) {
+	case ISP_PIPELINE_STREAM_CONTINUOUS:
+		omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
+		if (video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
+			resizer_enable_oneshot(res);
+			isp_video_dmaqueue_flags_clr(video_out);
+		}
+		break;
+
+	case ISP_PIPELINE_STREAM_SINGLESHOT:
+		if (res->input == RESIZER_INPUT_MEMORY)
+			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_READ);
+		omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE);
+
+		resizer_enable_oneshot(res);
+		break;
+
+	case ISP_PIPELINE_STREAM_STOPPED:
+		if (omap3isp_module_sync_idle(&sd->entity, &res->wait,
+					      &res->stopping))
+			dev_dbg(dev, "%s: module stop timeout.\n", sd->name);
+		omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_RESIZER_READ |
+				OMAP3_ISP_SBL_RESIZER_WRITE);
+		omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_RESIZER);
+		isp_video_dmaqueue_flags_clr(video_out);
+		break;
+	}
+
+	res->state = enable;
+	return 0;
+}
+
+/*
+ * resizer_try_crop - mangles crop parameters.
+ */
+static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
+			     const struct v4l2_mbus_framefmt *source,
+			     struct v4l2_rect *crop)
+{
+	const unsigned int spv = DEFAULT_PHASE;
+	const unsigned int sph = DEFAULT_PHASE;
+
+	/* Crop rectangle is constrained by the output size so that zoom ratio
+	 * cannot exceed +/-4.0.
+	 */
+	unsigned int min_width =
+		((32 * sph + (source->width - 1) * 64 + 16) >> 8) + 7;
+	unsigned int min_height =
+		((32 * spv + (source->height - 1) * 64 + 16) >> 8) + 4;
+	unsigned int max_width =
+		((64 * sph + (source->width - 1) * 1024 + 32) >> 8) + 7;
+	unsigned int max_height =
+		((64 * spv + (source->height - 1) * 1024 + 32) >> 8) + 7;
+
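+	/*
+	 * For instance (assuming sph = spv = 1): with a 640x480 source format
+	 * the crop width is limited to the range [166, 2563] and the crop
+	 * height to [123, 1923], i.e. roughly a quarter to four times the
+	 * output size.
+	 */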
+	crop->width = clamp_t(u32, crop->width, min_width, max_width);
+	crop->height = clamp_t(u32, crop->height, min_height, max_height);
+
+	/* The crop rectangle cannot go beyond the input rectangle. */
+	crop->left = clamp_t(u32, crop->left, 0, sink->width - MIN_IN_WIDTH);
+	crop->width = clamp_t(u32, crop->width, MIN_IN_WIDTH,
+			      sink->width - crop->left);
+	crop->top = clamp_t(u32, crop->top, 0, sink->height - MIN_IN_HEIGHT);
+	crop->height = clamp_t(u32, crop->height, MIN_IN_HEIGHT,
+			       sink->height - crop->top);
+}
+
+/*
+ * resizer_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int resizer_get_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format_source;
+	struct v4l2_mbus_framefmt *format_sink;
+	struct resizer_ratio ratio;
+
+	if (sel->pad != RESZ_PAD_SINK)
+		return -EINVAL;
+
+	format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+					   sel->which);
+	format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+					     sel->which);
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = INT_MAX;
+		sel->r.height = INT_MAX;
+
+		resizer_try_crop(format_sink, format_source, &sel->r);
+		resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+		break;
+
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *__resizer_get_crop(res, fh, sel->which);
+		resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * resizer_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
+ *
+ * FIXME: This function currently behaves as if the KEEP_CONFIG selection flag
+ * was always set.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int resizer_set_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct isp_device *isp = to_isp_device(res);
+	struct v4l2_mbus_framefmt *format_sink, *format_source;
+	struct resizer_ratio ratio;
+
+	if (sel->target != V4L2_SEL_TGT_CROP ||
+	    sel->pad != RESZ_PAD_SINK)
+		return -EINVAL;
+
+	format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+					   sel->which);
+	format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+					     sel->which);
+
+	dev_dbg(isp->dev, "%s: L=%d,T=%d,W=%d,H=%d,which=%d\n", __func__,
+		sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+		sel->which);
+
+	dev_dbg(isp->dev, "%s: input=%dx%d, output=%dx%d\n", __func__,
+		format_sink->width, format_sink->height,
+		format_source->width, format_source->height);
+
+	/* Clamp the crop rectangle to the bounds, and then mangle it further to
+	 * fulfill the TRM equations. Store the clamped but otherwise unmangled
+	 * rectangle to avoid cropping the input multiple times: when an
+	 * application sets the output format, the current crop rectangle is
+	 * mangled during crop rectangle computation, which would lead to a new,
+	 * smaller input crop rectangle every time the output size is set if we
+	 * stored the mangled rectangle.
+	 */
+	resizer_try_crop(format_sink, format_source, &sel->r);
+	*__resizer_get_crop(res, fh, sel->which) = sel->r;
+	resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+		return 0;
+
+	res->ratio = ratio;
+	res->crop.active = sel->r;
+
+	/*
+	 * set_selection can be called while streaming is on. In this case the
+	 * crop values will be set in the next IRQ.
+	 */
+	if (res->state != ISP_PIPELINE_STREAM_STOPPED)
+		res->applycrop = 1;
+
+	return 0;
+}
+
+/* resizer pixel formats */
+static const unsigned int resizer_formats[] = {
+	V4L2_MBUS_FMT_UYVY8_1X16,
+	V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+static unsigned int resizer_max_in_width(struct isp_res_device *res)
+{
+	struct isp_device *isp = to_isp_device(res);
+
+	if (res->input == RESIZER_INPUT_MEMORY) {
+		return MAX_IN_WIDTH_MEMORY_MODE;
+	} else {
+		if (isp->revision == ISP_REVISION_1_0)
+			return MAX_IN_WIDTH_ONTHEFLY_MODE_ES1;
+		else
+			return MAX_IN_WIDTH_ONTHEFLY_MODE_ES2;
+	}
+}
+
+/*
+ * resizer_try_format - Handle try format by pad subdev method
+ * @res   : ISP resizer device
+ * @fh    : V4L2 subdev file handle
+ * @pad   : pad num
+ * @fmt   : pointer to v4l2 format structure
+ * @which : wanted subdev format
+ */
+static void resizer_try_format(struct isp_res_device *res,
+			       struct v4l2_subdev_fh *fh, unsigned int pad,
+			       struct v4l2_mbus_framefmt *fmt,
+			       enum v4l2_subdev_format_whence which)
+{
+	struct v4l2_mbus_framefmt *format;
+	struct resizer_ratio ratio;
+	struct v4l2_rect crop;
+
+	switch (pad) {
+	case RESZ_PAD_SINK:
+		if (fmt->code != V4L2_MBUS_FMT_YUYV8_1X16 &&
+		    fmt->code != V4L2_MBUS_FMT_UYVY8_1X16)
+			fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
+
+		fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
+				     resizer_max_in_width(res));
+		fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
+				      MAX_IN_HEIGHT);
+		break;
+
+	case RESZ_PAD_SOURCE:
+		format = __resizer_get_format(res, fh, RESZ_PAD_SINK, which);
+		fmt->code = format->code;
+
+		crop = *__resizer_get_crop(res, fh, which);
+		resizer_calc_ratios(res, &crop, fmt, &ratio);
+		break;
+	}
+
+	fmt->colorspace = V4L2_COLORSPACE_JPEG;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * resizer_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh     : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	if (code->pad == RESZ_PAD_SINK) {
+		if (code->index >= ARRAY_SIZE(resizer_formats))
+			return -EINVAL;
+
+		code->code = resizer_formats[code->index];
+	} else {
+		if (code->index != 0)
+			return -EINVAL;
+
+		format = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+					      V4L2_SUBDEV_FORMAT_TRY);
+		code->code = format->code;
+	}
+
+	return 0;
+}
+
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_fh *fh,
+				   struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	resizer_try_format(res, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	resizer_try_format(res, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * resizer_get_format - Handle get format by pads subdev method
+ * @sd    : pointer to v4l2 subdev structure
+ * @fh    : V4L2 subdev file handle
+ * @fmt   : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __resizer_get_format(res, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * resizer_set_format - Handle set format by pads subdev method
+ * @sd    : pointer to v4l2 subdev structure
+ * @fh    : V4L2 subdev file handle
+ * @fmt   : pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect *crop;
+
+	format = __resizer_get_format(res, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	resizer_try_format(res, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	if (fmt->pad == RESZ_PAD_SINK) {
+		/* reset crop rectangle */
+		crop = __resizer_get_crop(res, fh, fmt->which);
+		crop->left = 0;
+		crop->top = 0;
+		crop->width = fmt->format.width;
+		crop->height = fmt->format.height;
+
+		/* Propagate the format from sink to source */
+		format = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+					      fmt->which);
+		*format = fmt->format;
+		resizer_try_format(res, fh, RESZ_PAD_SOURCE, format,
+				   fmt->which);
+	}
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+		/* Compute and store the active crop rectangle and resizer
+		 * ratios. format already points to the source pad active
+		 * format.
+		 */
+		res->crop.active = res->crop.request;
+		resizer_calc_ratios(res, &res->crop.active, format,
+				       &res->ratio);
+	}
+
+	return 0;
+}
+
+/*
+ * resizer_init_formats - Initialize formats on all pads
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = RESZ_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_YUYV8_1X16;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	resizer_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+	.s_stream = resizer_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+	.enum_mbus_code = resizer_enum_mbus_code,
+	.enum_frame_size = resizer_enum_frame_size,
+	.get_fmt = resizer_get_format,
+	.set_fmt = resizer_set_format,
+	.get_selection = resizer_get_selection,
+	.set_selection = resizer_set_selection,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+	.video = &resizer_v4l2_video_ops,
+	.pad = &resizer_v4l2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+	.open = resizer_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup - Setup resizer connections.
+ * @entity : Pointer to media entity structure
+ * @local  : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags  : Link flags
+ * return -EINVAL or zero on success
+ */
+static int resizer_link_setup(struct media_entity *entity,
+			      const struct media_pad *local,
+			      const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+
+	switch (local->index | media_entity_type(remote->entity)) {
+	case RESZ_PAD_SINK | MEDIA_ENT_T_DEVNODE:
+		/* read from memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (res->input == RESIZER_INPUT_VP)
+				return -EBUSY;
+			res->input = RESIZER_INPUT_MEMORY;
+		} else {
+			if (res->input == RESIZER_INPUT_MEMORY)
+				res->input = RESIZER_INPUT_NONE;
+		}
+		break;
+
+	case RESZ_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* read from ccdc or previewer */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (res->input == RESIZER_INPUT_MEMORY)
+				return -EBUSY;
+			res->input = RESIZER_INPUT_VP;
+		} else {
+			if (res->input == RESIZER_INPUT_VP)
+				res->input = RESIZER_INPUT_NONE;
+		}
+		break;
+
+	case RESZ_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+		/* The resizer always writes to memory. */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations resizer_media_ops = {
+	.link_setup = resizer_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+void omap3isp_resizer_unregister_entities(struct isp_res_device *res)
+{
+	v4l2_device_unregister_subdev(&res->subdev);
+	omap3isp_video_unregister(&res->video_in);
+	omap3isp_video_unregister(&res->video_out);
+}
+
+int omap3isp_resizer_register_entities(struct isp_res_device *res,
+				       struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video nodes. */
+	ret = v4l2_device_register_subdev(vdev, &res->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&res->video_in, vdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap3isp_video_register(&res->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap3isp_resizer_unregister_entities(res);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP resizer initialization and cleanup
+ */
+
+/*
+ * resizer_init_entities - Initialize resizer subdev and media entity.
+ * @res : Pointer to resizer device structure
+ * return -ENOMEM or zero on success
+ */
+static int resizer_init_entities(struct isp_res_device *res)
+{
+	struct v4l2_subdev *sd = &res->subdev;
+	struct media_pad *pads = res->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	res->input = RESIZER_INPUT_NONE;
+
+	v4l2_subdev_init(sd, &resizer_v4l2_ops);
+	sd->internal_ops = &resizer_v4l2_internal_ops;
+	strlcpy(sd->name, "OMAP3 ISP resizer", sizeof(sd->name));
+	sd->grp_id = 1 << 16;	/* group ID for isp subdevs */
+	v4l2_set_subdevdata(sd, res);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[RESZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &resizer_media_ops;
+	ret = media_entity_init(me, RESZ_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	resizer_init_formats(sd, NULL);
+
+	res->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	res->video_in.ops = &resizer_video_ops;
+	res->video_in.isp = to_isp_device(res);
+	res->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+	res->video_in.bpl_alignment = 32;
+	res->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	res->video_out.ops = &resizer_video_ops;
+	res->video_out.isp = to_isp_device(res);
+	res->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3;
+	res->video_out.bpl_alignment = 32;
+
+	ret = omap3isp_video_init(&res->video_in, "resizer");
+	if (ret < 0)
+		goto error_video_in;
+
+	ret = omap3isp_video_init(&res->video_out, "resizer");
+	if (ret < 0)
+		goto error_video_out;
+
+	res->video_out.video.entity.flags |= MEDIA_ENT_FL_DEFAULT;
+
+	/* Connect the video nodes to the resizer subdev. */
+	ret = media_entity_create_link(&res->video_in.video.entity, 0,
+			&res->subdev.entity, RESZ_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(&res->subdev.entity, RESZ_PAD_SOURCE,
+			&res->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap3isp_video_cleanup(&res->video_out);
+error_video_out:
+	omap3isp_video_cleanup(&res->video_in);
+error_video_in:
+	media_entity_cleanup(&res->subdev.entity);
+	return ret;
+}
+
+/*
+ * omap3isp_resizer_init - Resizer initialization.
+ * @isp : Pointer to ISP device
+ * return -ENOMEM or zero on success
+ */
+int omap3isp_resizer_init(struct isp_device *isp)
+{
+	struct isp_res_device *res = &isp->isp_res;
+
+	init_waitqueue_head(&res->wait);
+	atomic_set(&res->stopping, 0);
+	return resizer_init_entities(res);
+}
+
+void omap3isp_resizer_cleanup(struct isp_device *isp)
+{
+	struct isp_res_device *res = &isp->isp_res;
+
+	omap3isp_video_cleanup(&res->video_in);
+	omap3isp_video_cleanup(&res->video_out);
+	media_entity_cleanup(&res->subdev.entity);
+}
diff --git a/drivers/media/platform/omap3isp/ispresizer.h b/drivers/media/platform/omap3isp/ispresizer.h
new file mode 100644
index 0000000..70c1c0e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispresizer.h
@@ -0,0 +1,146 @@
+/*
+ * ispresizer.h
+ *
+ * TI OMAP3 ISP - Resizer module
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_RESIZER_H
+#define OMAP3_ISP_RESIZER_H
+
+#include <linux/types.h>
+
+/*
+ * Constant for the number of filter coefficients
+ */
+#define COEFF_CNT		32
+
+/*
+ * struct isprsz_coef - Structure for resizer filter coefficients.
+ * @h_filter_coef_4tap: Horizontal filter coefficients for 8-phase/4-tap
+ *			mode (.5x-4x)
+ * @v_filter_coef_4tap: Vertical filter coefficients for 8-phase/4-tap
+ *			mode (.5x-4x)
+ * @h_filter_coef_7tap: Horizontal filter coefficients for 4-phase/7-tap
+ *			mode (.25x-.5x)
+ * @v_filter_coef_7tap: Vertical filter coefficients for 4-phase/7-tap
+ *			mode (.25x-.5x)
+ */
+struct isprsz_coef {
+	u16 h_filter_coef_4tap[32];
+	u16 v_filter_coef_4tap[32];
+	/* Every 8th value is a dummy value in the following arrays: */
+	u16 h_filter_coef_7tap[32];
+	u16 v_filter_coef_7tap[32];
+};
+
+/* Chrominance horizontal algorithm */
+enum resizer_chroma_algo {
+	RSZ_THE_SAME = 0,	/* Chrominance the same as Luminance */
+	RSZ_BILINEAR = 1,	/* Chrominance uses bilinear interpolation */
+};
+
+/* Resizer input type select */
+enum resizer_colors_type {
+	RSZ_YUV422 = 0,		/* YUV422 color is interleaved */
+	RSZ_COLOR8 = 1,		/* Color separate data on 8 bits */
+};
+
+/*
+ * Structure for horizontal and vertical resizing value
+ */
+struct resizer_ratio {
+	u32 horz;
+	u32 vert;
+};
+
+/*
+ * Structure for luminance enhancer parameters.
+ */
+struct resizer_luma_yenh {
+	u8 algo;		/* algorithm select. */
+	u8 gain;		/* maximum gain. */
+	u8 slope;		/* slope. */
+	u8 core;		/* core offset. */
+};
+
+enum resizer_input_entity {
+	RESIZER_INPUT_NONE,
+	RESIZER_INPUT_VP,	/* input video port - prev or ccdc */
+	RESIZER_INPUT_MEMORY,
+};
+
+/* Sink and source resizer pads */
+#define RESZ_PAD_SINK			0
+#define RESZ_PAD_SOURCE			1
+#define RESZ_PADS_NUM			2
+
+/*
+ * struct isp_res_device - OMAP3 ISP resizer module
+ * @crop.request: Crop rectangle requested by the user
+ * @crop.active: Active crop rectangle (based on hardware requirements)
+ */
+struct isp_res_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[RESZ_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[RESZ_PADS_NUM];
+
+	enum resizer_input_entity input;
+	struct isp_video video_in;
+	struct isp_video video_out;
+
+	u32 addr_base;   /* stored source buffer address in memory mode */
+	u32 crop_offset; /* additional offset for crop in memory mode */
+	struct resizer_ratio ratio;
+	int pm_state;
+	unsigned int applycrop:1;
+	enum isp_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+
+	struct {
+		struct v4l2_rect request;
+		struct v4l2_rect active;
+	} crop;
+};
+
+struct isp_device;
+
+int omap3isp_resizer_init(struct isp_device *isp);
+void omap3isp_resizer_cleanup(struct isp_device *isp);
+
+int omap3isp_resizer_register_entities(struct isp_res_device *res,
+				       struct v4l2_device *vdev);
+void omap3isp_resizer_unregister_entities(struct isp_res_device *res);
+void omap3isp_resizer_isr_frame_sync(struct isp_res_device *res);
+void omap3isp_resizer_isr(struct isp_res_device *isp_res);
+
+void omap3isp_resizer_max_rate(struct isp_res_device *res,
+			       unsigned int *max_rate);
+
+void omap3isp_resizer_suspend(struct isp_res_device *isp_res);
+
+void omap3isp_resizer_resume(struct isp_res_device *isp_res);
+
+int omap3isp_resizer_busy(struct isp_res_device *isp_res);
+
+#endif	/* OMAP3_ISP_RESIZER_H */
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
new file mode 100644
index 0000000..b8640be
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -0,0 +1,1102 @@
+/*
+ * ispstat.c
+ *
+ * TI OMAP3 ISP - Statistics core
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "isp.h"
+
+#define IS_COHERENT_BUF(stat)	((stat)->dma_ch >= 0)
+
+/*
+ * MAGIC_SIZE must always be the greatest common divisor of
+ * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
+ */
+#define MAGIC_SIZE		16
+#define MAGIC_NUM		0x55
+
+/* HACK: The AF module seems to write one paxel of data more than it should. */
+#define AF_EXTRA_DATA		OMAP3ISP_AF_PAXEL_SIZE
+
+/*
+ * HACK: The H3A modules go to an invalid state after an SBL overflow. The next
+ * buffer then starts to be written at the point where the overflow occurred
+ * instead of at the configured address. The only known way to bring the module
+ * back to a valid state is to let it process a valid buffer. This requires at
+ * least a doubled buffer size to avoid accessing an invalid memory region, but
+ * it still doesn't fix everything, as several consecutive SBL overflows may
+ * happen. In that case it is unpredictable how many buffers the allocated
+ * memory should fit. To handle that case, a recover configuration was created.
+ * It produces the minimum buffer size for each H3A module and decreases the
+ * chance of further SBL overflows. This recover state is enabled every time an
+ * SBL overflow occurs. As the output buffer size isn't big, it's possible to
+ * allocate extra space able to fit many recover buffers, making it extremely
+ * unlikely to access an invalid memory region.
+ */
+#define NUM_H3A_RECOVER_BUFS	10
+
+/*
+ * HACK: Because of HW issues the generic layer sometimes needs to behave
+ * differently for different statistics modules.
+ */
+#define IS_H3A_AF(stat)		((stat) == &(stat)->isp->isp_af)
+#define IS_H3A_AEWB(stat)	((stat) == &(stat)->isp->isp_aewb)
+#define IS_H3A(stat)		(IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
+
+static void __isp_stat_buf_sync_magic(struct ispstat *stat,
+				      struct ispstat_buffer *buf,
+				      u32 buf_size, enum dma_data_direction dir,
+				      void (*dma_sync)(struct device *,
+					dma_addr_t, unsigned long, size_t,
+					enum dma_data_direction))
+{
+	struct device *dev = stat->isp->dev;
+	struct page *pg;
+	dma_addr_t dma_addr;
+	u32 offset;
+
+	/* Initial magic words */
+	pg = vmalloc_to_page(buf->virt_addr);
+	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
+	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
+
+	/* Final magic words */
+	pg = vmalloc_to_page(buf->virt_addr + buf_size);
+	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
+	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
+	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
+}
+
+static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
+					       struct ispstat_buffer *buf,
+					       u32 buf_size,
+					       enum dma_data_direction dir)
+{
+	if (IS_COHERENT_BUF(stat))
+		return;
+
+	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
+				  dma_sync_single_range_for_device);
+}
+
+static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
+					    struct ispstat_buffer *buf,
+					    u32 buf_size,
+					    enum dma_data_direction dir)
+{
+	if (IS_COHERENT_BUF(stat))
+		return;
+
+	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
+				  dma_sync_single_range_for_cpu);
+}
+
+static int isp_stat_buf_check_magic(struct ispstat *stat,
+				    struct ispstat_buffer *buf)
+{
+	const u32 buf_size = IS_H3A_AF(stat) ?
+			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
+	u8 *w;
+	u8 *end;
+	int ret = -EINVAL;
+
+	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
+
+	/*
+	 * Check the initial magic numbers; the statistics engine should have
+	 * overwritten them with data by now.
+	 */
+	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
+		if (likely(*w != MAGIC_NUM))
+			ret = 0;
+
+	if (ret) {
+		dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
+					"match.\n", stat->subdev.name);
+		return ret;
+	}
+
+	/* Check the magic numbers at the end. They must still be here. */
+	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
+	     w < end; w++) {
+		if (unlikely(*w != MAGIC_NUM)) {
+			dev_dbg(stat->isp->dev, "%s: ending magic check does "
+				"not match.\n", stat->subdev.name);
+			return -EINVAL;
+		}
+	}
+
+	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
+					   DMA_FROM_DEVICE);
+
+	return 0;
+}
+
+static void isp_stat_buf_insert_magic(struct ispstat *stat,
+				      struct ispstat_buffer *buf)
+{
+	const u32 buf_size = IS_H3A_AF(stat) ?
+			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
+
+	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
+
+	/*
+	 * Inserting MAGIC_NUM at the beginning and end of the buffer.
+	 * buf->buf_size is set only after the buffer is queued. For now the
+	 * right buf_size for the current configuration is given by
+	 * stat->buf_size.
+	 */
+	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
+	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
+
+	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
+					   DMA_BIDIRECTIONAL);
+}
+
+static void isp_stat_buf_sync_for_device(struct ispstat *stat,
+					 struct ispstat_buffer *buf)
+{
+	if (IS_COHERENT_BUF(stat))
+		return;
+
+	dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
+			       buf->iovm->sgt->nents, DMA_FROM_DEVICE);
+}
+
+static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
+				      struct ispstat_buffer *buf)
+{
+	if (IS_COHERENT_BUF(stat))
+		return;
+
+	dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
+			    buf->iovm->sgt->nents, DMA_FROM_DEVICE);
+}
+
+static void isp_stat_buf_clear(struct ispstat *stat)
+{
+	int i;
+
+	for (i = 0; i < STAT_MAX_BUFS; i++)
+		stat->buf[i].empty = 1;
+}
+
+static struct ispstat_buffer *
+__isp_stat_buf_find(struct ispstat *stat, int look_empty)
+{
+	struct ispstat_buffer *found = NULL;
+	int i;
+
+	for (i = 0; i < STAT_MAX_BUFS; i++) {
+		struct ispstat_buffer *curr = &stat->buf[i];
+
+		/*
+		 * Don't select the buffer which is being copied to
+		 * userspace or used by the module.
+		 */
+		if (curr == stat->locked_buf || curr == stat->active_buf)
+			continue;
+
+		/* Don't select uninitialised buffers if it's not required */
+		if (!look_empty && curr->empty)
+			continue;
+
+		/* Pick uninitialised buffer over anything else if look_empty */
+		if (curr->empty) {
+			found = curr;
+			break;
+		}
+
+		/* Choose the oldest buffer */
+		if (!found ||
+		    (s32)curr->frame_number - (s32)found->frame_number < 0)
+			found = curr;
+	}
+
+	return found;
+}
+
+static inline struct ispstat_buffer *
+isp_stat_buf_find_oldest(struct ispstat *stat)
+{
+	return __isp_stat_buf_find(stat, 0);
+}
+
+static inline struct ispstat_buffer *
+isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
+{
+	return __isp_stat_buf_find(stat, 1);
+}
+
+static int isp_stat_buf_queue(struct ispstat *stat)
+{
+	if (!stat->active_buf)
+		return STAT_NO_BUF;
+
+	do_gettimeofday(&stat->active_buf->ts);
+
+	stat->active_buf->buf_size = stat->buf_size;
+	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
+		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
+			stat->subdev.name);
+		return STAT_NO_BUF;
+	}
+	stat->active_buf->config_counter = stat->config_counter;
+	stat->active_buf->frame_number = stat->frame_number;
+	stat->active_buf->empty = 0;
+	stat->active_buf = NULL;
+
+	return STAT_BUF_DONE;
+}
+
+/* Get next free buffer to write the statistics to and mark it active. */
+static void isp_stat_buf_next(struct ispstat *stat)
+{
+	if (unlikely(stat->active_buf))
+		/* Overwriting unused active buffer */
+		dev_dbg(stat->isp->dev, "%s: new buffer requested without "
+					"queuing active one.\n",
+					stat->subdev.name);
+	else
+		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
+}
+
+static void isp_stat_buf_release(struct ispstat *stat)
+{
+	unsigned long flags;
+
+	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
+	spin_lock_irqsave(&stat->isp->stat_lock, flags);
+	stat->locked_buf = NULL;
+	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+}
+
+/* Get buffer to userspace. */
+static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
+					       struct omap3isp_stat_data *data)
+{
+	int rval = 0;
+	unsigned long flags;
+	struct ispstat_buffer *buf;
+
+	spin_lock_irqsave(&stat->isp->stat_lock, flags);
+
+	while (1) {
+		buf = isp_stat_buf_find_oldest(stat);
+		if (!buf) {
+			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
+				stat->subdev.name);
+			return ERR_PTR(-EBUSY);
+		}
+		if (isp_stat_buf_check_magic(stat, buf)) {
+			dev_dbg(stat->isp->dev, "%s: current buffer has "
+				"corrupted data.\n", stat->subdev.name);
+			/* Mark empty because it doesn't have valid data. */
+			buf->empty = 1;
+		} else {
+			/* Buffer isn't corrupted. */
+			break;
+		}
+	}
+
+	stat->locked_buf = buf;
+
+	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+
+	if (buf->buf_size > data->buf_size) {
+		dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
+					 "not enough.\n", stat->subdev.name);
+		isp_stat_buf_release(stat);
+		return ERR_PTR(-EINVAL);
+	}
+
+	isp_stat_buf_sync_for_cpu(stat, buf);
+
+	rval = copy_to_user(data->buf,
+			    buf->virt_addr,
+			    buf->buf_size);
+
+	if (rval) {
+		dev_info(stat->isp->dev,
+			 "%s: failed copying %d bytes of stat data\n",
+			 stat->subdev.name, rval);
+		buf = ERR_PTR(-EFAULT);
+		isp_stat_buf_release(stat);
+	}
+
+	return buf;
+}
+
+static void isp_stat_bufs_free(struct ispstat *stat)
+{
+	struct isp_device *isp = stat->isp;
+	int i;
+
+	for (i = 0; i < STAT_MAX_BUFS; i++) {
+		struct ispstat_buffer *buf = &stat->buf[i];
+
+		if (!IS_COHERENT_BUF(stat)) {
+			if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
+				continue;
+			if (buf->iovm)
+				dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
+					     buf->iovm->sgt->nents,
+					     DMA_FROM_DEVICE);
+			omap_iommu_vfree(isp->domain, isp->dev,
+							buf->iommu_addr);
+		} else {
+			if (!buf->virt_addr)
+				continue;
+			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
+					  buf->virt_addr, buf->dma_addr);
+		}
+		buf->iommu_addr = 0;
+		buf->iovm = NULL;
+		buf->dma_addr = 0;
+		buf->virt_addr = NULL;
+		buf->empty = 1;
+	}
+
+	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
+		stat->subdev.name);
+
+	stat->buf_alloc_size = 0;
+	stat->active_buf = NULL;
+}
+
+static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
+{
+	struct isp_device *isp = stat->isp;
+	int i;
+
+	stat->buf_alloc_size = size;
+
+	for (i = 0; i < STAT_MAX_BUFS; i++) {
+		struct ispstat_buffer *buf = &stat->buf[i];
+		struct iovm_struct *iovm;
+
+		WARN_ON(buf->dma_addr);
+		buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
+							size, IOMMU_FLAG);
+		if (IS_ERR((void *)buf->iommu_addr)) {
+			dev_err(stat->isp->dev,
+				 "%s: Can't acquire memory for "
+				 "buffer %d\n", stat->subdev.name, i);
+			isp_stat_bufs_free(stat);
+			return -ENOMEM;
+		}
+
+		iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
+		if (!iovm ||
+		    !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
+				DMA_FROM_DEVICE)) {
+			isp_stat_bufs_free(stat);
+			return -ENOMEM;
+		}
+		buf->iovm = iovm;
+
+		buf->virt_addr = omap_da_to_va(stat->isp->dev,
+					  (u32)buf->iommu_addr);
+		buf->empty = 1;
+		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated. "
+			"iommu_addr=0x%08lx virt_addr=0x%08lx\n",
+			stat->subdev.name, i, buf->iommu_addr,
+			(unsigned long)buf->virt_addr);
+	}
+
+	return 0;
+}
+
+static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
+{
+	int i;
+
+	stat->buf_alloc_size = size;
+
+	for (i = 0; i < STAT_MAX_BUFS; i++) {
+		struct ispstat_buffer *buf = &stat->buf[i];
+
+		WARN_ON(buf->iommu_addr);
+		buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
+					&buf->dma_addr, GFP_KERNEL | GFP_DMA);
+
+		if (!buf->virt_addr || !buf->dma_addr) {
+			dev_info(stat->isp->dev,
+				 "%s: Can't acquire memory for "
+				 "DMA buffer %d\n", stat->subdev.name, i);
+			isp_stat_bufs_free(stat);
+			return -ENOMEM;
+		}
+		buf->empty = 1;
+
+		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated. "
+			"dma_addr=0x%08lx virt_addr=0x%08lx\n",
+			stat->subdev.name, i, (unsigned long)buf->dma_addr,
+			(unsigned long)buf->virt_addr);
+	}
+
+	return 0;
+}
+
+static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&stat->isp->stat_lock, flags);
+
+	BUG_ON(stat->locked_buf != NULL);
+
+	/* Are the old buffers big enough? */
+	if (stat->buf_alloc_size >= size) {
+		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+		return 0;
+	}
+
+	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
+		dev_info(stat->isp->dev,
+			 "%s: trying to allocate memory when busy\n",
+			 stat->subdev.name);
+		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+		return -EBUSY;
+	}
+
+	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+
+	isp_stat_bufs_free(stat);
+
+	if (IS_COHERENT_BUF(stat))
+		return isp_stat_bufs_alloc_dma(stat, size);
+	else
+		return isp_stat_bufs_alloc_iommu(stat, size);
+}
+
+static void isp_stat_queue_event(struct ispstat *stat, int err)
+{
+	struct video_device *vdev = stat->subdev.devnode;
+	struct v4l2_event event;
+	struct omap3isp_stat_event_status *status = (void *)event.u.data;
+
+	memset(&event, 0, sizeof(event));
+	if (!err) {
+		status->frame_number = stat->frame_number;
+		status->config_counter = stat->config_counter;
+	} else {
+		status->buf_err = 1;
+	}
+	event.type = stat->event_type;
+	v4l2_event_queue(vdev, &event);
+}
+
+/*
+ * omap3isp_stat_request_statistics - Request statistics.
+ * @data: Pointer to return statistics data.
+ *
+ * Returns 0 if successful.
+ */
+int omap3isp_stat_request_statistics(struct ispstat *stat,
+				     struct omap3isp_stat_data *data)
+{
+	struct ispstat_buffer *buf;
+
+	if (stat->state != ISPSTAT_ENABLED) {
+		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
+			stat->subdev.name);
+		return -EINVAL;
+	}
+
+	mutex_lock(&stat->ioctl_lock);
+	buf = isp_stat_buf_get(stat, data);
+	if (IS_ERR(buf)) {
+		mutex_unlock(&stat->ioctl_lock);
+		return PTR_ERR(buf);
+	}
+
+	data->ts = buf->ts;
+	data->config_counter = buf->config_counter;
+	data->frame_number = buf->frame_number;
+	data->buf_size = buf->buf_size;
+
+	buf->empty = 1;
+	isp_stat_buf_release(stat);
+	mutex_unlock(&stat->ioctl_lock);
+
+	return 0;
+}
+
+/*
+ * omap3isp_stat_config - Receives new statistic engine configuration.
+ * @new_conf: Pointer to config structure.
+ *
+ * Returns 0 if successful, -EINVAL if the new_conf pointer is NULL, -ENOMEM
+ * if it was unable to allocate memory for the buffer, or other errors if the
+ * parameters are invalid.
+ */
+int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
+{
+	int ret;
+	unsigned long irqflags;
+	struct ispstat_generic_config *user_cfg = new_conf;
+	u32 buf_size;
+
+	if (!new_conf) {
+		dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
+			stat->subdev.name);
+		return -EINVAL;
+	}
+
+	/* Don't dereference user_cfg before the NULL check above. */
+	buf_size = user_cfg->buf_size;
+
+	mutex_lock(&stat->ioctl_lock);
+
+	dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
+		"size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
+
+	ret = stat->ops->validate_params(stat, new_conf);
+	if (ret) {
+		mutex_unlock(&stat->ioctl_lock);
+		dev_dbg(stat->isp->dev, "%s: configuration values are "
+					"invalid.\n", stat->subdev.name);
+		return ret;
+	}
+
+	if (buf_size != user_cfg->buf_size)
+		dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
+			"request to 0x%08lx\n", stat->subdev.name,
+			(unsigned long)user_cfg->buf_size);
+
+	/*
+	 * Hack: H3A modules may need a doubled buffer size to avoid access
+	 * to an invalid memory address after an SBL overflow.
+	 * The buffer size is always PAGE_ALIGNED.
+	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
+	 * inserted at the end for data integrity check purposes.
+	 * Hack 3: the AF module writes one extra paxel of data, so the
+	 * buffer allocation must account for it to avoid invalid memory
+	 * access.
+	 * Hack 4: H3A needs to allocate extra space for the recover state.
+	 */
+	if (IS_H3A(stat)) {
+		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
+		if (IS_H3A_AF(stat))
+			/*
+			 * Add one extra paxel of data for each recover
+			 * buffer plus the 2 regular ones.
+			 */
+			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
+		if (stat->recover_priv) {
+			struct ispstat_generic_config *recover_cfg =
+				stat->recover_priv;
+			buf_size += recover_cfg->buf_size *
+				    NUM_H3A_RECOVER_BUFS;
+		}
+		buf_size = PAGE_ALIGN(buf_size);
+	} else { /* Histogram */
+		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
+	}
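+
+	/*
+	 * Editor's example with hypothetical numbers: for an H3A AF request of
+	 * user_cfg->buf_size = 0x1000 with a recover configuration whose
+	 * buf_size is 0x400, the code above computes
+	 *   0x1000 * 2 + MAGIC_SIZE
+	 *   + AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2)
+	 *   + 0x400 * NUM_H3A_RECOVER_BUFS
+	 * and then rounds the result up with PAGE_ALIGN().
+	 */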
+
+	ret = isp_stat_bufs_alloc(stat, buf_size);
+	if (ret) {
+		mutex_unlock(&stat->ioctl_lock);
+		return ret;
+	}
+
+	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+	stat->ops->set_params(stat, new_conf);
+	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+
+	/*
+	 * Return the future config_counter for this setup, so userspace
+	 * can *know* when it has been applied.
+	 */
+	user_cfg->config_counter = stat->config_counter + stat->inc_config;
+
+	/* Module has a valid configuration. */
+	stat->configured = 1;
+	dev_dbg(stat->isp->dev, "%s: module has been successfully "
+		"configured.\n", stat->subdev.name);
+
+	mutex_unlock(&stat->ioctl_lock);
+
+	return 0;
+}
+
+/*
+ * isp_stat_buf_process - Process statistic buffers.
+ * @buf_state: indicates whether the buffer is ready to be processed. It's
+ *	       necessary because the histogram module needs to copy the data
+ *	       from its internal memory before it can process the buffer.
+ */
+static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
+{
+	int ret = STAT_NO_BUF;
+
+	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
+	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
+		ret = isp_stat_buf_queue(stat);
+		isp_stat_buf_next(stat);
+	}
+
+	return ret;
+}
+
+int omap3isp_stat_pcr_busy(struct ispstat *stat)
+{
+	return stat->ops->busy(stat);
+}
+
+int omap3isp_stat_busy(struct ispstat *stat)
+{
+	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
+		(stat->state != ISPSTAT_DISABLED);
+}
+
+/*
+ * isp_stat_pcr_enable - Disables/Enables statistic engines.
+ * @pcr_enable: 0/1 - Disables/Enables the engine.
+ *
+ * Must be called from ISP driver when the module is idle and synchronized
+ * with CCDC.
+ */
+static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
+{
+	if ((stat->state != ISPSTAT_ENABLING &&
+	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
+		/* Userspace has disabled the module. Aborting. */
+		return;
+
+	stat->ops->enable(stat, pcr_enable);
+	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
+		stat->state = ISPSTAT_DISABLED;
+	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
+		stat->state = ISPSTAT_ENABLED;
+}
+
+void omap3isp_stat_suspend(struct ispstat *stat)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&stat->isp->stat_lock, flags);
+
+	if (stat->state != ISPSTAT_DISABLED)
+		stat->ops->enable(stat, 0);
+	if (stat->state == ISPSTAT_ENABLED)
+		stat->state = ISPSTAT_SUSPENDED;
+
+	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+}
+
+void omap3isp_stat_resume(struct ispstat *stat)
+{
+	/* Module will be re-enabled with its pipeline */
+	if (stat->state == ISPSTAT_SUSPENDED)
+		stat->state = ISPSTAT_ENABLING;
+}
+
+static void isp_stat_try_enable(struct ispstat *stat)
+{
+	unsigned long irqflags;
+
+	if (stat->priv == NULL)
+		/* driver wasn't initialised */
+		return;
+
+	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
+	    stat->buf_alloc_size) {
+		/*
+		 * Userspace requested to enable the engine but it hasn't been
+		 * enabled yet. Do it now.
+		 */
+		stat->update = 1;
+		isp_stat_buf_next(stat);
+		stat->ops->setup_regs(stat, stat->priv);
+		isp_stat_buf_insert_magic(stat, stat->active_buf);
+
+		/*
+		 * The H3A module has some hw issues which force the driver to
+		 * ignore the next buffers even if it was disabled in the
+		 * meantime. The histogram module, on the other hand, shouldn't
+		 * ignore buffers anymore if it's being enabled.
+		 */
+		if (!IS_H3A(stat))
+			atomic_set(&stat->buf_err, 0);
+
+		isp_stat_pcr_enable(stat, 1);
+		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
+			stat->subdev.name);
+	} else {
+		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+	}
+}
+
+void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
+{
+	isp_stat_try_enable(stat);
+}
+
+void omap3isp_stat_sbl_overflow(struct ispstat *stat)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+	/*
+	 * Due to an H3A hw issue which prevents the next buffer from starting
+	 * at the correct memory address, 2 buffers must be ignored.
+	 */
+	atomic_set(&stat->buf_err, 2);
+
+	/*
+	 * If more than one SBL overflow happens in a row, the H3A module may
+	 * access an invalid memory region.
+	 * stat->sbl_ovl_recover is set to tell the driver to temporarily use a
+	 * soft configuration which helps to avoid consecutive overflows.
+	 */
+	if (stat->recover_priv)
+		stat->sbl_ovl_recover = 1;
+	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+}
+
+/*
+ * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
+ * @enable: 0/1 - Disables/Enables the engine.
+ *
+ * Client should configure all the module registers before this.
+ * This function can be called from a userspace request.
+ */
+int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
+{
+	unsigned long irqflags;
+
+	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
+		stat->subdev.name, enable ? "enable" : "disable");
+
+	/* Prevent enabling while configuring */
+	mutex_lock(&stat->ioctl_lock);
+
+	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+
+	if (!stat->configured && enable) {
+		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+		mutex_unlock(&stat->ioctl_lock);
+		dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
+			"never been successfully configured so far.\n",
+			stat->subdev.name);
+		return -EINVAL;
+	}
+
+	if (enable) {
+		if (stat->state == ISPSTAT_DISABLING)
+			/* Previous disabling request wasn't done yet */
+			stat->state = ISPSTAT_ENABLED;
+		else if (stat->state == ISPSTAT_DISABLED)
+			/* Module is now being enabled */
+			stat->state = ISPSTAT_ENABLING;
+	} else {
+		if (stat->state == ISPSTAT_ENABLING) {
+			/* Previous enabling request wasn't done yet */
+			stat->state = ISPSTAT_DISABLED;
+		} else if (stat->state == ISPSTAT_ENABLED) {
+			/* Module is now being disabled */
+			stat->state = ISPSTAT_DISABLING;
+			isp_stat_buf_clear(stat);
+		}
+	}
+
+	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+	mutex_unlock(&stat->ioctl_lock);
+
+	return 0;
+}
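+
+/*
+ * Editor's note: a minimal sketch (not part of this driver) of the expected
+ * calling sequence from a statistics client, assuming a hypothetical 'aewb'
+ * instance configured with a struct omap3isp_h3a_aewb_config:
+ *
+ *	ret = omap3isp_stat_config(aewb, &aewb_cfg);
+ *	if (!ret)
+ *		ret = omap3isp_stat_enable(aewb, 1);
+ *
+ * Note that this only moves the state machine to ISPSTAT_ENABLING; the PCR
+ * bit is set later by isp_stat_try_enable() once the pipeline is streaming
+ * (see omap3isp_stat_s_stream() and omap3isp_stat_isr_frame_sync()).
+ */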
+
+int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct ispstat *stat = v4l2_get_subdevdata(subdev);
+
+	if (enable) {
+		/*
+		 * Only set the PCR enable bit if the module was previously
+		 * enabled through ioctl.
+		 */
+		isp_stat_try_enable(stat);
+	} else {
+		unsigned long flags;
+		/* Disable PCR bit and config enable field */
+		omap3isp_stat_enable(stat, 0);
+		spin_lock_irqsave(&stat->isp->stat_lock, flags);
+		stat->ops->enable(stat, 0);
+		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
+
+		/*
+		 * If the module isn't busy, a new interrupt may or may not
+		 * come to set the state to DISABLED. As the histogram module
+		 * needs to read its internal memory to clear it, let the
+		 * interrupt handler be responsible for changing the state to
+		 * DISABLED. If the last interrupt is still on its way, it's
+		 * safe, as the handler will ignore the second call once the
+		 * state is already set to DISABLED. Synchronizing the
+		 * histogram module with streamoff is necessary, since the
+		 * module may otherwise be considered idle before the last
+		 * SDMA transfer starts if we simply returned here.
+		 */
+		if (!omap3isp_stat_pcr_busy(stat))
+			omap3isp_stat_isr(stat);
+
+		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
+			stat->subdev.name);
+	}
+
+	return 0;
+}
+
+/*
+ * __stat_isr - Interrupt handler for statistic drivers
+ */
+static void __stat_isr(struct ispstat *stat, int from_dma)
+{
+	int ret = STAT_BUF_DONE;
+	int buf_processing;
+	unsigned long irqflags;
+	struct isp_pipeline *pipe;
+
+	/*
+	 * stat->buf_processing must be set before the module is disabled. It's
+	 * necessary to avoid reporting too early that the buffers aren't busy
+	 * in case SDMA is going to be used.
+	 */
+	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+	if (stat->state == ISPSTAT_DISABLED) {
+		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+		return;
+	}
+	buf_processing = stat->buf_processing;
+	stat->buf_processing = 1;
+	stat->ops->enable(stat, 0);
+
+	if (buf_processing && !from_dma) {
+		if (stat->state == ISPSTAT_ENABLED) {
+			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+			dev_err(stat->isp->dev,
+				"%s: interrupt occurred when module was still "
+				"processing a buffer.\n", stat->subdev.name);
+			ret = STAT_NO_BUF;
+			goto out;
+		} else {
+			/*
+			 * The interrupt handler was called from streamoff,
+			 * when the module wasn't busy anymore, to ensure it
+			 * gets disabled after processing the last buffer. If
+			 * such buffer processing has already started, there's
+			 * no need to do anything else.
+			 */
+			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+
+	/* If it's busy we can't process this buffer anymore */
+	if (!omap3isp_stat_pcr_busy(stat)) {
+		if (!from_dma && stat->ops->buf_process)
+			/* Module still needs to copy data to the buffer. */
+			ret = stat->ops->buf_process(stat);
+		if (ret == STAT_BUF_WAITING_DMA)
+			/* Buffer is not ready yet */
+			return;
+
+		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
+
+		/*
+		 * The histogram module needs to read its internal memory to
+		 * clear it before being disabled. For that reason, the common
+		 * statistics layer can only return after calling the module's
+		 * buf_process() operator.
+		 */
+		if (stat->state == ISPSTAT_DISABLING) {
+			stat->state = ISPSTAT_DISABLED;
+			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+			stat->buf_processing = 0;
+			return;
+		}
+		pipe = to_isp_pipeline(&stat->subdev.entity);
+		stat->frame_number = atomic_read(&pipe->frame_number);
+
+		/*
+		 * Up to this point, 'ret' indicates whether the buffer is
+		 * ready to be processed. From here on, it holds the result of
+		 * the buffer processing.
+		 */
+		ret = isp_stat_buf_process(stat, ret);
+
+		if (likely(!stat->sbl_ovl_recover)) {
+			stat->ops->setup_regs(stat, stat->priv);
+		} else {
+			/*
+			 * Use the recover configuration to increase the chance
+			 * of good buffer processing and to bring the H3A
+			 * module back to a valid state.
+			 */
+			stat->update = 1;
+			stat->ops->setup_regs(stat, stat->recover_priv);
+			stat->sbl_ovl_recover = 0;
+
+			/*
+			 * Set 'update' so the module switches back to the
+			 * regular configuration after the next buffer.
+			 */
+			stat->update = 1;
+		}
+
+		isp_stat_buf_insert_magic(stat, stat->active_buf);
+
+		/*
+		 * Hack: H3A modules may access an invalid memory address or
+		 * send corrupted data to userspace if more than 1 SBL overflow
+		 * happens in a row without the buffer's start memory address
+		 * being rewritten in the meantime. Such a situation is avoided
+		 * if the module is not immediately re-enabled when the ISR
+		 * misses the timing to process the buffer and to set up the
+		 * registers. Because of that, pcr_enable(1) was moved inside
+		 * this 'if' block. But the next interrupt will still happen,
+		 * as the module was busy during pcr_enable(0).
+		 */
+		isp_stat_pcr_enable(stat, 1);
+		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
+	} else {
+		/*
+		 * If an SBL overflow occurs and the H3A driver misses the
+		 * timing to process the buffer, stat->buf_err is set and won't
+		 * be cleared now, so the next buffer will be correctly ignored.
+		 * It's necessary due to a hw issue which makes the next H3A
+		 * buffer start from the memory address where the previous one
+		 * stopped, instead of where it was configured to.
+		 * Do not "stat->buf_err = 0" here.
+		 */
+
+		if (stat->ops->buf_process)
+			/*
+			 * The driver may need to erase the current data prior
+			 * to processing a new buffer. If it misses the timing,
+			 * the next buffer might be wrong and should be
+			 * ignored. This happens only for the histogram module.
+			 */
+			atomic_set(&stat->buf_err, 1);
+
+		ret = STAT_NO_BUF;
+		dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
+					"device is busy.\n", stat->subdev.name);
+	}
+
+out:
+	stat->buf_processing = 0;
+	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
+}
+
+void omap3isp_stat_isr(struct ispstat *stat)
+{
+	__stat_isr(stat, 0);
+}
+
+void omap3isp_stat_dma_isr(struct ispstat *stat)
+{
+	__stat_isr(stat, 1);
+}
+
+int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
+				  struct v4l2_fh *fh,
+				  struct v4l2_event_subscription *sub)
+{
+	struct ispstat *stat = v4l2_get_subdevdata(subdev);
+
+	if (sub->type != stat->event_type)
+		return -EINVAL;
+
+	return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
+}
+
+int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
+				    struct v4l2_fh *fh,
+				    struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+void omap3isp_stat_unregister_entities(struct ispstat *stat)
+{
+	v4l2_device_unregister_subdev(&stat->subdev);
+}
+
+int omap3isp_stat_register_entities(struct ispstat *stat,
+				    struct v4l2_device *vdev)
+{
+	return v4l2_device_register_subdev(vdev, &stat->subdev);
+}
+
+static int isp_stat_init_entities(struct ispstat *stat, const char *name,
+				  const struct v4l2_subdev_ops *sd_ops)
+{
+	struct v4l2_subdev *subdev = &stat->subdev;
+	struct media_entity *me = &subdev->entity;
+
+	v4l2_subdev_init(subdev, sd_ops);
+	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
+	subdev->grp_id = 1 << 16;	/* group ID for isp subdevs */
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+	v4l2_set_subdevdata(subdev, stat);
+
+	stat->pad.flags = MEDIA_PAD_FL_SINK;
+	me->ops = NULL;
+
+	return media_entity_init(me, 1, &stat->pad, 0);
+}
+
+int omap3isp_stat_init(struct ispstat *stat, const char *name,
+		       const struct v4l2_subdev_ops *sd_ops)
+{
+	int ret;
+
+	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
+	if (!stat->buf)
+		return -ENOMEM;
+
+	isp_stat_buf_clear(stat);
+	mutex_init(&stat->ioctl_lock);
+	atomic_set(&stat->buf_err, 0);
+
+	ret = isp_stat_init_entities(stat, name, sd_ops);
+	if (ret < 0) {
+		mutex_destroy(&stat->ioctl_lock);
+		kfree(stat->buf);
+	}
+
+	return ret;
+}
+
+void omap3isp_stat_cleanup(struct ispstat *stat)
+{
+	media_entity_cleanup(&stat->subdev.entity);
+	mutex_destroy(&stat->ioctl_lock);
+	isp_stat_bufs_free(stat);
+	kfree(stat->buf);
+}
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
new file mode 100644
index 0000000..9b7c865
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -0,0 +1,169 @@
+/*
+ * ispstat.h
+ *
+ * TI OMAP3 ISP - Statistics core
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Contacts: David Cohen <dacohen@gmail.com>
+ *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_STAT_H
+#define OMAP3_ISP_STAT_H
+
+#include <linux/types.h>
+#include <linux/omap3isp.h>
+#include <plat/dma.h>
+#include <media/v4l2-event.h>
+
+#include "isp.h"
+#include "ispvideo.h"
+
+#define STAT_MAX_BUFS		5
+#define STAT_NEVENTS		8
+
+#define STAT_BUF_DONE		0	/* Buffer is ready */
+#define STAT_NO_BUF		1	/* An error has occurred */
+#define STAT_BUF_WAITING_DMA	2	/* Histogram only: DMA is running */
+
+struct ispstat;
+
+struct ispstat_buffer {
+	unsigned long iommu_addr;
+	struct iovm_struct *iovm;
+	void *virt_addr;
+	dma_addr_t dma_addr;
+	struct timeval ts;
+	u32 buf_size;
+	u32 frame_number;
+	u16 config_counter;
+	u8 empty;
+};
+
+struct ispstat_ops {
+	/*
+	 * Validate new params configuration.
+	 * new_conf->buf_size value must be changed to the exact buffer size
+	 * necessary for the new configuration if it's smaller.
+	 */
+	int (*validate_params)(struct ispstat *stat, void *new_conf);
+
+	/*
+	 * Save new params configuration.
+	 * stat->priv->buf_size value must be set to the exact buffer size for
+	 * the new configuration.
+	 * stat->update is set to 1 if the new configuration is different from
+	 * the current one.
+	 */
+	void (*set_params)(struct ispstat *stat, void *new_conf);
+
+	/* Apply stored configuration. */
+	void (*setup_regs)(struct ispstat *stat, void *priv);
+
+	/* Enable/Disable module. */
+	void (*enable)(struct ispstat *stat, int enable);
+
+	/* Verify if the module is busy. */
+	int (*busy)(struct ispstat *stat);
+
+	/* Used for specific operations during generic buf process task. */
+	int (*buf_process)(struct ispstat *stat);
+};
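+
+/*
+ * Editor's sketch with hypothetical names: each statistics module is expected
+ * to provide its own ops table and hook it up before registering with the
+ * core, e.g.
+ *
+ *	static const struct ispstat_ops h3a_aewb_ops = {
+ *		.validate_params	= h3a_aewb_validate_params,
+ *		.set_params		= h3a_aewb_set_params,
+ *		.setup_regs		= h3a_aewb_setup_regs,
+ *		.enable			= h3a_aewb_enable,
+ *		.busy			= h3a_aewb_busy,
+ *	};
+ *
+ *	aewb->ops = &h3a_aewb_ops;
+ *	ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
+ */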
+
+enum ispstat_state_t {
+	ISPSTAT_DISABLED = 0,
+	ISPSTAT_DISABLING,
+	ISPSTAT_ENABLED,
+	ISPSTAT_ENABLING,
+	ISPSTAT_SUSPENDED,
+};
+
+struct ispstat {
+	struct v4l2_subdev subdev;
+	struct media_pad pad;	/* sink pad */
+
+	/* Control */
+	unsigned configured:1;
+	unsigned update:1;
+	unsigned buf_processing:1;
+	unsigned sbl_ovl_recover:1;
+	u8 inc_config;
+	atomic_t buf_err;
+	enum ispstat_state_t state;	/* enabling/disabling state */
+	struct omap_dma_channel_params dma_config;
+	struct isp_device *isp;
+	void *priv;		/* pointer to priv config struct */
+	void *recover_priv;	/* pointer to recover priv configuration */
+	struct mutex ioctl_lock; /* serialize private ioctl */
+
+	const struct ispstat_ops *ops;
+
+	/* Buffer */
+	u8 wait_acc_frames;
+	u16 config_counter;
+	u32 frame_number;
+	u32 buf_size;
+	u32 buf_alloc_size;
+	int dma_ch;
+	unsigned long event_type;
+	struct ispstat_buffer *buf;
+	struct ispstat_buffer *active_buf;
+	struct ispstat_buffer *locked_buf;
+};
+
+struct ispstat_generic_config {
+	/*
+	 * Fields must be in the same order as in:
+	 *  - omap3isp_h3a_aewb_config
+	 *  - omap3isp_h3a_af_config
+	 *  - omap3isp_hist_config
+	 */
+	u32 buf_size;
+	u16 config_counter;
+};
+
+int omap3isp_stat_config(struct ispstat *stat, void *new_conf);
+int omap3isp_stat_request_statistics(struct ispstat *stat,
+				     struct omap3isp_stat_data *data);
+int omap3isp_stat_init(struct ispstat *stat, const char *name,
+		       const struct v4l2_subdev_ops *sd_ops);
+void omap3isp_stat_cleanup(struct ispstat *stat);
+int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
+				  struct v4l2_fh *fh,
+				  struct v4l2_event_subscription *sub);
+int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
+				    struct v4l2_fh *fh,
+				    struct v4l2_event_subscription *sub);
+int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable);
+
+int omap3isp_stat_busy(struct ispstat *stat);
+int omap3isp_stat_pcr_busy(struct ispstat *stat);
+void omap3isp_stat_suspend(struct ispstat *stat);
+void omap3isp_stat_resume(struct ispstat *stat);
+int omap3isp_stat_enable(struct ispstat *stat, u8 enable);
+void omap3isp_stat_sbl_overflow(struct ispstat *stat);
+void omap3isp_stat_isr(struct ispstat *stat);
+void omap3isp_stat_isr_frame_sync(struct ispstat *stat);
+void omap3isp_stat_dma_isr(struct ispstat *stat);
+int omap3isp_stat_register_entities(struct ispstat *stat,
+				    struct v4l2_device *vdev);
+void omap3isp_stat_unregister_entities(struct ispstat *stat);
+
+#endif /* OMAP3_ISP_STAT_H */
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
new file mode 100644
index 0000000..3a5085e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -0,0 +1,1403 @@
+/*
+ * ispvideo.c
+ *
+ * TI OMAP3 ISP - Generic video node
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
+#include <plat/omap-pm.h>
+
+#include "ispvideo.h"
+#include "isp.h"
+
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+/*
+ * NOTE: When adding new media bus codes, always remember to add
+ * corresponding in-memory formats to the table below!!!
+ */
+static struct isp_format_info formats[] = {
+	{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_PIX_FMT_GREY, 8, 1, },
+	{ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
+	  V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_PIX_FMT_Y10, 10, 2, },
+	{ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
+	  V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_PIX_FMT_Y12, 12, 2, },
+	{ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
+	{ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
+	{ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
+	{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
+	{ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+	  V4L2_MBUS_FMT_SBGGR10_1X10, 0,
+	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
+	{ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+	  V4L2_MBUS_FMT_SGBRG10_1X10, 0,
+	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
+	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+	  V4L2_MBUS_FMT_SGRBG10_1X10, 0,
+	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
+	{ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+	  V4L2_MBUS_FMT_SRGGB10_1X10, 0,
+	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
+	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
+	  V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
+	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
+	  V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
+	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
+	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
+	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
+	  V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
+	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
+	  V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
+	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
+	  V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
+	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
+	  V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
+	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
+	  V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
+	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
+	  V4L2_MBUS_FMT_UYVY8_1X16, 0,
+	  V4L2_PIX_FMT_UYVY, 16, 2, },
+	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
+	  V4L2_MBUS_FMT_YUYV8_1X16, 0,
+	  V4L2_PIX_FMT_YUYV, 16, 2, },
+	{ V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
+	  V4L2_MBUS_FMT_UYVY8_2X8, 0,
+	  V4L2_PIX_FMT_UYVY, 8, 2, },
+	{ V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
+	  V4L2_MBUS_FMT_YUYV8_2X8, 0,
+	  V4L2_PIX_FMT_YUYV, 8, 2, },
+	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
+	 * module and avoid NULL pointer dereferences.
+	 */
+	{ 0, }
+};
+
+const struct isp_format_info *
+omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (formats[i].code == code)
+			return &formats[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
+ * @video: ISP video instance
+ * @mbus: v4l2_mbus_framefmt format (input)
+ * @pix: v4l2_pix_format format (output)
+ *
+ * Fill the output pix structure with information from the input mbus format.
+ * The bytesperline and sizeimage fields are computed from the requested bytes
+ * per line value in the pix format and information from the video instance.
+ *
+ * Return the number of padding bytes at the end of the line.
+ */
+static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
+					  const struct v4l2_mbus_framefmt *mbus,
+					  struct v4l2_pix_format *pix)
+{
+	unsigned int bpl = pix->bytesperline;
+	unsigned int min_bpl;
+	unsigned int i;
+
+	memset(pix, 0, sizeof(*pix));
+	pix->width = mbus->width;
+	pix->height = mbus->height;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (formats[i].code == mbus->code)
+			break;
+	}
+
+	if (WARN_ON(i == ARRAY_SIZE(formats)))
+		return 0;
+
+	min_bpl = pix->width * formats[i].bpp;
+
+	/* Clamp the requested bytes per line value. If the maximum bytes per
+	 * line value is zero, the module doesn't support user configurable line
+	 * sizes. Override the requested value with the minimum in that case.
+	 */
+	if (video->bpl_max)
+		bpl = clamp(bpl, min_bpl, video->bpl_max);
+	else
+		bpl = min_bpl;
+
+	if (!video->bpl_zero_padding || bpl != min_bpl)
+		bpl = ALIGN(bpl, video->bpl_alignment);
+
+	pix->pixelformat = formats[i].pixelformat;
+	pix->bytesperline = bpl;
+	pix->sizeimage = pix->bytesperline * pix->height;
+	pix->colorspace = mbus->colorspace;
+	pix->field = mbus->field;
+
+	return bpl - min_bpl;
+}
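+
+/*
+ * Editor's example with hypothetical values: for a 10-bit Bayer format stored
+ * in 2 bytes per pixel (bpp == 2) and a 320-pixel wide frame, min_bpl is 640.
+ * A requested bytesperline of 700 on a video node with bpl_alignment == 32 and
+ * a sufficiently large bpl_max is aligned up to 704, and the function returns
+ * 64 bytes of end-of-line padding.
+ */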
+
+static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
+				  struct v4l2_mbus_framefmt *mbus)
+{
+	unsigned int i;
+
+	memset(mbus, 0, sizeof(*mbus));
+	mbus->width = pix->width;
+	mbus->height = pix->height;
+
+	/* Skip the last format in the loop so that it will be selected if no
+	 * match is found.
+	 */
+	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
+		if (formats[i].pixelformat == pix->pixelformat)
+			break;
+	}
+
+	mbus->code = formats[i].code;
+	mbus->colorspace = pix->colorspace;
+	mbus->field = pix->field;
+}
+
+static struct v4l2_subdev *
+isp_video_remote_subdev(struct isp_video *video, u32 *pad)
+{
+	struct media_pad *remote;
+
+	remote = media_entity_remote_source(&video->pad);
+
+	if (remote == NULL ||
+	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+		return NULL;
+
+	if (pad)
+		*pad = remote->index;
+
+	return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/*
+ * Walk the graph starting at the video node to build the mask of entities in
+ * the pipeline and locate the ISP video instance at its far end, which is then
+ * recorded as the pipeline input or output.
+ */
+static int isp_video_get_graph_data(struct isp_video *video,
+				    struct isp_pipeline *pipe)
+{
+	struct media_entity_graph graph;
+	struct media_entity *entity = &video->video.entity;
+	struct media_device *mdev = entity->parent;
+	struct isp_video *far_end = NULL;
+
+	mutex_lock(&mdev->graph_mutex);
+	media_entity_graph_walk_start(&graph, entity);
+
+	while ((entity = media_entity_graph_walk_next(&graph))) {
+		struct isp_video *__video;
+
+		pipe->entities |= 1 << entity->id;
+
+		if (far_end != NULL)
+			continue;
+
+		if (entity == &video->video.entity)
+			continue;
+
+		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+			continue;
+
+		__video = to_isp_video(media_entity_to_video_device(entity));
+		if (__video->type != video->type)
+			far_end = __video;
+	}
+
+	mutex_unlock(&mdev->graph_mutex);
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		pipe->input = far_end;
+		pipe->output = video;
+	} else {
+		if (far_end == NULL)
+			return -EPIPE;
+
+		pipe->input = video;
+		pipe->output = far_end;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a pipeline by checking both ends of all links for format
+ * discrepancies.
+ *
+ * Compute the minimum time per frame value as the maximum of time per frame
+ * limits reported by every block in the pipeline.
+ *
+ * Return 0 if all formats match, or -EPIPE if at least one link is found with
+ * different formats on its two ends or if the pipeline doesn't start with a
+ * video source (either a subdev with no input pad, or a non-subdev entity).
+ */
+static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
+{
+	struct isp_device *isp = pipe->output->isp;
+	struct media_pad *pad;
+	struct v4l2_subdev *subdev;
+
+	subdev = isp_video_remote_subdev(pipe->output, NULL);
+	if (subdev == NULL)
+		return -EPIPE;
+
+	while (1) {
+		/* Retrieve the sink format */
+		pad = &subdev->entity.pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		/* Update the maximum frame rate */
+		if (subdev == &isp->isp_res.subdev)
+			omap3isp_resizer_max_rate(&isp->isp_res,
+						  &pipe->max_rate);
+
+		/* Retrieve the source format. Return an error if no source
+		 * entity can be found, and stop checking the pipeline if the
+		 * source entity isn't a subdev.
+		 */
+		pad = media_entity_remote_source(pad);
+		if (pad == NULL)
+			return -EPIPE;
+
+		if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		subdev = media_entity_to_v4l2_subdev(pad->entity);
+	}
+
+	return 0;
+}
+
+static int
+__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
+{
+	struct v4l2_subdev_format fmt;
+	struct v4l2_subdev *subdev;
+	u32 pad;
+	int ret;
+
+	subdev = isp_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+
+	fmt.pad = pad;
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+	if (ret == -ENOIOCTLCMD)
+		ret = -EINVAL;
+
+	mutex_unlock(&video->mutex);
+
+	if (ret)
+		return ret;
+
+	format->type = video->type;
+	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+}
+
+static int
+isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
+{
+	struct v4l2_format format;
+	int ret;
+
+	memcpy(&format, &vfh->format, sizeof(format));
+	ret = __isp_video_get_format(video, &format);
+	if (ret < 0)
+		return ret;
+
+	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
+	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
+	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
+	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
+	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
+		return -EINVAL;
+
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * IOMMU management
+ */
+
+#define IOMMU_FLAG	(IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
+
+/*
+ * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
+ * @isp: Pointer to the OMAP3 ISP device.
+ * @sglist: Pointer to the source scatter gather list to map.
+ * @sglen: Number of elements of the scatter gather list.
+ *
+ * Returns the device address mapped by the ISP MMU, or -ENOMEM if we ran out
+ * of memory.
+ */
+static dma_addr_t
+ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
+{
+	struct sg_table *sgt;
+	u32 da;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (sgt == NULL)
+		return -ENOMEM;
+
+	sgt->sgl = (struct scatterlist *)sglist;
+	sgt->nents = sglen;
+	sgt->orig_nents = sglen;
+
+	da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
+	if (IS_ERR_VALUE(da))
+		kfree(sgt);
+
+	return da;
+}
+
+/*
+ * ispmmu_vunmap - Unmap a device address from the ISP MMU
+ * @isp: Pointer to the OMAP3 ISP device.
+ * @da: Device address generated by an ispmmu_vmap call.
+ */
+static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
+{
+	struct sg_table *sgt;
+
+	sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
+	kfree(sgt);
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static void isp_video_queue_prepare(struct isp_video_queue *queue,
+				    unsigned int *nbuffers, unsigned int *size)
+{
+	struct isp_video_fh *vfh =
+		container_of(queue, struct isp_video_fh, queue);
+	struct isp_video *video = vfh->video;
+
+	*size = vfh->format.fmt.pix.sizeimage;
+	if (*size == 0)
+		return;
+
+	*nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
+}
+
+static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
+{
+	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
+	struct isp_buffer *buffer = to_isp_buffer(buf);
+	struct isp_video *video = vfh->video;
+
+	if (buffer->isp_addr) {
+		ispmmu_vunmap(video->isp, buffer->isp_addr);
+		buffer->isp_addr = 0;
+	}
+}
+
+static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
+{
+	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
+	struct isp_buffer *buffer = to_isp_buffer(buf);
+	struct isp_video *video = vfh->video;
+	unsigned long addr;
+
+	addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
+	if (IS_ERR_VALUE(addr))
+		return -EIO;
+
+	if (!IS_ALIGNED(addr, 32)) {
+		dev_dbg(video->isp->dev, "Buffer address must be "
+			"aligned to 32 bytes boundary.\n");
+		ispmmu_vunmap(video->isp, buffer->isp_addr);
+		return -EINVAL;
+	}
+
+	buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
+	buffer->isp_addr = addr;
+	return 0;
+}
+
+/*
+ * isp_video_buffer_queue - Add buffer to streaming queue
+ * @buf: Video buffer
+ *
+ * In memory-to-memory mode, start streaming on the pipeline if buffers are
+ * queued on both the input and the output and the pipeline isn't already busy.
+ * If the pipeline is busy, it will be restarted in the output module interrupt
+ * handler.
+ */
+static void isp_video_buffer_queue(struct isp_video_buffer *buf)
+{
+	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
+	struct isp_buffer *buffer = to_isp_buffer(buf);
+	struct isp_video *video = vfh->video;
+	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+	enum isp_pipeline_state state;
+	unsigned long flags;
+	unsigned int empty;
+	unsigned int start;
+
+	empty = list_empty(&video->dmaqueue);
+	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
+
+	if (empty) {
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			state = ISP_PIPELINE_QUEUE_OUTPUT;
+		else
+			state = ISP_PIPELINE_QUEUE_INPUT;
+
+		spin_lock_irqsave(&pipe->lock, flags);
+		pipe->state |= state;
+		video->ops->queue(video, buffer);
+		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
+
+		start = isp_pipeline_ready(pipe);
+		if (start)
+			pipe->state |= ISP_PIPELINE_STREAM;
+		spin_unlock_irqrestore(&pipe->lock, flags);
+
+		if (start)
+			omap3isp_pipeline_set_stream(pipe,
+						ISP_PIPELINE_STREAM_SINGLESHOT);
+	}
+}
+
+static const struct isp_video_queue_operations isp_video_queue_ops = {
+	.queue_prepare = &isp_video_queue_prepare,
+	.buffer_prepare = &isp_video_buffer_prepare,
+	.buffer_queue = &isp_video_buffer_queue,
+	.buffer_cleanup = &isp_video_buffer_cleanup,
+};
+
+/*
+ * omap3isp_video_buffer_next - Complete the current buffer and return the next
+ * @video: ISP video object
+ *
+ * Remove the current video buffer from the DMA queue and fill its timestamp,
+ * field count and state fields before waking up its completion handler.
+ *
+ * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
+ * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
+ *
+ * The DMA queue is expected to contain at least one buffer.
+ *
+ * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
+ * empty.
+ */
+struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
+{
+	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+	struct isp_video_queue *queue = video->queue;
+	enum isp_pipeline_state state;
+	struct isp_video_buffer *buf;
+	unsigned long flags;
+	struct timespec ts;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+	if (WARN_ON(list_empty(&video->dmaqueue))) {
+		spin_unlock_irqrestore(&queue->irqlock, flags);
+		return NULL;
+	}
+
+	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
+			       irqlist);
+	list_del(&buf->irqlist);
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+
+	ktime_get_ts(&ts);
+	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
+	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+	/* Do frame number propagation only if this is the output video node.
+	 * Frame number either comes from the CSI receivers or it gets
+	 * incremented here if H3A is not active.
+	 * Note: There is no guarantee that the output buffer will finish
+	 * first, so the input number might lag behind by 1 in some cases.
+	 */
+	if (video == pipe->output && !pipe->do_propagation)
+		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
+	else
+		buf->vbuf.sequence = atomic_read(&pipe->frame_number);
+
+	/* Report pipeline errors to userspace on the capture device side. */
+	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
+		buf->state = ISP_BUF_STATE_ERROR;
+		pipe->error = false;
+	} else {
+		buf->state = ISP_BUF_STATE_DONE;
+	}
+
+	wake_up(&buf->wait);
+
+	if (list_empty(&video->dmaqueue)) {
+		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			state = ISP_PIPELINE_QUEUE_OUTPUT
+			      | ISP_PIPELINE_STREAM;
+		else
+			state = ISP_PIPELINE_QUEUE_INPUT
+			      | ISP_PIPELINE_STREAM;
+
+		spin_lock_irqsave(&pipe->lock, flags);
+		pipe->state &= ~state;
+		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
+			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+		spin_unlock_irqrestore(&pipe->lock, flags);
+		return NULL;
+	}
+
+	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
+		spin_lock_irqsave(&pipe->lock, flags);
+		pipe->state &= ~ISP_PIPELINE_STREAM;
+		spin_unlock_irqrestore(&pipe->lock, flags);
+	}
+
+	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
+			       irqlist);
+	buf->state = ISP_BUF_STATE_ACTIVE;
+	return to_isp_buffer(buf);
+}
+
+/*
+ * omap3isp_video_resume - Perform resume operation on the buffers
+ * @video: ISP video object
+ * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
+ *
+ * This function is intended to be used in a suspend/resume scenario. It
+ * requests the video queue layer to discard buffers marked as DONE if it's in
+ * continuous mode and requests the ISP modules to requeue the ACTIVE buffer
+ * if there is one.
+ */
+void omap3isp_video_resume(struct isp_video *video, int continuous)
+{
+	struct isp_buffer *buf = NULL;
+
+	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		omap3isp_video_queue_discard_done(video->queue);
+
+	if (!list_empty(&video->dmaqueue)) {
+		buf = list_first_entry(&video->dmaqueue,
+				       struct isp_buffer, buffer.irqlist);
+		video->ops->queue(video, buf);
+		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
+	} else {
+		if (continuous)
+			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+	}
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+	struct isp_video *video = video_drvdata(file);
+
+	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, video->video.name, sizeof(cap->card));
+	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	else
+		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static int
+isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+	struct isp_video *video = video_drvdata(file);
+
+	if (format->type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+	*format = vfh->format;
+	mutex_unlock(&video->mutex);
+
+	return 0;
+}
+
+static int
+isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+	struct isp_video *video = video_drvdata(file);
+	struct v4l2_mbus_framefmt fmt;
+
+	if (format->type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+
+	/* Fill the bytesperline and sizeimage fields by converting to media bus
+	 * format and back to pixel format.
+	 */
+	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
+	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
+
+	vfh->format = *format;
+
+	mutex_unlock(&video->mutex);
+	return 0;
+}
+
+static int
+isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+	struct isp_video *video = video_drvdata(file);
+	struct v4l2_subdev_format fmt;
+	struct v4l2_subdev *subdev;
+	u32 pad;
+	int ret;
+
+	if (format->type != video->type)
+		return -EINVAL;
+
+	subdev = isp_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
+
+	fmt.pad = pad;
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+	if (ret)
+		return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+	return 0;
+}
+
+static int
+isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
+{
+	struct isp_video *video = video_drvdata(file);
+	struct v4l2_subdev *subdev;
+	int ret;
+
+	subdev = isp_video_remote_subdev(video, NULL);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
+	mutex_unlock(&video->mutex);
+
+	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+}
+
+static int
+isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+	struct isp_video *video = video_drvdata(file);
+	struct v4l2_subdev_format format;
+	struct v4l2_subdev *subdev;
+	u32 pad;
+	int ret;
+
+	subdev = isp_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	/* Try the get crop operation first and fall back to get format if not
+	 * implemented.
+	 */
+	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
+	if (ret != -ENOIOCTLCMD)
+		return ret;
+
+	format.pad = pad;
+	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+	if (ret < 0)
+		return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+	crop->c.left = 0;
+	crop->c.top = 0;
+	crop->c.width = format.format.width;
+	crop->c.height = format.format.height;
+
+	return 0;
+}
+
+static int
+isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+	struct isp_video *video = video_drvdata(file);
+	struct v4l2_subdev *subdev;
+	int ret;
+
+	subdev = isp_video_remote_subdev(video, NULL);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
+	mutex_unlock(&video->mutex);
+
+	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+}
+
+static int
+isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+	struct isp_video *video = video_drvdata(file);
+
+	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    video->type != a->type)
+		return -EINVAL;
+
+	memset(a, 0, sizeof(*a));
+	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+	a->parm.output.timeperframe = vfh->timeperframe;
+
+	return 0;
+}
+
+static int
+isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+	struct isp_video *video = video_drvdata(file);
+
+	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    video->type != a->type)
+		return -EINVAL;
+
+	if (a->parm.output.timeperframe.denominator == 0)
+		a->parm.output.timeperframe.denominator = 1;
+
+	vfh->timeperframe = a->parm.output.timeperframe;
+
+	return 0;
+}
+
+static int
+isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+
+	return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
+}
+
+static int
+isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+
+	return omap3isp_video_queue_querybuf(&vfh->queue, b);
+}
+
+static int
+isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+
+	return omap3isp_video_queue_qbuf(&vfh->queue, b);
+}
+
+static int
+isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+
+	return omap3isp_video_queue_dqbuf(&vfh->queue, b,
+					  file->f_flags & O_NONBLOCK);
+}
+
+static int isp_video_check_external_subdevs(struct isp_video *video,
+					    struct isp_pipeline *pipe)
+{
+	struct isp_device *isp = video->isp;
+	struct media_entity *ents[] = {
+		&isp->isp_csi2a.subdev.entity,
+		&isp->isp_csi2c.subdev.entity,
+		&isp->isp_ccp2.subdev.entity,
+		&isp->isp_ccdc.subdev.entity
+	};
+	struct media_pad *source_pad;
+	struct media_entity *source = NULL;
+	struct media_entity *sink;
+	struct v4l2_subdev_format fmt;
+	struct v4l2_ext_controls ctrls;
+	struct v4l2_ext_control ctrl;
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ents); i++) {
+		/* Is the entity part of the pipeline? */
+		if (!(pipe->entities & (1 << ents[i]->id)))
+			continue;
+
+		/* ISP entities always have their sink at pad 0. Find the source. */
+		source_pad = media_entity_remote_source(&ents[i]->pads[0]);
+		if (source_pad == NULL)
+			continue;
+
+		source = source_pad->entity;
+		sink = ents[i];
+		break;
+	}
+
+	if (!source) {
+		dev_warn(isp->dev, "can't find source, failing now\n");
+		return ret;
+	}
+
+	if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
+		return 0;
+
+	pipe->external = media_entity_to_v4l2_subdev(source);
+
+	fmt.pad = source_pad->index;
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
+			       pad, get_fmt, NULL, &fmt);
+	if (unlikely(ret < 0)) {
+		dev_warn(isp->dev, "get_fmt returned null!\n");
+		return ret;
+	}
+
+	pipe->external_width =
+		omap3isp_video_format_info(fmt.format.code)->width;
+
+	memset(&ctrls, 0, sizeof(ctrls));
+	memset(&ctrl, 0, sizeof(ctrl));
+
+	ctrl.id = V4L2_CID_PIXEL_RATE;
+
+	ctrls.count = 1;
+	ctrls.controls = &ctrl;
+
+	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
+	if (ret < 0) {
+		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
+			 pipe->external->name);
+		return ret;
+	}
+
+	pipe->external_rate = ctrl.value64;
+
+	if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
+		unsigned int rate = UINT_MAX;
+		/*
+		 * Check that maximum allowed CCDC pixel rate isn't
+		 * exceeded by the pixel rate.
+		 */
+		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
+		if (pipe->external_rate > rate)
+			return -ENOSPC;
+	}
+
+	return 0;
+}
+
+/*
+ * Stream management
+ *
+ * Every ISP pipeline has a single input and a single output. The input can be
+ * either a sensor or a video node. The output is always a video node.
+ *
+ * As every pipeline has an output video node, the ISP video object at the
+ * pipeline output stores the pipeline state. It tracks the streaming state of
+ * both the input and output, as well as the availability of buffers.
+ *
+ * In sensor-to-memory mode, frames are always available at the pipeline input.
+ * Starting the sensor usually requires I2C transfers and must be done in
+ * interruptible context. The pipeline is started and stopped synchronously
+ * to the stream on/off commands. All modules in the pipeline will get their
+ * subdev set stream handler called. The module at the end of the pipeline must
+ * delay starting the hardware until buffers are available at its output.
+ *
+ * In memory-to-memory mode, starting/stopping the stream requires
+ * synchronization between the input and output. ISP modules can't be stopped
+ * in the middle of a frame, and at least some of the modules seem to become
+ * busy as soon as they're started, even if they don't receive a frame start
+ * event. For that reason frames need to be processed in single-shot mode. The
+ * driver needs to wait until a frame is completely processed and written to
+ * memory before restarting the pipeline for the next frame. Pipelined
+ * processing might be possible but requires more testing.
+ *
+ * Stream start must be delayed until buffers are available at both the input
+ * and output. The pipeline must be started in the videobuf queue callback with
+ * the buffers queue spinlock held. The modules subdev set stream operation must
+ * not sleep.
+ */
+static int
+isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+	struct isp_video *video = video_drvdata(file);
+	enum isp_pipeline_state state;
+	struct isp_pipeline *pipe;
+	unsigned long flags;
+	int ret;
+
+	if (type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->stream_lock);
+
+	if (video->streaming) {
+		mutex_unlock(&video->stream_lock);
+		return -EBUSY;
+	}
+
+	/* Start streaming on the pipeline. No link touching an entity in the
+	 * pipeline can be activated or deactivated once streaming is started.
+	 */
+	pipe = video->video.entity.pipe
+	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;
+
+	pipe->entities = 0;
+
+	if (video->isp->pdata->set_constraints)
+		video->isp->pdata->set_constraints(video->isp, true);
+	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
+	pipe->max_rate = pipe->l3_ick;
+
+	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+	if (ret < 0)
+		goto err_pipeline_start;
+
+	/* Verify that the currently configured format matches the output of
+	 * the connected subdev.
+	 */
+	ret = isp_video_check_format(video, vfh);
+	if (ret < 0)
+		goto err_check_format;
+
+	video->bpl_padding = ret;
+	video->bpl_value = vfh->format.fmt.pix.bytesperline;
+
+	ret = isp_video_get_graph_data(video, pipe);
+	if (ret < 0)
+		goto err_check_format;
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
+	else
+		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
+
+	ret = isp_video_check_external_subdevs(video, pipe);
+	if (ret < 0)
+		goto err_check_format;
+
+	/* Validate the pipeline and update its state. */
+	ret = isp_video_validate_pipeline(pipe);
+	if (ret < 0)
+		goto err_check_format;
+
+	pipe->error = false;
+
+	spin_lock_irqsave(&pipe->lock, flags);
+	pipe->state &= ~ISP_PIPELINE_STREAM;
+	pipe->state |= state;
+	spin_unlock_irqrestore(&pipe->lock, flags);
+
+	/* Set the maximum time per frame as the value requested by userspace.
+	 * This is a soft limit that can be overridden if the hardware doesn't
+	 * support the requested limit.
+	 */
+	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		pipe->max_timeperframe = vfh->timeperframe;
+
+	video->queue = &vfh->queue;
+	INIT_LIST_HEAD(&video->dmaqueue);
+	atomic_set(&pipe->frame_number, -1);
+
+	ret = omap3isp_video_queue_streamon(&vfh->queue);
+	if (ret < 0)
+		goto err_check_format;
+
+	/* In sensor-to-memory mode, the stream can be started synchronously
+	 * to the stream on command. In memory-to-memory mode, it will be
+	 * started when buffers are queued on both the input and output.
+	 */
+	if (pipe->input == NULL) {
+		ret = omap3isp_pipeline_set_stream(pipe,
+					      ISP_PIPELINE_STREAM_CONTINUOUS);
+		if (ret < 0)
+			goto err_set_stream;
+		spin_lock_irqsave(&video->queue->irqlock, flags);
+		if (list_empty(&video->dmaqueue))
+			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+		spin_unlock_irqrestore(&video->queue->irqlock, flags);
+	}
+
+	video->streaming = 1;
+
+	mutex_unlock(&video->stream_lock);
+	return 0;
+
+err_set_stream:
+	omap3isp_video_queue_streamoff(&vfh->queue);
+err_check_format:
+	media_entity_pipeline_stop(&video->video.entity);
+err_pipeline_start:
+	if (video->isp->pdata->set_constraints)
+		video->isp->pdata->set_constraints(video->isp, false);
+	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
+	 * will get triggered the next time the CCDC is powered up will try to
+	 * access buffers that might have been freed but are still present in the
+	 * DMA queue. This can easily get triggered if the above
+	 * omap3isp_pipeline_set_stream() call fails on a system with a
+	 * free-running sensor.
+	 */
+	INIT_LIST_HEAD(&video->dmaqueue);
+	video->queue = NULL;
+
+	mutex_unlock(&video->stream_lock);
+	return ret;
+}
+
+static int
+isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(fh);
+	struct isp_video *video = video_drvdata(file);
+	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+	enum isp_pipeline_state state;
+	unsigned int streaming;
+	unsigned long flags;
+
+	if (type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->stream_lock);
+
+	/* Nothing to do if the queue isn't streaming. */
+	mutex_lock(&vfh->queue.lock);
+	streaming = vfh->queue.streaming;
+	mutex_unlock(&vfh->queue.lock);
+
+	if (!streaming)
+		goto done;
+
+	/* Update the pipeline state. */
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		state = ISP_PIPELINE_STREAM_OUTPUT
+		      | ISP_PIPELINE_QUEUE_OUTPUT;
+	else
+		state = ISP_PIPELINE_STREAM_INPUT
+		      | ISP_PIPELINE_QUEUE_INPUT;
+
+	spin_lock_irqsave(&pipe->lock, flags);
+	pipe->state &= ~state;
+	spin_unlock_irqrestore(&pipe->lock, flags);
+
+	/* Stop the stream. */
+	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
+	omap3isp_video_queue_streamoff(&vfh->queue);
+	video->queue = NULL;
+	video->streaming = 0;
+
+	if (video->isp->pdata->set_constraints)
+		video->isp->pdata->set_constraints(video->isp, false);
+	media_entity_pipeline_stop(&video->video.entity);
+
+done:
+	mutex_unlock(&video->stream_lock);
+	return 0;
+}
+
+static int
+isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
+{
+	if (input->index > 0)
+		return -EINVAL;
+
+	strlcpy(input->name, "camera", sizeof(input->name));
+	input->type = V4L2_INPUT_TYPE_CAMERA;
+
+	return 0;
+}
+
+static int
+isp_video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+	*input = 0;
+
+	return 0;
+}
+
+static int
+isp_video_s_input(struct file *file, void *fh, unsigned int input)
+{
+	return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
+	.vidioc_querycap		= isp_video_querycap,
+	.vidioc_g_fmt_vid_cap		= isp_video_get_format,
+	.vidioc_s_fmt_vid_cap		= isp_video_set_format,
+	.vidioc_try_fmt_vid_cap		= isp_video_try_format,
+	.vidioc_g_fmt_vid_out		= isp_video_get_format,
+	.vidioc_s_fmt_vid_out		= isp_video_set_format,
+	.vidioc_try_fmt_vid_out		= isp_video_try_format,
+	.vidioc_cropcap			= isp_video_cropcap,
+	.vidioc_g_crop			= isp_video_get_crop,
+	.vidioc_s_crop			= isp_video_set_crop,
+	.vidioc_g_parm			= isp_video_get_param,
+	.vidioc_s_parm			= isp_video_set_param,
+	.vidioc_reqbufs			= isp_video_reqbufs,
+	.vidioc_querybuf		= isp_video_querybuf,
+	.vidioc_qbuf			= isp_video_qbuf,
+	.vidioc_dqbuf			= isp_video_dqbuf,
+	.vidioc_streamon		= isp_video_streamon,
+	.vidioc_streamoff		= isp_video_streamoff,
+	.vidioc_enum_input		= isp_video_enum_input,
+	.vidioc_g_input			= isp_video_g_input,
+	.vidioc_s_input			= isp_video_s_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static int isp_video_open(struct file *file)
+{
+	struct isp_video *video = video_drvdata(file);
+	struct isp_video_fh *handle;
+	int ret = 0;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (handle == NULL)
+		return -ENOMEM;
+
+	v4l2_fh_init(&handle->vfh, &video->video);
+	v4l2_fh_add(&handle->vfh);
+
+	/* If this is the first user, initialise the pipeline. */
+	if (omap3isp_get(video->isp) == NULL) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
+	if (ret < 0) {
+		omap3isp_put(video->isp);
+		goto done;
+	}
+
+	omap3isp_video_queue_init(&handle->queue, video->type,
+				  &isp_video_queue_ops, video->isp->dev,
+				  sizeof(struct isp_buffer));
+
+	memset(&handle->format, 0, sizeof(handle->format));
+	handle->format.type = video->type;
+	handle->timeperframe.denominator = 1;
+
+	handle->video = video;
+	file->private_data = &handle->vfh;
+
+done:
+	if (ret < 0) {
+		v4l2_fh_del(&handle->vfh);
+		kfree(handle);
+	}
+
+	return ret;
+}
+
+static int isp_video_release(struct file *file)
+{
+	struct isp_video *video = video_drvdata(file);
+	struct v4l2_fh *vfh = file->private_data;
+	struct isp_video_fh *handle = to_isp_video_fh(vfh);
+
+	/* Disable streaming and free the buffers queue resources. */
+	isp_video_streamoff(file, vfh, video->type);
+
+	mutex_lock(&handle->queue.lock);
+	omap3isp_video_queue_cleanup(&handle->queue);
+	mutex_unlock(&handle->queue.lock);
+
+	omap3isp_pipeline_pm_use(&video->video.entity, 0);
+
+	/* Release the file handle. */
+	v4l2_fh_del(vfh);
+	kfree(handle);
+	file->private_data = NULL;
+
+	omap3isp_put(video->isp);
+
+	return 0;
+}
+
+static unsigned int isp_video_poll(struct file *file, poll_table *wait)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+	struct isp_video_queue *queue = &vfh->queue;
+
+	return omap3isp_video_queue_poll(queue, file, wait);
+}
+
+static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+
+	return omap3isp_video_queue_mmap(&vfh->queue, vma);
+}
+
+static struct v4l2_file_operations isp_video_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = video_ioctl2,
+	.open = isp_video_open,
+	.release = isp_video_release,
+	.poll = isp_video_poll,
+	.mmap = isp_video_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * ISP video core
+ */
+
+static const struct isp_video_operations isp_video_dummy_ops = {
+};
+
+int omap3isp_video_init(struct isp_video *video, const char *name)
+{
+	const char *direction;
+	int ret;
+
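+	/*
+	 * Video node names follow the ISP point of view: a V4L2 capture device
+	 * records data coming out of the ISP (an ISP "output", sink pad), while
+	 * a V4L2 output device feeds data into the ISP (an ISP "input", source
+	 * pad).
+	 */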
+	switch (video->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		direction = "output";
+		video->pad.flags = MEDIA_PAD_FL_SINK;
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		direction = "input";
+		video->pad.flags = MEDIA_PAD_FL_SOURCE;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&video->mutex);
+	atomic_set(&video->active, 0);
+
+	spin_lock_init(&video->pipe.lock);
+	mutex_init(&video->stream_lock);
+
+	/* Initialize the video device. */
+	if (video->ops == NULL)
+		video->ops = &isp_video_dummy_ops;
+
+	video->video.fops = &isp_video_fops;
+	snprintf(video->video.name, sizeof(video->video.name),
+		 "OMAP3 ISP %s %s", name, direction);
+	video->video.vfl_type = VFL_TYPE_GRABBER;
+	video->video.release = video_device_release_empty;
+	video->video.ioctl_ops = &isp_video_ioctl_ops;
+	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
+
+	video_set_drvdata(&video->video, video);
+
+	return 0;
+}
+
+void omap3isp_video_cleanup(struct isp_video *video)
+{
+	media_entity_cleanup(&video->video.entity);
+	mutex_destroy(&video->stream_lock);
+	mutex_destroy(&video->mutex);
+}
+
+int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
+{
+	int ret;
+
+	video->video.v4l2_dev = vdev;
+
+	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+	if (ret < 0)
+		printk(KERN_ERR "%s: could not register video device (%d)\n",
+			__func__, ret);
+
+	return ret;
+}
+
+void omap3isp_video_unregister(struct isp_video *video)
+{
+	if (video_is_registered(&video->video))
+		video_unregister_device(&video->video);
+}
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
new file mode 100644
index 0000000..1ad470e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -0,0 +1,216 @@
+/*
+ * ispvideo.h
+ *
+ * TI OMAP3 ISP - Generic video node
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef OMAP3_ISP_VIDEO_H
+#define OMAP3_ISP_VIDEO_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+
+#include "ispqueue.h"
+
+#define ISP_VIDEO_DRIVER_NAME		"ispvideo"
+#define ISP_VIDEO_DRIVER_VERSION	"0.0.2"
+
+struct isp_device;
+struct isp_video;
+struct v4l2_mbus_framefmt;
+struct v4l2_pix_format;
+
+/*
+ * struct isp_format_info - ISP media bus format information
+ * @code: V4L2 media bus format code
+ * @truncated: V4L2 media bus format code for the same format truncated to 10
+ *	bits. Identical to @code if the format is 10 bits wide or less.
+ * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
+ *	format. Identical to @code if the format is not DPCM compressed.
+ * @flavor: V4L2 media bus format code for the same pixel layout but
+ *	shifted to be 8 bits per pixel. Set to 0 if the format is not shiftable.
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @width: Bits per pixel (when transferred over a bus)
+ * @bpp: Bytes per pixel (when stored in memory)
+ */
+struct isp_format_info {
+	enum v4l2_mbus_pixelcode code;
+	enum v4l2_mbus_pixelcode truncated;
+	enum v4l2_mbus_pixelcode uncompressed;
+	enum v4l2_mbus_pixelcode flavor;
+	u32 pixelformat;
+	unsigned int width;
+	unsigned int bpp;
+};
+
+enum isp_pipeline_stream_state {
+	ISP_PIPELINE_STREAM_STOPPED = 0,
+	ISP_PIPELINE_STREAM_CONTINUOUS = 1,
+	ISP_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum isp_pipeline_state {
+	/* The stream has been started on the input video node. */
+	ISP_PIPELINE_STREAM_INPUT = 1,
+	/* The stream has been started on the output video node. */
+	ISP_PIPELINE_STREAM_OUTPUT = 2,
+	/* At least one buffer is queued on the input video node. */
+	ISP_PIPELINE_QUEUE_INPUT = 4,
+	/* At least one buffer is queued on the output video node. */
+	ISP_PIPELINE_QUEUE_OUTPUT = 8,
+	/* The input entity is idle, ready to be started. */
+	ISP_PIPELINE_IDLE_INPUT = 16,
+	/* The output entity is idle, ready to be started. */
+	ISP_PIPELINE_IDLE_OUTPUT = 32,
+	/* The pipeline is currently streaming. */
+	ISP_PIPELINE_STREAM = 64,
+};
+
+/*
+ * struct isp_pipeline - An ISP hardware pipeline
+ * @error: A hardware error occurred during capture
+ * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
+ */
+struct isp_pipeline {
+	struct media_pipeline pipe;
+	spinlock_t lock;		/* Pipeline state and queue flags */
+	unsigned int state;
+	enum isp_pipeline_stream_state stream_state;
+	struct isp_video *input;
+	struct isp_video *output;
+	u32 entities;
+	unsigned long l3_ick;
+	unsigned int max_rate;
+	atomic_t frame_number;
+	bool do_propagation; /* of frame number */
+	bool error;
+	struct v4l2_fract max_timeperframe;
+	struct v4l2_subdev *external;
+	unsigned int external_rate;
+	unsigned int external_width;
+};
+
+#define to_isp_pipeline(__e) \
+	container_of((__e)->pipe, struct isp_pipeline, pipe)
+
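+/*
+ * isp_pipeline_ready - Check whether a pipeline can be started
+ *
+ * Returns non-zero when both video nodes are streaming, both have at least one
+ * buffer queued, both ends are idle and the pipeline is not already streaming
+ * (ISP_PIPELINE_STREAM not set).
+ */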
+static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
+{
+	return pipe->state == (ISP_PIPELINE_STREAM_INPUT |
+			       ISP_PIPELINE_STREAM_OUTPUT |
+			       ISP_PIPELINE_QUEUE_INPUT |
+			       ISP_PIPELINE_QUEUE_OUTPUT |
+			       ISP_PIPELINE_IDLE_INPUT |
+			       ISP_PIPELINE_IDLE_OUTPUT);
+}
+
+/*
+ * struct isp_buffer - ISP buffer
+ * @buffer: ISP video buffer
+ * @isp_addr: MMU mapped address (a.k.a. device address) of the buffer.
+ */
+struct isp_buffer {
+	struct isp_video_buffer buffer;
+	dma_addr_t isp_addr;
+};
+
+#define to_isp_buffer(buf)	container_of(buf, struct isp_buffer, buffer)
+
+enum isp_video_dmaqueue_flags {
+	/* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
+	ISP_VIDEO_DMAQUEUE_UNDERRUN = (1 << 0),
+	/* Set when queuing buffer to an empty DMA queue */
+	ISP_VIDEO_DMAQUEUE_QUEUED = (1 << 1),
+};
+
+#define isp_video_dmaqueue_flags_clr(video)	\
+			({ (video)->dmaqueue_flags = 0; })
+
+/*
+ * struct isp_video_operations - ISP video operations
+ * @queue:	Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ *		if there was no buffer previously queued.
+ */
+struct isp_video_operations {
+	int (*queue)(struct isp_video *video, struct isp_buffer *buffer);
+};
+
+struct isp_video {
+	struct video_device video;
+	enum v4l2_buf_type type;
+	struct media_pad pad;
+
+	struct mutex mutex;		/* format and crop settings */
+	atomic_t active;
+
+	struct isp_device *isp;
+
+	unsigned int capture_mem;
+	unsigned int bpl_alignment;	/* alignment value */
+	unsigned int bpl_zero_padding;	/* whether the alignment is optional */
+	unsigned int bpl_max;		/* maximum bytes per line value */
+	unsigned int bpl_value;		/* bytes per line value */
+	unsigned int bpl_padding;	/* padding at end of line */
+
+	/* Entity video node streaming */
+	unsigned int streaming:1;
+
+	/* Pipeline state */
+	struct isp_pipeline pipe;
+	struct mutex stream_lock;	/* pipeline and stream states */
+
+	/* Video buffers queue */
+	struct isp_video_queue *queue;
+	struct list_head dmaqueue;
+	enum isp_video_dmaqueue_flags dmaqueue_flags;
+
+	const struct isp_video_operations *ops;
+};
+
+#define to_isp_video(vdev)	container_of(vdev, struct isp_video, video)
+
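+/*
+ * struct isp_video_fh - ISP video file handle
+ * @vfh: V4L2 file handle
+ * @video: ISP video node the file handle belongs to
+ * @queue: Per-file-handle buffers queue
+ * @format: Format configured through this file handle
+ * @timeperframe: Frame period requested by userspace through this file handle
+ */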
+struct isp_video_fh {
+	struct v4l2_fh vfh;
+	struct isp_video *video;
+	struct isp_video_queue queue;
+	struct v4l2_format format;
+	struct v4l2_fract timeperframe;
+};
+
+#define to_isp_video_fh(fh)	container_of(fh, struct isp_video_fh, vfh)
+#define isp_video_queue_to_isp_video_fh(q) \
+				container_of(q, struct isp_video_fh, queue)
+
+int omap3isp_video_init(struct isp_video *video, const char *name);
+void omap3isp_video_cleanup(struct isp_video *video);
+int omap3isp_video_register(struct isp_video *video,
+			    struct v4l2_device *vdev);
+void omap3isp_video_unregister(struct isp_video *video);
+struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video);
+void omap3isp_video_resume(struct isp_video *video, int continuous);
+struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
+
+const struct isp_format_info *
+omap3isp_video_format_info(enum v4l2_mbus_pixelcode code);
+
+#endif /* OMAP3_ISP_VIDEO_H */
diff --git a/drivers/media/platform/omap3isp/luma_enhance_table.h b/drivers/media/platform/omap3isp/luma_enhance_table.h
new file mode 100644
index 0000000..098b45e
--- /dev/null
+++ b/drivers/media/platform/omap3isp/luma_enhance_table.h
@@ -0,0 +1,42 @@
+/*
+ * luma_enhance_table.h
+ *
+ * TI OMAP3 ISP - Luminance enhancement table
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
+1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
+1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
+1047552, 1047552, 1047552, 1047552, 1048575, 1047551, 1046527, 1045503,
+1044479, 1043455, 1042431, 1041407, 1040383, 1039359, 1038335, 1037311,
+1036287, 1035263, 1034239, 1033215, 1032191, 1031167, 1030143, 1028096,
+1028096, 1028096, 1028096, 1028096, 1028096, 1028096, 1028096, 1028096,
+1028096, 1028100, 1032196, 1036292, 1040388, 1044484,       0,       0,
+      0,       5,    5125,   10245,   15365,   20485,   25605,   30720,
+  30720,   30720,   30720,   30720,   30720,   30720,   30720,   30720,
+  30720,   30720,   31743,   30719,   29695,   28671,   27647,   26623,
+  25599,   24575,   23551,   22527,   21503,   20479,   19455,   18431,
+  17407,   16383,   15359,   14335,   13311,   12287,   11263,   10239,
+   9215,    8191,    7167,    6143,    5119,    4095,    3071,    1024,
+   1024,    1024,    1024,    1024,    1024,    1024,    1024,    1024,
+   1024,    1024,    1024,    1024,    1024,    1024,    1024,    1024
diff --git a/drivers/media/platform/omap3isp/noise_filter_table.h b/drivers/media/platform/omap3isp/noise_filter_table.h
new file mode 100644
index 0000000..d50451a
--- /dev/null
+++ b/drivers/media/platform/omap3isp/noise_filter_table.h
@@ -0,0 +1,30 @@
+/*
+ * noise_filter_table.h
+ *
+ * TI OMAP3 ISP - Noise filter table
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
new file mode 100644
index 0000000..9c21e01
--- /dev/null
+++ b/drivers/media/platform/pxa_camera.c
@@ -0,0 +1,1852 @@
+/*
+ * V4L2 Driver for PXA camera host
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-dma-sg.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+
+#include <linux/videodev2.h>
+
+#include <mach/dma.h>
+#include <mach/camera.h>
+
+#define PXA_CAM_VERSION "0.0.6"
+#define PXA_CAM_DRV_NAME "pxa27x-camera"
+
+/* Camera Interface */
+#define CICR0		0x0000
+#define CICR1		0x0004
+#define CICR2		0x0008
+#define CICR3		0x000C
+#define CICR4		0x0010
+#define CISR		0x0014
+#define CIFR		0x0018
+#define CITOR		0x001C
+#define CIBR0		0x0028
+#define CIBR1		0x0030
+#define CIBR2		0x0038
+
+#define CICR0_DMAEN	(1 << 31)	/* DMA request enable */
+#define CICR0_PAR_EN	(1 << 30)	/* Parity enable */
+#define CICR0_SL_CAP_EN	(1 << 29)	/* Capture enable for slave mode */
+#define CICR0_ENB	(1 << 28)	/* Camera interface enable */
+#define CICR0_DIS	(1 << 27)	/* Camera interface disable */
+#define CICR0_SIM	(0x7 << 24)	/* Sensor interface mode mask */
+#define CICR0_TOM	(1 << 9)	/* Time-out mask */
+#define CICR0_RDAVM	(1 << 8)	/* Receive-data-available mask */
+#define CICR0_FEM	(1 << 7)	/* FIFO-empty mask */
+#define CICR0_EOLM	(1 << 6)	/* End-of-line mask */
+#define CICR0_PERRM	(1 << 5)	/* Parity-error mask */
+#define CICR0_QDM	(1 << 4)	/* Quick-disable mask */
+#define CICR0_CDM	(1 << 3)	/* Disable-done mask */
+#define CICR0_SOFM	(1 << 2)	/* Start-of-frame mask */
+#define CICR0_EOFM	(1 << 1)	/* End-of-frame mask */
+#define CICR0_FOM	(1 << 0)	/* FIFO-overrun mask */
+
+#define CICR1_TBIT	(1 << 31)	/* Transparency bit */
+#define CICR1_RGBT_CONV	(0x3 << 29)	/* RGBT conversion mask */
+#define CICR1_PPL	(0x7ff << 15)	/* Pixels per line mask */
+#define CICR1_RGB_CONV	(0x7 << 12)	/* RGB conversion mask */
+#define CICR1_RGB_F	(1 << 11)	/* RGB format */
+#define CICR1_YCBCR_F	(1 << 10)	/* YCbCr format */
+#define CICR1_RGB_BPP	(0x7 << 7)	/* RGB bits per pixel mask */
+#define CICR1_RAW_BPP	(0x3 << 5)	/* Raw bits per pixel mask */
+#define CICR1_COLOR_SP	(0x3 << 3)	/* Color space mask */
+#define CICR1_DW	(0x7 << 0)	/* Data width mask */
+
+#define CICR2_BLW	(0xff << 24)	/* Beginning-of-line pixel clock
+					   wait count mask */
+#define CICR2_ELW	(0xff << 16)	/* End-of-line pixel clock
+					   wait count mask */
+#define CICR2_HSW	(0x3f << 10)	/* Horizontal sync pulse width mask */
+#define CICR2_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
+					   wait count mask */
+#define CICR2_FSW	(0x7 << 0)	/* Frame stabilization
+					   wait count mask */
+
+#define CICR3_BFW	(0xff << 24)	/* Beginning-of-frame line clock
+					   wait count mask */
+#define CICR3_EFW	(0xff << 16)	/* End-of-frame line clock
+					   wait count mask */
+#define CICR3_VSW	(0x3f << 10)	/* Vertical sync pulse width mask */
+#define CICR3_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
+					   wait count mask */
+#define CICR3_LPF	(0x7ff << 0)	/* Lines per frame mask */
+
+#define CICR4_MCLK_DLY	(0x3 << 24)	/* MCLK Data Capture Delay mask */
+#define CICR4_PCLK_EN	(1 << 23)	/* Pixel clock enable */
+#define CICR4_PCP	(1 << 22)	/* Pixel clock polarity */
+#define CICR4_HSP	(1 << 21)	/* Horizontal sync polarity */
+#define CICR4_VSP	(1 << 20)	/* Vertical sync polarity */
+#define CICR4_MCLK_EN	(1 << 19)	/* MCLK enable */
+#define CICR4_FR_RATE	(0x7 << 8)	/* Frame rate mask */
+#define CICR4_DIV	(0xff << 0)	/* Clock divisor mask */
+
+#define CISR_FTO	(1 << 15)	/* FIFO time-out */
+#define CISR_RDAV_2	(1 << 14)	/* Channel 2 receive data available */
+#define CISR_RDAV_1	(1 << 13)	/* Channel 1 receive data available */
+#define CISR_RDAV_0	(1 << 12)	/* Channel 0 receive data available */
+#define CISR_FEMPTY_2	(1 << 11)	/* Channel 2 FIFO empty */
+#define CISR_FEMPTY_1	(1 << 10)	/* Channel 1 FIFO empty */
+#define CISR_FEMPTY_0	(1 << 9)	/* Channel 0 FIFO empty */
+#define CISR_EOL	(1 << 8)	/* End of line */
+#define CISR_PAR_ERR	(1 << 7)	/* Parity error */
+#define CISR_CQD	(1 << 6)	/* Camera interface quick disable */
+#define CISR_CDD	(1 << 5)	/* Camera interface disable done */
+#define CISR_SOF	(1 << 4)	/* Start of frame */
+#define CISR_EOF	(1 << 3)	/* End of frame */
+#define CISR_IFO_2	(1 << 2)	/* FIFO overrun for Channel 2 */
+#define CISR_IFO_1	(1 << 1)	/* FIFO overrun for Channel 1 */
+#define CISR_IFO_0	(1 << 0)	/* FIFO overrun for Channel 0 */
+
+#define CIFR_FLVL2	(0x7f << 23)	/* FIFO 2 level mask */
+#define CIFR_FLVL1	(0x7f << 16)	/* FIFO 1 level mask */
+#define CIFR_FLVL0	(0xff << 8)	/* FIFO 0 level mask */
+#define CIFR_THL_0	(0x3 << 4)	/* Threshold Level for Channel 0 FIFO */
+#define CIFR_RESET_F	(1 << 3)	/* Reset input FIFOs */
+#define CIFR_FEN2	(1 << 2)	/* FIFO enable for channel 2 */
+#define CIFR_FEN1	(1 << 1)	/* FIFO enable for channel 1 */
+#define CIFR_FEN0	(1 << 0)	/* FIFO enable for channel 0 */
+
+#define CICR0_SIM_MP	(0 << 24)
+#define CICR0_SIM_SP	(1 << 24)
+#define CICR0_SIM_MS	(2 << 24)
+#define CICR0_SIM_EP	(3 << 24)
+#define CICR0_SIM_ES	(4 << 24)
+
+#define CICR1_DW_VAL(x)   ((x) & CICR1_DW)	    /* Data bus width */
+#define CICR1_PPL_VAL(x)  (((x) << 15) & CICR1_PPL) /* Pixels per line */
+#define CICR1_COLOR_SP_VAL(x)	(((x) << 3) & CICR1_COLOR_SP)	/* color space */
+#define CICR1_RGB_BPP_VAL(x)	(((x) << 7) & CICR1_RGB_BPP)	/* bpp for rgb */
+#define CICR1_RGBT_CONV_VAL(x)	(((x) << 29) & CICR1_RGBT_CONV)	/* rgbt conv */
+
+#define CICR2_BLW_VAL(x)  (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */
+#define CICR2_ELW_VAL(x)  (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */
+#define CICR2_HSW_VAL(x)  (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */
+#define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */
+#define CICR2_FSW_VAL(x)  (((x) << 0) & CICR2_FSW)  /* Frame stabilization wait count */
+
+#define CICR3_BFW_VAL(x)  (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count  */
+#define CICR3_EFW_VAL(x)  (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */
+#define CICR3_VSW_VAL(x)  (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */
+#define CICR3_LPF_VAL(x)  (((x) << 0) & CICR3_LPF)  /* Lines per frame */
+
+#define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \
+			CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
+			CICR0_EOFM | CICR0_FOM)
+
+/*
+ * Structures
+ */
+enum pxa_camera_active_dma {
+	DMA_Y = 0x1,
+	DMA_U = 0x2,
+	DMA_V = 0x4,
+};
+
+/* descriptor needed for the PXA DMA engine */
+struct pxa_cam_dma {
+	dma_addr_t		sg_dma;
+	struct pxa_dma_desc	*sg_cpu;
+	size_t			sg_size;
+	int			sglen;
+};
+
+/* buffer for one video frame */
+struct pxa_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct videobuf_buffer		vb;
+	enum v4l2_mbus_pixelcode	code;
+	/* our descriptor lists for Y, U and V channels */
+	struct pxa_cam_dma		dmas[3];
+	int				inwork;
+	enum pxa_camera_active_dma	active_dma;
+};
+
+struct pxa_camera_dev {
+	struct soc_camera_host	soc_host;
+	/*
+	 * PXA27x is only supposed to handle one camera on its Quick Capture
+	 * interface. If anyone ever builds hardware to enable more than
+	 * one camera, they will have to modify this driver too
+	 */
+	struct soc_camera_device *icd;
+	struct clk		*clk;
+
+	unsigned int		irq;
+	void __iomem		*base;
+
+	int			channels;
+	unsigned int		dma_chans[3];
+
+	struct pxacamera_platform_data *pdata;
+	struct resource		*res;
+	unsigned long		platform_flags;
+	unsigned long		ciclk;
+	unsigned long		mclk;
+	u32			mclk_divisor;
+	u16			width_flags;	/* max 10 bits */
+
+	struct list_head	capture;
+
+	spinlock_t		lock;
+
+	struct pxa_buffer	*active;
+	struct pxa_dma_desc	*sg_tail[3];
+
+	u32			save_cicr[5];
+};
+
+struct pxa_cam {
+	unsigned long flags;
+};
+
+static const char *pxa_cam_driver_description = "PXA_Camera";
+
+static unsigned int vid_limit = 16;	/* Video memory limit, in megabytes */
+
+/*
+ *  Videobuf operations
+ */
+static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+			      unsigned int *size)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+
+	dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
+
+	*size = icd->sizeimage;
+
+	if (0 == *count)
+		*count = 32;
+	if (*size * *count > vid_limit * 1024 * 1024)
+		*count = (vid_limit * 1024 * 1024) / *size;
+
+	return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+	int i;
+
+	BUG_ON(in_interrupt());
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		&buf->vb, buf->vb.baddr, buf->vb.bsize);
+
+	/*
+	 * This waits until this buffer is out of danger, i.e., until it is no
+	 * longer in STATE_QUEUED or STATE_ACTIVE
+	 */
+	videobuf_waiton(vq, &buf->vb, 0, 0);
+	videobuf_dma_unmap(vq->dev, dma);
+	videobuf_dma_free(dma);
+
+	for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) {
+		if (buf->dmas[i].sg_cpu)
+			dma_free_coherent(ici->v4l2_dev.dev,
+					  buf->dmas[i].sg_size,
+					  buf->dmas[i].sg_cpu,
+					  buf->dmas[i].sg_dma);
+		buf->dmas[i].sg_cpu = NULL;
+	}
+
+	buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
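+/*
+ * calculate_dma_sglen - Count scatterlist entries needed for a transfer
+ *
+ * Walks the scatterlist and returns the number of entries required to transfer
+ * size bytes, starting sg_first_ofs bytes into the first entry. Each chunk is
+ * rounded up to 8 bytes as required by the PXA DMA controller.
+ */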
+static int calculate_dma_sglen(struct scatterlist *sglist, int sglen,
+			       int sg_first_ofs, int size)
+{
+	int i, offset, dma_len, xfer_len;
+	struct scatterlist *sg;
+
+	offset = sg_first_ofs;
+	for_each_sg(sglist, sg, sglen, i) {
+		dma_len = sg_dma_len(sg);
+
+		/* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
+		xfer_len = roundup(min(dma_len - offset, size), 8);
+
+		size = max(0, size - xfer_len);
+		offset = 0;
+		if (size == 0)
+			break;
+	}
+
+	BUG_ON(size != 0);
+	return i + 1;
+}
+
+/**
+ * pxa_init_dma_channel - init dma descriptors
+ * @pcdev: pxa camera device
+ * @buf: pxa buffer to find pxa dma channel
+ * @dma: dma video buffer
+ * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
+ * @cibr: camera Receive Buffer Register
+ * @size: bytes to transfer
+ * @sg_first: first element of sg_list
+ * @sg_first_ofs: offset in first element of sg_list
+ *
+ * Prepares the pxa dma descriptors to transfer one camera channel.
+ * Beware that sg_first and sg_first_ofs are both input and output parameters.
+ *
+ * Returns 0 on success, or -ENOMEM if no coherent memory is available.
+ */
+static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
+				struct pxa_buffer *buf,
+				struct videobuf_dmabuf *dma, int channel,
+				int cibr, int size,
+				struct scatterlist **sg_first, int *sg_first_ofs)
+{
+	struct pxa_cam_dma *pxa_dma = &buf->dmas[channel];
+	struct device *dev = pcdev->soc_host.v4l2_dev.dev;
+	struct scatterlist *sg;
+	int i, offset, sglen;
+	int dma_len = 0, xfer_len = 0;
+
+	if (pxa_dma->sg_cpu)
+		dma_free_coherent(dev, pxa_dma->sg_size,
+				  pxa_dma->sg_cpu, pxa_dma->sg_dma);
+
+	sglen = calculate_dma_sglen(*sg_first, dma->sglen,
+				    *sg_first_ofs, size);
+
+	pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc);
+	pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size,
+					     &pxa_dma->sg_dma, GFP_KERNEL);
+	if (!pxa_dma->sg_cpu)
+		return -ENOMEM;
+
+	pxa_dma->sglen = sglen;
+	offset = *sg_first_ofs;
+
+	dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
+		*sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma);
+
+
+	for_each_sg(*sg_first, sg, sglen, i) {
+		dma_len = sg_dma_len(sg);
+
+		/* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
+		xfer_len = roundup(min(dma_len - offset, size), 8);
+
+		size = max(0, size - xfer_len);
+
+		pxa_dma->sg_cpu[i].dsadr = pcdev->res->start + cibr;
+		pxa_dma->sg_cpu[i].dtadr = sg_dma_address(sg) + offset;
+		pxa_dma->sg_cpu[i].dcmd =
+			DCMD_FLOWSRC | DCMD_BURST8 | DCMD_INCTRGADDR | xfer_len;
+#ifdef DEBUG
+		if (!i)
+			pxa_dma->sg_cpu[i].dcmd |= DCMD_STARTIRQEN;
+#endif
+		pxa_dma->sg_cpu[i].ddadr =
+			pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc);
+
+		dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
+			 pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc),
+			 sg_dma_address(sg) + offset, xfer_len);
+		offset = 0;
+
+		if (size == 0)
+			break;
+	}
+
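+	/* Terminate the descriptor chain and raise an IRQ on completion. */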
+	pxa_dma->sg_cpu[sglen].ddadr = DDADR_STOP;
+	pxa_dma->sg_cpu[sglen].dcmd  = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_ENDIRQEN;
+
+	/*
+	 * Handle one special case:
+	 *  - with 3 planes (YUV422P format), we might finish with xfer_len
+	 *    equal to dma_len (ending on a page boundary). In this case, the
+	 *    sg element for the next plane should be the one after the last
+	 *    element used to store the final scatter-gather RAM page.
+	 */
+	if (xfer_len >= dma_len) {
+		*sg_first_ofs = xfer_len - dma_len;
+		*sg_first = sg_next(sg);
+	} else {
+		*sg_first_ofs = xfer_len;
+		*sg_first = sg;
+	}
+
+	return 0;
+}
+
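+/* Mark the DMA channels that must complete before the buffer is done. */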
+static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
+				    struct pxa_buffer *buf)
+{
+	buf->active_dma = DMA_Y;
+	if (pcdev->channels == 3)
+		buf->active_dma |= DMA_U | DMA_V;
+}
+
+/*
+ * Please check the DMA prepared buffer structure in:
+ *   Documentation/video4linux/pxa_camera.txt
+ * Also check pxa_camera_check_link_miss() to understand why modifying the
+ * DMA chain while it is running works anyway.
+ */
+static int pxa_videobuf_prepare(struct videobuf_queue *vq,
+		struct videobuf_buffer *vb, enum v4l2_field field)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct device *dev = pcdev->soc_host.v4l2_dev.dev;
+	struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
+	int ret;
+	int size_y, size_u = 0, size_v = 0;
+
+	dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	/* Added list head initialization on alloc */
+	WARN_ON(!list_empty(&vb->queue));
+
+#ifdef DEBUG
+	/*
+	 * This can be useful if you want to see if we actually fill
+	 * the buffer with something
+	 */
+	memset((void *)vb->baddr, 0xaa, vb->bsize);
+#endif
+
+	BUG_ON(NULL == icd->current_fmt);
+
+	/*
+	 * I think that in buf_prepare you only have to protect global data;
+	 * the actual buffer is yours.
+	 */
+	buf->inwork = 1;
+
+	if (buf->code	!= icd->current_fmt->code ||
+	    vb->width	!= icd->user_width ||
+	    vb->height	!= icd->user_height ||
+	    vb->field	!= field) {
+		buf->code	= icd->current_fmt->code;
+		vb->width	= icd->user_width;
+		vb->height	= icd->user_height;
+		vb->field	= field;
+		vb->state	= VIDEOBUF_NEEDS_INIT;
+	}
+
+	vb->size = icd->sizeimage;
+	if (0 != vb->baddr && vb->bsize < vb->size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		int size = vb->size;
+		int next_ofs = 0;
+		struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+		struct scatterlist *sg;
+
+		ret = videobuf_iolock(vq, vb, NULL);
+		if (ret)
+			goto fail;
+
+		if (pcdev->channels == 3) {
+			size_y = size / 2;
+			size_u = size_v = size / 4;
+		} else {
+			size_y = size;
+		}
+
+		sg = dma->sglist;
+
+		/* init DMA for Y channel */
+		ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y,
+					   &sg, &next_ofs);
+		if (ret) {
+			dev_err(dev, "DMA initialization for Y/RGB failed\n");
+			goto fail;
+		}
+
+		/* init DMA for U channel */
+		if (size_u)
+			ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
+						   size_u, &sg, &next_ofs);
+		if (ret) {
+			dev_err(dev, "DMA initialization for U failed\n");
+			goto fail_u;
+		}
+
+		/* init DMA for V channel */
+		if (size_v)
+			ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
+						   size_v, &sg, &next_ofs);
+		if (ret) {
+			dev_err(dev, "DMA initialization for V failed\n");
+			goto fail_v;
+		}
+
+		vb->state = VIDEOBUF_PREPARED;
+	}
+
+	buf->inwork = 0;
+	pxa_videobuf_set_actdma(pcdev, buf);
+
+	return 0;
+
+fail_v:
+	dma_free_coherent(dev, buf->dmas[1].sg_size,
+			  buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma);
+fail_u:
+	dma_free_coherent(dev, buf->dmas[0].sg_size,
+			  buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma);
+fail:
+	free_buffer(vq, buf);
+out:
+	buf->inwork = 0;
+	return ret;
+}
+
+/**
+ * pxa_dma_start_channels - start DMA channel for active buffer
+ * @pcdev: pxa camera device
+ *
+ * Initialize DMA channels to the beginning of the active video buffer, and
+ * start these channels.
+ */
+static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
+{
+	int i;
+	struct pxa_buffer *active;
+
+	active = pcdev->active;
+
+	for (i = 0; i < pcdev->channels; i++) {
+		dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+			"%s (channel=%d) ddadr=%08x\n", __func__,
+			i, active->dmas[i].sg_dma);
+		DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma;
+		DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
+	}
+}
+
+static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
+{
+	int i;
+
+	for (i = 0; i < pcdev->channels; i++) {
+		dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+			"%s (channel=%d)\n", __func__, i);
+		DCSR(pcdev->dma_chans[i]) = 0;
+	}
+}
+
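+/*
+ * pxa_dma_add_tail_buf - Append a buffer to the running DMA descriptor chains
+ *
+ * For each enabled channel, terminate the buffer's own chain with a stop
+ * descriptor, link it after the previous chain tail (if any), and record the
+ * new tail.
+ */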
+static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
+				 struct pxa_buffer *buf)
+{
+	int i;
+	struct pxa_dma_desc *buf_last_desc;
+
+	for (i = 0; i < pcdev->channels; i++) {
+		buf_last_desc = buf->dmas[i].sg_cpu + buf->dmas[i].sglen;
+		buf_last_desc->ddadr = DDADR_STOP;
+
+		if (pcdev->sg_tail[i])
+			/* Link the new buffer to the old tail */
+			pcdev->sg_tail[i]->ddadr = buf->dmas[i].sg_dma;
+
+		/* Update the channel tail */
+		pcdev->sg_tail[i] = buf_last_desc;
+	}
+}
+
+/**
+ * pxa_camera_start_capture - start video capturing
+ * @pcdev: camera device
+ *
+ * Launch capturing. DMA channels should not be active yet. They should get
+ * activated at the end of frame interrupt, to capture only whole frames, and
+ * never begin the capture of a partial frame.
+ */
+static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
+{
+	unsigned long cicr0;
+
+	dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+	/* Enable End-Of-Frame Interrupt */
+	cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
+	cicr0 &= ~CICR0_EOFM;
+	__raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
+{
+	unsigned long cicr0;
+
+	pxa_dma_stop_channels(pcdev);
+
+	cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
+	__raw_writel(cicr0, pcdev->base + CICR0);
+
+	pcdev->active = NULL;
+	dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+}
+
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
+static void pxa_videobuf_queue(struct videobuf_queue *vq,
+			       struct videobuf_buffer *vb)
+{
+	struct soc_camera_device *icd = vq->priv_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
+		__func__, vb, vb->baddr, vb->bsize, pcdev->active);
+
+	list_add_tail(&vb->queue, &pcdev->capture);
+
+	vb->state = VIDEOBUF_ACTIVE;
+	pxa_dma_add_tail_buf(pcdev, buf);
+
+	if (!pcdev->active)
+		pxa_camera_start_capture(pcdev);
+}
+
+static void pxa_videobuf_release(struct videobuf_queue *vq,
+				 struct videobuf_buffer *vb)
+{
+	struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
+#ifdef DEBUG
+	struct soc_camera_device *icd = vq->priv_data;
+	struct device *dev = icd->parent;
+
+	dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	switch (vb->state) {
+	case VIDEOBUF_ACTIVE:
+		dev_dbg(dev, "%s (active)\n", __func__);
+		break;
+	case VIDEOBUF_QUEUED:
+		dev_dbg(dev, "%s (queued)\n", __func__);
+		break;
+	case VIDEOBUF_PREPARED:
+		dev_dbg(dev, "%s (prepared)\n", __func__);
+		break;
+	default:
+		dev_dbg(dev, "%s (unknown)\n", __func__);
+		break;
+	}
+#endif
+
+	free_buffer(vq, buf);
+}
+
+static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
+			      struct videobuf_buffer *vb,
+			      struct pxa_buffer *buf)
+{
+	int i;
+
+	/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
+	list_del_init(&vb->queue);
+	vb->state = VIDEOBUF_DONE;
+	do_gettimeofday(&vb->ts);
+	vb->field_count++;
+	wake_up(&vb->done);
+	dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeued buffer (vb=0x%p)\n",
+		__func__, vb);
+
+	if (list_empty(&pcdev->capture)) {
+		pxa_camera_stop_capture(pcdev);
+		for (i = 0; i < pcdev->channels; i++)
+			pcdev->sg_tail[i] = NULL;
+		return;
+	}
+
+	pcdev->active = list_entry(pcdev->capture.next,
+				   struct pxa_buffer, vb.queue);
+}
+
+/**
+ * pxa_camera_check_link_miss - check missed DMA linking
+ * @pcdev: camera device
+ *
+ * The DMA chaining is done with DMA running. This means a tiny temporal window
+ * remains, where a buffer is queued on the chain, while the chain is already
+ * stopped. This means the newly tailed buffer would never be transferred by
+ * DMA. This function restarts the capture for this corner case, where:
+ *  - DDADR() == DDADR_STOP
+ *  - a videobuffer is queued on the pcdev->capture list
+ *
+ * Please check the "DMA hot chaining timeslice issue" in
+ *   Documentation/video4linux/pxa_camera.txt
+ *
+ * Context: should only be called within the dma irq handler
+ */
+static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev)
+{
+	int i, is_dma_stopped = 1;
+
+	for (i = 0; i < pcdev->channels; i++)
+		if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP)
+			is_dma_stopped = 0;
+	dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+		"%s : top queued buffer=%p, dma_stopped=%d\n",
+		__func__, pcdev->active, is_dma_stopped);
+	if (pcdev->active && is_dma_stopped)
+		pxa_camera_start_capture(pcdev);
+}
+
+static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
+			       enum pxa_camera_active_dma act_dma)
+{
+	struct device *dev = pcdev->soc_host.v4l2_dev.dev;
+	struct pxa_buffer *buf;
+	unsigned long flags;
+	u32 status, camera_status, overrun;
+	struct videobuf_buffer *vb;
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	status = DCSR(channel);
+	DCSR(channel) = status;
+
+	camera_status = __raw_readl(pcdev->base + CISR);
+	overrun = CISR_IFO_0;
+	if (pcdev->channels == 3)
+		overrun |= CISR_IFO_1 | CISR_IFO_2;
+
+	if (status & DCSR_BUSERR) {
+		dev_err(dev, "DMA Bus Error IRQ!\n");
+		goto out;
+	}
+
+	if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) {
+		dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n",
+			status);
+		goto out;
+	}
+
+	/*
+	 * pcdev->active should not be NULL in DMA irq handler.
+	 *
+	 * But there is one corner case : if capture was stopped due to an
+	 * overrun of channel 1, and at that same time channel 2 was completed.
+	 *
+	 * When handling the overrun in DMA irq for channel 1, we'll stop the
+	 * capture and restart it (and thus set pcdev->active to NULL). But the
+	 * DMA irq handler will already be pending for channel 2. So on entering
+	 * the DMA irq handler for channel 2 there will be no active buffer, yet
+	 * that is normal.
+	 */
+	if (!pcdev->active)
+		goto out;
+
+	vb = &pcdev->active->vb;
+	buf = container_of(vb, struct pxa_buffer, vb);
+	WARN_ON(buf->inwork || list_empty(&vb->queue));
+
+	dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
+		__func__, channel, status & DCSR_STARTINTR ? "SOF " : "",
+		status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel));
+
+	if (status & DCSR_ENDINTR) {
+		/*
+		 * It's normal if the last frame creates an overrun, as there
+		 * are no more DMA descriptors to fetch from the QCI FIFOs
+		 */
+		if (camera_status & overrun &&
+		    !list_is_last(pcdev->capture.next, &pcdev->capture)) {
+			dev_dbg(dev, "FIFO overrun! CISR: %x\n",
+				camera_status);
+			pxa_camera_stop_capture(pcdev);
+			pxa_camera_start_capture(pcdev);
+			goto out;
+		}
+		buf->active_dma &= ~act_dma;
+		if (!buf->active_dma) {
+			pxa_camera_wakeup(pcdev, vb, buf);
+			pxa_camera_check_link_miss(pcdev);
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static void pxa_camera_dma_irq_y(int channel, void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+	pxa_camera_dma_irq(channel, pcdev, DMA_Y);
+}
+
+static void pxa_camera_dma_irq_u(int channel, void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+	pxa_camera_dma_irq(channel, pcdev, DMA_U);
+}
+
+static void pxa_camera_dma_irq_v(int channel, void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+	pxa_camera_dma_irq(channel, pcdev, DMA_V);
+}
+
+static struct videobuf_queue_ops pxa_videobuf_ops = {
+	.buf_setup      = pxa_videobuf_setup,
+	.buf_prepare    = pxa_videobuf_prepare,
+	.buf_queue      = pxa_videobuf_queue,
+	.buf_release    = pxa_videobuf_release,
+};
+
+static void pxa_camera_init_videobuf(struct videobuf_queue *q,
+			      struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+
+	/*
+	 * We must pass NULL as the dev pointer, so that all pci_* DMA
+	 * operations are transformed into normal dma_* ones.
+	 */
+	videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+				sizeof(struct pxa_buffer), icd, &icd->video_lock);
+}
+
+static u32 mclk_get_divisor(struct platform_device *pdev,
+			    struct pxa_camera_dev *pcdev)
+{
+	unsigned long mclk = pcdev->mclk;
+	struct device *dev = &pdev->dev;
+	u32 div;
+	unsigned long lcdclk;
+
+	lcdclk = clk_get_rate(pcdev->clk);
+	pcdev->ciclk = lcdclk;
+
+	/* mclk <= ciclk / 4 (27.4.2) */
+	if (mclk > lcdclk / 4) {
+		mclk = lcdclk / 4;
+		dev_warn(dev, "Limiting master clock to %lu\n", mclk);
+	}
+
+	/* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
+	div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
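+	/*
+	 * For example, with a 104 MHz LCD clock and a requested MCLK of 26 MHz,
+	 * div = 1 and the resulting MCLK is 104 MHz / (2 * (1 + 1)) = 26 MHz.
+	 */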
+
+	/* If we're not supplying MCLK, leave it at 0 */
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		pcdev->mclk = lcdclk / (2 * (div + 1));
+
+	dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n",
+		lcdclk, mclk, div);
+
+	return div;
+}
+
+static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev,
+				     unsigned long pclk)
+{
+	/* We want a timeout > 1 pixel time, not ">=" */
+	u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1;
+
+	__raw_writel(ciclk_per_pixel, pcdev->base + CITOR);
+}
+
+static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
+{
+	u32 cicr4 = 0;
+
+	/* disable all interrupts */
+	__raw_writel(0x3ff, pcdev->base + CICR0);
+
+	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+		cicr4 |= CICR4_PCLK_EN;
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		cicr4 |= CICR4_MCLK_EN;
+	if (pcdev->platform_flags & PXA_CAMERA_PCP)
+		cicr4 |= CICR4_PCP;
+	if (pcdev->platform_flags & PXA_CAMERA_HSP)
+		cicr4 |= CICR4_HSP;
+	if (pcdev->platform_flags & PXA_CAMERA_VSP)
+		cicr4 |= CICR4_VSP;
+
+	__raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4);
+
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		/* Initialise the timeout under the assumption pclk = mclk */
+		recalculate_fifo_timeout(pcdev, pcdev->mclk);
+	else
+		/* "Safe default" - 13MHz */
+		recalculate_fifo_timeout(pcdev, 13000000);
+
+	clk_prepare_enable(pcdev->clk);
+}
+
+static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
+{
+	clk_disable_unprepare(pcdev->clk);
+}
+
+static irqreturn_t pxa_camera_irq(int irq, void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+	unsigned long status, cifr, cicr0;
+	struct pxa_buffer *buf;
+	struct videobuf_buffer *vb;
+
+	status = __raw_readl(pcdev->base + CISR);
+	dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+		"Camera interrupt status 0x%lx\n", status);
+
+	if (!status)
+		return IRQ_NONE;
+
+	__raw_writel(status, pcdev->base + CISR);
+
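+	/*
+	 * On end of frame, reset the FIFOs, point the DMA channels at the head
+	 * of the capture queue so the next transfer starts on a frame boundary,
+	 * and mask the EOF interrupt again until the next
+	 * pxa_camera_start_capture().
+	 */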
+	if (status & CISR_EOF) {
+		/* Reset the FIFOs */
+		cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
+		__raw_writel(cifr, pcdev->base + CIFR);
+
+		pcdev->active = list_first_entry(&pcdev->capture,
+					   struct pxa_buffer, vb.queue);
+		vb = &pcdev->active->vb;
+		buf = container_of(vb, struct pxa_buffer, vb);
+		pxa_videobuf_set_actdma(pcdev, buf);
+
+		pxa_dma_start_channels(pcdev);
+
+		cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
+		__raw_writel(cicr0, pcdev->base + CICR0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * The following two functions absolutely depend on the fact that
+ * there can be only one camera on the PXA quick capture interface.
+ * Called with .video_lock held
+ */
+static int pxa_camera_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+
+	if (pcdev->icd)
+		return -EBUSY;
+
+	pxa_camera_activate(pcdev);
+
+	pcdev->icd = icd;
+
+	dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
+		 icd->devnum);
+
+	return 0;
+}
+
+/* Called with .video_lock held */
+static void pxa_camera_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+
+	BUG_ON(icd != pcdev->icd);
+
+	dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
+		 icd->devnum);
+
+	/* disable capture, disable interrupts */
+	__raw_writel(0x3ff, pcdev->base + CICR0);
+
+	/* Stop DMA engine */
+	DCSR(pcdev->dma_chans[0]) = 0;
+	DCSR(pcdev->dma_chans[1]) = 0;
+	DCSR(pcdev->dma_chans[2]) = 0;
+
+	pxa_camera_deactivate(pcdev);
+
+	pcdev->icd = NULL;
+}
+
+static int test_platform_param(struct pxa_camera_dev *pcdev,
+			       unsigned char buswidth, unsigned long *flags)
+{
+	/*
+	 * Platform specified synchronization and pixel clock polarities are
+	 * only a recommendation and are only used during probing. The PXA270
+	 * quick capture interface supports both.
+	 */
+	*flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+		  V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
+		V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+		V4L2_MBUS_HSYNC_ACTIVE_LOW |
+		V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+		V4L2_MBUS_VSYNC_ACTIVE_LOW |
+		V4L2_MBUS_DATA_ACTIVE_HIGH |
+		V4L2_MBUS_PCLK_SAMPLE_RISING |
+		V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+	/* If requested data width is supported by the platform, use it */
+	if ((1 << (buswidth - 1)) & pcdev->width_flags)
+		return 0;
+
+	return -EINVAL;
+}
+
+static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
+				  unsigned long flags, __u32 pixfmt)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	unsigned long dw, bpp;
+	u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
+	int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top);
+
+	if (ret < 0)
+		y_skip_top = 0;
+
+	/*
+	 * The data width is now guaranteed to be one of the three values below.
+	 * We fix bits-per-pixel equal to the data width...
+	 */
+	switch (icd->current_fmt->host_fmt->bits_per_sample) {
+	case 10:
+		dw = 4;
+		bpp = 0x40;
+		break;
+	case 9:
+		dw = 3;
+		bpp = 0x20;
+		break;
+	default:
+		/*
+		 * Actually it can only be 8 now,
+		 * default is just to silence compiler warnings
+		 */
+	case 8:
+		dw = 2;
+		bpp = 0;
+	}
+
+	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+		cicr4 |= CICR4_PCLK_EN;
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		cicr4 |= CICR4_MCLK_EN;
+	if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+		cicr4 |= CICR4_PCP;
+	if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+		cicr4 |= CICR4_HSP;
+	if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+		cicr4 |= CICR4_VSP;
+
+	cicr0 = __raw_readl(pcdev->base + CICR0);
+	if (cicr0 & CICR0_ENB)
+		__raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
+
+	cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw;
+
+	switch (pixfmt) {
+	case V4L2_PIX_FMT_YUV422P:
+		pcdev->channels = 3;
+		cicr1 |= CICR1_YCBCR_F;
+		/*
+		 * Normally the PXA bus expects UYVY as the input format. We
+		 * allow all reorderings of the YUV422 format, as no processing
+		 * is done and the YUV stream is just passed through without
+		 * any transformation. Note that UYVY is the only format that
+		 * should be used if the pxa framebuffer Overlay2 is used.
+		 */
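+		/* Fall through */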
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+		cicr1 |= CICR1_COLOR_SP_VAL(2);
+		break;
+	case V4L2_PIX_FMT_RGB555:
+		cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) |
+			CICR1_TBIT | CICR1_COLOR_SP_VAL(1);
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2);
+		break;
+	}
+
+	cicr2 = 0;
+	cicr3 = CICR3_LPF_VAL(icd->user_height - 1) |
+		CICR3_BFW_VAL(min((u32)255, y_skip_top));
+	cicr4 |= pcdev->mclk_divisor;
+
+	__raw_writel(cicr1, pcdev->base + CICR1);
+	__raw_writel(cicr2, pcdev->base + CICR2);
+	__raw_writel(cicr3, pcdev->base + CICR3);
+	__raw_writel(cicr4, pcdev->base + CICR4);
+
+	/* CIF interrupts are not used, only DMA */
+	cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+		CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP));
+	cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK;
+	__raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+static int pxa_camera_set_bus_param(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+	unsigned long bus_flags, common_flags;
+	int ret;
+	struct pxa_cam *cam = icd->host_priv;
+
+	ret = test_platform_param(pcdev, icd->current_fmt->host_fmt->bits_per_sample,
+				  &bus_flags);
+	if (ret < 0)
+		return ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  bus_flags);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
+				 cfg.flags, bus_flags);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = bus_flags;
+	}
+
+	pcdev->channels = 1;
+
+	/* Make choices based on platform preferences */
+	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+		if (pcdev->platform_flags & PXA_CAMERA_HSP)
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+		if (pcdev->platform_flags & PXA_CAMERA_VSP)
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+	    (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+		if (pcdev->platform_flags & PXA_CAMERA_PCP)
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+		else
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	cam->flags = common_flags;
+
+	pxa_camera_setup_cicr(icd, common_flags, pixfmt);
+
+	return 0;
+}
+
+static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
+				    unsigned char buswidth)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long bus_flags, common_flags;
+	int ret = test_platform_param(pcdev, buswidth, &bus_flags);
+
+	if (ret < 0)
+		return ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  bus_flags);
+		if (!common_flags) {
+			dev_warn(icd->parent,
+				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
+				 cfg.flags, bus_flags);
+			return -EINVAL;
+		}
+	} else if (ret == -ENOIOCTLCMD) {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
+	{
+		.fourcc			= V4L2_PIX_FMT_YUV422P,
+		.name			= "Planar YUV422 16 bit",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
+	},
+};
+
+/* This will be corrected as we get more formats */
+static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+	return	fmt->packing == SOC_MBUS_PACKING_NONE ||
+		(fmt->bits_per_sample == 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+		(fmt->bits_per_sample > 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx,
+				  struct soc_camera_format_xlate *xlate)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	int formats = 0, ret;
+	struct pxa_cam *cam;
+	enum v4l2_mbus_pixelcode code;
+	const struct soc_mbus_pixelfmt *fmt;
+
+	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+	if (ret < 0)
+		/* No more formats */
+		return 0;
+
+	fmt = soc_mbus_get_fmtdesc(code);
+	if (!fmt) {
+		dev_err(dev, "Invalid format code #%u: %d\n", idx, code);
+		return 0;
+	}
+
+	/* This also checks support for the requested bits-per-sample */
+	ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample);
+	if (ret < 0)
+		return 0;
+
+	if (!icd->host_priv) {
+		cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+		if (!cam)
+			return -ENOMEM;
+
+		icd->host_priv = cam;
+	} else {
+		cam = icd->host_priv;
+	}
+
+	switch (code) {
+	case V4L2_MBUS_FMT_UYVY8_2X8:
+		formats++;
+		if (xlate) {
+			xlate->host_fmt	= &pxa_camera_formats[0];
+			xlate->code	= code;
+			xlate++;
+			dev_dbg(dev, "Providing format %s using code %d\n",
+				pxa_camera_formats[0].name, code);
+		}
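+		/* Fall through - UYVY is also provided via pass-through below */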
+	case V4L2_MBUS_FMT_VYUY8_2X8:
+	case V4L2_MBUS_FMT_YUYV8_2X8:
+	case V4L2_MBUS_FMT_YVYU8_2X8:
+	case V4L2_MBUS_FMT_RGB565_2X8_LE:
+	case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+		if (xlate)
+			dev_dbg(dev, "Providing format %s packed\n",
+				fmt->name);
+		break;
+	default:
+		if (!pxa_camera_packing_supported(fmt))
+			return 0;
+		if (xlate)
+			dev_dbg(dev,
+				"Providing format %s in pass-through mode\n",
+				fmt->name);
+	}
+
+	/* Generic pass-through */
+	formats++;
+	if (xlate) {
+		xlate->host_fmt	= fmt;
+		xlate->code	= code;
+		xlate++;
+	}
+
+	return formats;
+}
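
The reason pxa_camera_get_formats() only touches xlate when it is non-NULL is the two-pass calling convention of the soc_camera core: .get_formats is first called with a NULL translation table just to count the entries for a given media-bus code, and then again with a real array to fill in. A minimal sketch of that convention follows; the wrapper name is hypothetical, only the NULL-then-array pattern is taken from the code above.

	/* Hypothetical illustration of the two-pass .get_formats convention. */
	static int example_count_then_fill(struct soc_camera_device *icd,
					   unsigned int idx,
					   struct soc_camera_format_xlate **table)
	{
		int n = pxa_camera_get_formats(icd, idx, NULL);	/* pass 1: count */

		if (n <= 0)
			return n;

		*table = kcalloc(n, sizeof(**table), GFP_KERNEL);
		if (!*table)
			return -ENOMEM;

		return pxa_camera_get_formats(icd, idx, *table);	/* pass 2: fill */
	}
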
+
+static void pxa_camera_put_formats(struct soc_camera_device *icd)
+{
+	kfree(icd->host_priv);
+	icd->host_priv = NULL;
+}
+
+static int pxa_camera_check_frame(u32 width, u32 height)
+{
+	/* limit to pxa hardware capabilities */
+	return height < 32 || height > 2048 || width < 48 || width > 2048 ||
+		(width & 0x01);
+}
+
+static int pxa_camera_set_crop(struct soc_camera_device *icd,
+			       struct v4l2_crop *a)
+{
+	struct v4l2_rect *rect = &a->c;
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_sense sense = {
+		.master_clock = pcdev->mclk,
+		.pixel_clock_max = pcdev->ciclk / 4,
+	};
+	struct v4l2_mbus_framefmt mf;
+	struct pxa_cam *cam = icd->host_priv;
+	u32 fourcc = icd->current_fmt->host_fmt->fourcc;
+	int ret;
+
+	/* If PCLK is used to latch data from the sensor, check sense */
+	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+		icd->sense = &sense;
+
+	ret = v4l2_subdev_call(sd, video, s_crop, a);
+
+	icd->sense = NULL;
+
+	if (ret < 0) {
+		dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n",
+			 rect->width, rect->height, rect->left, rect->top);
+		return ret;
+	}
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	if (pxa_camera_check_frame(mf.width, mf.height)) {
+		/*
+		 * Camera cropping produced a frame beyond our capabilities.
+		 * FIXME: just extract a subframe that we can process.
+		 */
+		v4l_bound_align_image(&mf.width, 48, 2048, 1,
+			&mf.height, 32, 2048, 0,
+			fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
+		ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+		if (ret < 0)
+			return ret;
+
+		if (pxa_camera_check_frame(mf.width, mf.height)) {
+			dev_warn(icd->parent,
+				 "Inconsistent state. Use S_FMT to repair\n");
+			return -EINVAL;
+		}
+	}
+
+	if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
+		if (sense.pixel_clock > sense.pixel_clock_max) {
+			dev_err(dev,
+				"pixel clock %lu set by the camera too high!",
+				sense.pixel_clock);
+			return -EIO;
+		}
+		recalculate_fifo_timeout(pcdev, sense.pixel_clock);
+	}
+
+	icd->user_width		= mf.width;
+	icd->user_height	= mf.height;
+
+	pxa_camera_setup_cicr(icd, cam->flags, fourcc);
+
+	return ret;
+}
+
+static int pxa_camera_set_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate = NULL;
+	struct soc_camera_sense sense = {
+		.master_clock = pcdev->mclk,
+		.pixel_clock_max = pcdev->ciclk / 4,
+	};
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate) {
+		dev_warn(dev, "Format %x not found\n", pix->pixelformat);
+		return -EINVAL;
+	}
+
+	/* If PCLK is used to latch data from the sensor, check sense */
+	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+		/* The caller holds a mutex. */
+		icd->sense = &sense;
+
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
+	icd->sense = NULL;
+
+	if (ret < 0) {
+		dev_warn(dev, "Failed to configure for format %x\n",
+			 pix->pixelformat);
+	} else if (pxa_camera_check_frame(mf.width, mf.height)) {
+		dev_warn(dev,
+			 "Camera driver produced an unsupported frame %dx%d\n",
+			 mf.width, mf.height);
+		ret = -EINVAL;
+	} else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
+		if (sense.pixel_clock > sense.pixel_clock_max) {
+			dev_err(dev,
+				"pixel clock %lu set by the camera too high!",
+				sense.pixel_clock);
+			return -EIO;
+		}
+		recalculate_fifo_timeout(pcdev, sense.pixel_clock);
+	}
+
+	if (ret < 0)
+		return ret;
+
+	pix->width		= mf.width;
+	pix->height		= mf.height;
+	pix->field		= mf.field;
+	pix->colorspace		= mf.colorspace;
+	icd->current_fmt	= xlate;
+
+	return ret;
+}
+
+static int pxa_camera_try_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	__u32 pixfmt = pix->pixelformat;
+	int ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (!xlate) {
+		dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	/*
+	 * Limit to pxa hardware capabilities.  YUV422P planar format requires
+	 * image size to be a multiple of 16 bytes.  If not, zeros will be
+	 * inserted between Y and U planes, and U and V planes, which violates
+	 * the YUV422P standard.
+	 */
+	v4l_bound_align_image(&pix->width, 48, 2048, 1,
+			      &pix->height, 32, 2048, 0,
+			      pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
+
+	/* limit to sensor capabilities */
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	/* Only progressive video supported so far */
+	mf.field	= V4L2_FIELD_NONE;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->colorspace	= mf.colorspace;
+
+	switch (mf.field) {
+	case V4L2_FIELD_ANY:
+	case V4L2_FIELD_NONE:
+		pix->field	= V4L2_FIELD_NONE;
+		break;
+	default:
+		/* TODO: support interlaced at least in pass-through mode */
+		dev_err(icd->parent, "Field type %d unsupported.\n",
+			mf.field);
+		return -EINVAL;
+	}
+
+	return ret;
+}
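
The 16-byte alignment comment in pxa_camera_try_fmt() follows from the planar YUV422P layout, where the two chroma planes are half the width of the luma plane and are expected to start immediately after it. The helper below is purely illustrative (it is not part of the driver) and only spells out those plane sizes.

	/* Illustrative only: plane sizes for planar YUV422P (4:2:2). */
	static void yuv422p_plane_sizes(unsigned int width, unsigned int height,
					unsigned int *y_size, unsigned int *c_size)
	{
		*y_size = width * height;	/* Y plane, 8 bits per pixel */
		*c_size = (width / 2) * height;	/* each of the Cb and Cr planes */
		/* total frame size = *y_size + 2 * *c_size = 2 * width * height */
	}
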
+
+static int pxa_camera_reqbufs(struct soc_camera_device *icd,
+			      struct v4l2_requestbuffers *p)
+{
+	int i;
+
+	/*
+	 * This is for locking debugging only. I removed spinlocks and now I
+	 * check whether .prepare is ever called on a linked buffer, or whether
+	 * a DMA IRQ can occur for an in-work or unlinked buffer. So far it
+	 * has not triggered.
+	 */
+	for (i = 0; i < p->count; i++) {
+		struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i],
+						      struct pxa_buffer, vb);
+		buf->inwork = 0;
+		INIT_LIST_HEAD(&buf->vb.queue);
+	}
+
+	return 0;
+}
+
+static unsigned int pxa_camera_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct pxa_buffer *buf;
+
+	buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer,
+			 vb.stream);
+
+	poll_wait(file, &buf->vb.done, pt);
+
+	if (buf->vb.state == VIDEOBUF_DONE ||
+	    buf->vb.state == VIDEOBUF_ERROR)
+		return POLLIN|POLLRDNORM;
+
+	return 0;
+}
+
+static int pxa_camera_querycap(struct soc_camera_host *ici,
+			       struct v4l2_capability *cap)
+{
+	/* cap->name is set by the friendly caller:-> */
+	strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+	return 0;
+}
+
+static int pxa_camera_suspend(struct device *dev)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	int i = 0, ret = 0;
+
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
+
+	if (pcdev->icd) {
+		struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+		ret = v4l2_subdev_call(sd, core, s_power, 0);
+		if (ret == -ENOIOCTLCMD)
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static int pxa_camera_resume(struct device *dev)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct pxa_camera_dev *pcdev = ici->priv;
+	int i = 0, ret = 0;
+
+	DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
+	DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
+	DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
+
+	__raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
+
+	if (pcdev->icd) {
+		struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+		ret = v4l2_subdev_call(sd, core, s_power, 1);
+		if (ret == -ENOIOCTLCMD)
+			ret = 0;
+	}
+
+	/* Restart frame capture if active buffer exists */
+	if (!ret && pcdev->active)
+		pxa_camera_start_capture(pcdev);
+
+	return ret;
+}
+
+static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= pxa_camera_add_device,
+	.remove		= pxa_camera_remove_device,
+	.set_crop	= pxa_camera_set_crop,
+	.get_formats	= pxa_camera_get_formats,
+	.put_formats	= pxa_camera_put_formats,
+	.set_fmt	= pxa_camera_set_fmt,
+	.try_fmt	= pxa_camera_try_fmt,
+	.init_videobuf	= pxa_camera_init_videobuf,
+	.reqbufs	= pxa_camera_reqbufs,
+	.poll		= pxa_camera_poll,
+	.querycap	= pxa_camera_querycap,
+	.set_bus_param	= pxa_camera_set_bus_param,
+};
+
+static int __devinit pxa_camera_probe(struct platform_device *pdev)
+{
+	struct pxa_camera_dev *pcdev;
+	struct resource *res;
+	void __iomem *base;
+	int irq;
+	int err = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || irq < 0) {
+		err = -ENODEV;
+		goto exit;
+	}
+
+	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+	if (!pcdev) {
+		dev_err(&pdev->dev, "Could not allocate pcdev\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	pcdev->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pcdev->clk)) {
+		err = PTR_ERR(pcdev->clk);
+		goto exit_kfree;
+	}
+
+	pcdev->res = res;
+
+	pcdev->pdata = pdev->dev.platform_data;
+	pcdev->platform_flags = pcdev->pdata->flags;
+	if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
+			PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
+		/*
+		 * Platform hasn't set available data widths. This is bad.
+		 * Warn and use a default.
+		 */
+		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
+			 "data widths, using default 10 bit\n");
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
+	}
+	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
+		pcdev->width_flags = 1 << 7;
+	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)
+		pcdev->width_flags |= 1 << 8;
+	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)
+		pcdev->width_flags |= 1 << 9;
+	pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+	if (!pcdev->mclk) {
+		dev_warn(&pdev->dev,
+			 "mclk == 0! Please, fix your platform data. "
+			 "Using default 20MHz\n");
+		pcdev->mclk = 20000000;
+	}
+
+	pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev);
+
+	INIT_LIST_HEAD(&pcdev->capture);
+	spin_lock_init(&pcdev->lock);
+
+	/*
+	 * Request the regions.
+	 */
+	if (!request_mem_region(res->start, resource_size(res),
+				PXA_CAM_DRV_NAME)) {
+		err = -EBUSY;
+		goto exit_clk;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+	pcdev->irq = irq;
+	pcdev->base = base;
+
+	/* request dma */
+	err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
+			      pxa_camera_dma_irq_y, pcdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Can't request DMA for Y\n");
+		goto exit_iounmap;
+	}
+	pcdev->dma_chans[0] = err;
+	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
+
+	err = pxa_request_dma("CI_U", DMA_PRIO_HIGH,
+			      pxa_camera_dma_irq_u, pcdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Can't request DMA for U\n");
+		goto exit_free_dma_y;
+	}
+	pcdev->dma_chans[1] = err;
+	dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
+
+	err = pxa_request_dma("CI_V", DMA_PRIO_HIGH,
+			      pxa_camera_dma_irq_v, pcdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Can't request DMA for V\n");
+		goto exit_free_dma_u;
+	}
+	pcdev->dma_chans[2] = err;
+	dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
+
+	DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
+	DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
+	DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
+
+	/* request irq */
+	err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME,
+			  pcdev);
+	if (err) {
+		dev_err(&pdev->dev, "Camera interrupt registration failed\n");
+		goto exit_free_dma;
+	}
+
+	pcdev->soc_host.drv_name	= PXA_CAM_DRV_NAME;
+	pcdev->soc_host.ops		= &pxa_soc_camera_host_ops;
+	pcdev->soc_host.priv		= pcdev;
+	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
+	pcdev->soc_host.nr		= pdev->id;
+
+	err = soc_camera_host_register(&pcdev->soc_host);
+	if (err)
+		goto exit_free_irq;
+
+	return 0;
+
+exit_free_irq:
+	free_irq(pcdev->irq, pcdev);
+exit_free_dma:
+	pxa_free_dma(pcdev->dma_chans[2]);
+exit_free_dma_u:
+	pxa_free_dma(pcdev->dma_chans[1]);
+exit_free_dma_y:
+	pxa_free_dma(pcdev->dma_chans[0]);
+exit_iounmap:
+	iounmap(base);
+exit_release:
+	release_mem_region(res->start, resource_size(res));
+exit_clk:
+	clk_put(pcdev->clk);
+exit_kfree:
+	kfree(pcdev);
+exit:
+	return err;
+}
+
+static int __devexit pxa_camera_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct pxa_camera_dev *pcdev = container_of(soc_host,
+					struct pxa_camera_dev, soc_host);
+	struct resource *res;
+
+	clk_put(pcdev->clk);
+
+	pxa_free_dma(pcdev->dma_chans[0]);
+	pxa_free_dma(pcdev->dma_chans[1]);
+	pxa_free_dma(pcdev->dma_chans[2]);
+	free_irq(pcdev->irq, pcdev);
+
+	soc_camera_host_unregister(soc_host);
+
+	iounmap(pcdev->base);
+
+	res = pcdev->res;
+	release_mem_region(res->start, resource_size(res));
+
+	kfree(pcdev);
+
+	dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
+
+	return 0;
+}
+
+static struct dev_pm_ops pxa_camera_pm = {
+	.suspend	= pxa_camera_suspend,
+	.resume		= pxa_camera_resume,
+};
+
+static struct platform_driver pxa_camera_driver = {
+	.driver		= {
+		.name	= PXA_CAM_DRV_NAME,
+		.pm	= &pxa_camera_pm,
+	},
+	.probe		= pxa_camera_probe,
+	.remove		= __devexit_p(pxa_camera_remove),
+};
+
+module_platform_driver(pxa_camera_driver);
+
+MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PXA_CAM_VERSION);
+MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/platform/s5p-fimc/Kconfig b/drivers/media/platform/s5p-fimc/Kconfig
new file mode 100644
index 0000000..a564f7e
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/Kconfig
@@ -0,0 +1,48 @@
+
+config VIDEO_SAMSUNG_S5P_FIMC
+	bool "Samsung S5P/EXYNOS SoC camera interface driver (experimental)"
+	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PLAT_S5P && PM_RUNTIME
+	depends on EXPERIMENTAL
+	help
+	  Say Y here to enable camera host interface devices for
+	  Samsung S5P and EXYNOS SoC series.
+
+if VIDEO_SAMSUNG_S5P_FIMC
+
+config VIDEO_S5P_FIMC
+	tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
+	depends on I2C
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	help
+	  This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC camera host
+	  interface and video postprocessor (FIMC and FIMC-LITE) devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called s5p-fimc.
+
+config VIDEO_S5P_MIPI_CSIS
+	tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
+	depends on REGULATOR
+	help
+	  This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
+	  receiver (MIPI-CSIS) devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called s5p-csis.
+
+if ARCH_EXYNOS
+
+config VIDEO_EXYNOS_FIMC_LITE
+	tristate "EXYNOS FIMC-LITE camera interface driver"
+	depends on I2C
+	select VIDEOBUF2_DMA_CONTIG
+	help
+	  This is a V4L2 driver for Samsung EXYNOS4/5 SoC FIMC-LITE camera
+	  host interface.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called exynos-fimc-lite.
+endif
+
+endif # VIDEO_SAMSUNG_S5P_FIMC
diff --git a/drivers/media/platform/s5p-fimc/Makefile b/drivers/media/platform/s5p-fimc/Makefile
new file mode 100644
index 0000000..4648514
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/Makefile
@@ -0,0 +1,7 @@
+s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-m2m.o fimc-capture.o fimc-mdevice.o
+exynos-fimc-lite-objs += fimc-lite-reg.o fimc-lite.o
+s5p-csis-objs := mipi-csis.o
+
+obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS)	+= s5p-csis.o
+obj-$(CONFIG_VIDEO_EXYNOS_FIMC_LITE)	+= exynos-fimc-lite.o
+obj-$(CONFIG_VIDEO_S5P_FIMC)		+= s5p-fimc.o
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
new file mode 100644
index 0000000..8e413dd
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -0,0 +1,1738 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-mdevice.h"
+#include "fimc-core.h"
+#include "fimc-reg.h"
+
+static int fimc_capture_hw_init(struct fimc_dev *fimc)
+{
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct fimc_pipeline *p = &fimc->pipeline;
+	struct fimc_sensor_info *sensor;
+	unsigned long flags;
+	int ret = 0;
+
+	if (p->subdevs[IDX_SENSOR] == NULL || ctx == NULL)
+		return -ENXIO;
+	if (ctx->s_frame.fmt == NULL)
+		return -EINVAL;
+
+	sensor = v4l2_get_subdev_hostdata(p->subdevs[IDX_SENSOR]);
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+	fimc_set_yuv_order(ctx);
+
+	fimc_hw_set_camera_polarity(fimc, sensor->pdata);
+	fimc_hw_set_camera_type(fimc, sensor->pdata);
+	fimc_hw_set_camera_source(fimc, sensor->pdata);
+	fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+	ret = fimc_set_scaler_info(ctx);
+	if (!ret) {
+		fimc_hw_set_input_path(ctx);
+		fimc_hw_set_prescaler(ctx);
+		fimc_hw_set_mainscaler(ctx);
+		fimc_hw_set_target_format(ctx);
+		fimc_hw_set_rotation(ctx);
+		fimc_hw_set_effect(ctx);
+		fimc_hw_set_output_path(ctx);
+		fimc_hw_set_out_dma(ctx);
+		if (fimc->variant->has_alpha)
+			fimc_hw_set_rgb_alpha(ctx);
+		clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+	}
+	spin_unlock_irqrestore(&fimc->slock, flags);
+	return ret;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
+{
+	struct fimc_vid_cap *cap = &fimc->vid_cap;
+	struct fimc_vid_buffer *buf;
+	unsigned long flags;
+	bool streaming;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	streaming = fimc->state & (1 << ST_CAPT_ISP_STREAM);
+
+	fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT |
+			 1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM);
+	if (suspend)
+		fimc->state |= (1 << ST_CAPT_SUSPENDED);
+	else
+		fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED);
+
+	/* Release unused buffers */
+	while (!suspend && !list_empty(&cap->pending_buf_q)) {
+		buf = fimc_pending_queue_pop(cap);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+	/* If suspending, move remaining active buffers to the pending queue */
+	while (!list_empty(&cap->active_buf_q)) {
+		buf = fimc_active_queue_pop(cap);
+		if (suspend)
+			fimc_pending_queue_add(cap, buf);
+		else
+			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	fimc_hw_reset(fimc);
+	cap->buf_index = 0;
+
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	if (streaming)
+		return fimc_pipeline_s_stream(&fimc->pipeline, 0);
+	else
+		return 0;
+}
+
+static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
+{
+	unsigned long flags;
+
+	if (!fimc_capture_active(fimc))
+		return 0;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	set_bit(ST_CAPT_SHUT, &fimc->state);
+	fimc_deactivate_capture(fimc);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	wait_event_timeout(fimc->irq_queue,
+			   !test_bit(ST_CAPT_SHUT, &fimc->state),
+			   (2*HZ/10)); /* 200 ms */
+
+	return fimc_capture_state_cleanup(fimc, suspend);
+}
+
+/**
+ * fimc_capture_config_update - apply the camera interface configuration
+ *
+ * To be called from within the interrupt handler with fimc.slock
+ * spinlock held. It updates the camera pixel crop, rotation and
+ * image flip in H/W.
+ */
+static int fimc_capture_config_update(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	int ret;
+
+	fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+	ret = fimc_set_scaler_info(ctx);
+	if (ret)
+		return ret;
+
+	fimc_hw_set_prescaler(ctx);
+	fimc_hw_set_mainscaler(ctx);
+	fimc_hw_set_target_format(ctx);
+	fimc_hw_set_rotation(ctx);
+	fimc_hw_set_effect(ctx);
+	fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+	fimc_hw_set_out_dma(ctx);
+	if (fimc->variant->has_alpha)
+		fimc_hw_set_rgb_alpha(ctx);
+
+	clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+	return ret;
+}
+
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
+{
+	struct fimc_vid_cap *cap = &fimc->vid_cap;
+	struct fimc_vid_buffer *v_buf;
+	struct timeval *tv;
+	struct timespec ts;
+
+	if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+		wake_up(&fimc->irq_queue);
+		goto done;
+	}
+
+	if (!list_empty(&cap->active_buf_q) &&
+	    test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
+		ktime_get_real_ts(&ts);
+
+		v_buf = fimc_active_queue_pop(cap);
+
+		tv = &v_buf->vb.v4l2_buf.timestamp;
+		tv->tv_sec = ts.tv_sec;
+		tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+		v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
+
+		vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
+	}
+
+	if (!list_empty(&cap->pending_buf_q)) {
+
+		v_buf = fimc_pending_queue_pop(cap);
+		fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+		v_buf->index = cap->buf_index;
+
+		/* Move the buffer to the capture active queue */
+		fimc_active_queue_add(cap, v_buf);
+
+		dbg("next frame: %d, done frame: %d",
+		    fimc_hw_get_frame_index(fimc), v_buf->index);
+
+		if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+			cap->buf_index = 0;
+	}
+
+	if (cap->active_buf_cnt == 0) {
+		if (deq_buf)
+			clear_bit(ST_CAPT_RUN, &fimc->state);
+
+		if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+			cap->buf_index = 0;
+	} else {
+		set_bit(ST_CAPT_RUN, &fimc->state);
+	}
+
+	if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+		fimc_capture_config_update(cap->ctx);
+done:
+	if (cap->active_buf_cnt == 1) {
+		fimc_deactivate_capture(fimc);
+		clear_bit(ST_CAPT_STREAM, &fimc->state);
+	}
+
+	dbg("frame: %d, active_buf_cnt: %d",
+	    fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
+
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct fimc_ctx *ctx = q->drv_priv;
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+	int min_bufs;
+	int ret;
+
+	vid_cap->frame_count = 0;
+
+	ret = fimc_capture_hw_init(fimc);
+	if (ret) {
+		fimc_capture_state_cleanup(fimc, false);
+		return ret;
+	}
+
+	set_bit(ST_CAPT_PEND, &fimc->state);
+
+	min_bufs = fimc->vid_cap.reqbufs_count > 1 ? 2 : 1;
+
+	if (vid_cap->active_buf_cnt >= min_bufs &&
+	    !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+		fimc_activate_capture(ctx);
+
+		if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+			fimc_pipeline_s_stream(&fimc->pipeline, 1);
+	}
+
+	return 0;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+	struct fimc_ctx *ctx = q->drv_priv;
+	struct fimc_dev *fimc = ctx->fimc_dev;
+
+	if (!fimc_capture_active(fimc))
+		return -EINVAL;
+
+	return fimc_stop_capture(fimc, false);
+}
+
+int fimc_capture_suspend(struct fimc_dev *fimc)
+{
+	bool suspend = fimc_capture_busy(fimc);
+
+	int ret = fimc_stop_capture(fimc, suspend);
+	if (ret)
+		return ret;
+	return fimc_pipeline_shutdown(&fimc->pipeline);
+}
+
+static void buffer_queue(struct vb2_buffer *vb);
+
+int fimc_capture_resume(struct fimc_dev *fimc)
+{
+	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+	struct fimc_vid_buffer *buf;
+	int i;
+
+	if (!test_and_clear_bit(ST_CAPT_SUSPENDED, &fimc->state))
+		return 0;
+
+	INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+	vid_cap->buf_index = 0;
+	fimc_pipeline_initialize(&fimc->pipeline, &vid_cap->vfd->entity,
+				 false);
+	fimc_capture_hw_init(fimc);
+
+	clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+	for (i = 0; i < vid_cap->reqbufs_count; i++) {
+		if (list_empty(&vid_cap->pending_buf_q))
+			break;
+		buf = fimc_pending_queue_pop(vid_cap);
+		buffer_queue(&buf->vb);
+	}
+	return 0;
+
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[], void *allocators[])
+{
+	const struct v4l2_pix_format_mplane *pixm = NULL;
+	struct fimc_ctx *ctx = vq->drv_priv;
+	struct fimc_frame *frame = &ctx->d_frame;
+	struct fimc_fmt *fmt = frame->fmt;
+	unsigned long wh;
+	int i;
+
+	if (pfmt) {
+		pixm = &pfmt->fmt.pix_mp;
+		fmt = fimc_find_format(&pixm->pixelformat, NULL,
+				       FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1);
+		wh = pixm->width * pixm->height;
+	} else {
+		wh = frame->f_width * frame->f_height;
+	}
+
+	if (fmt == NULL)
+		return -EINVAL;
+
+	*num_planes = fmt->memplanes;
+
+	for (i = 0; i < fmt->memplanes; i++) {
+		unsigned int size = (wh * fmt->depth[i]) / 8;
+		if (pixm)
+			sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+		else
+			sizes[i] = max_t(u32, size, frame->payload[i]);
+
+		allocators[i] = ctx->fimc_dev->alloc_ctx;
+	}
+
+	return 0;
+}
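
queue_setup() sizes each memory plane as width * height * depth[i] / 8 and then takes the larger of that value and the size requested by user space (or the current payload). A small worked example of that formula, using hypothetical per-plane depths (an NV12-style two-plane layout, not the driver's actual format table):

	/* Worked example of the per-plane size formula used in queue_setup(). */
	static void example_plane_sizes(void)
	{
		const unsigned int width = 640, height = 480;
		const unsigned int depth[2] = { 8, 4 };	/* hypothetical bits/pixel per plane */
		unsigned int sizes[2], i;

		for (i = 0; i < 2; i++)
			sizes[i] = (width * height * depth[i]) / 8;
		/* sizes[0] == 307200, sizes[1] == 153600 */
		(void)sizes;
	}
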
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct fimc_ctx *ctx = vq->drv_priv;
+	int i;
+
+	if (ctx->d_frame.fmt == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
+		unsigned long size = ctx->d_frame.payload[i];
+
+		if (vb2_plane_size(vb, i) < size) {
+			v4l2_err(ctx->fimc_dev->vid_cap.vfd,
+				 "User buffer too small (%ld < %ld)\n",
+				 vb2_plane_size(vb, i), size);
+			return -EINVAL;
+		}
+		vb2_set_plane_payload(vb, i, size);
+	}
+
+	return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+	struct fimc_vid_buffer *buf
+		= container_of(vb, struct fimc_vid_buffer, vb);
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+	unsigned long flags;
+	int min_bufs;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr);
+
+	if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
+	    !test_bit(ST_CAPT_STREAM, &fimc->state) &&
+	    vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) {
+		/* Setup the buffer directly for processing. */
+		int buf_id = (vid_cap->reqbufs_count == 1) ? -1 :
+				vid_cap->buf_index;
+
+		fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id);
+		buf->index = vid_cap->buf_index;
+		fimc_active_queue_add(vid_cap, buf);
+
+		if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS)
+			vid_cap->buf_index = 0;
+	} else {
+		fimc_pending_queue_add(vid_cap, buf);
+	}
+
+	min_bufs = vid_cap->reqbufs_count > 1 ? 2 : 1;
+
+
+	if (vb2_is_streaming(&vid_cap->vbq) &&
+	    vid_cap->active_buf_cnt >= min_bufs &&
+	    !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
+		fimc_activate_capture(ctx);
+		spin_unlock_irqrestore(&fimc->slock, flags);
+
+		if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
+			fimc_pipeline_s_stream(&fimc->pipeline, 1);
+		return;
+	}
+	spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_lock(&ctx->fimc_dev->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_unlock(&ctx->fimc_dev->lock);
+}
+
+static struct vb2_ops fimc_capture_qops = {
+	.queue_setup		= queue_setup,
+	.buf_prepare		= buffer_prepare,
+	.buf_queue		= buffer_queue,
+	.wait_prepare		= fimc_unlock,
+	.wait_finish		= fimc_lock,
+	.start_streaming	= start_streaming,
+	.stop_streaming		= stop_streaming,
+};
+
+/**
+ * fimc_capture_ctrls_create - initialize the control handler
+ * Initialize the capture video node control handler and fill it
+ * with the FIMC controls. Inherit any sensor's controls if the
+ * 'user_subdev_api' flag is false (default behaviour).
+ * This function needs to be called with the graph mutex held.
+ */
+int fimc_capture_ctrls_create(struct fimc_dev *fimc)
+{
+	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+	int ret;
+
+	if (WARN_ON(vid_cap->ctx == NULL))
+		return -ENXIO;
+	if (vid_cap->ctx->ctrls.ready)
+		return 0;
+
+	ret = fimc_ctrls_create(vid_cap->ctx);
+	if (ret || vid_cap->user_subdev_api || !vid_cap->ctx->ctrls.ready)
+		return ret;
+
+	return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrls.handler,
+		    fimc->pipeline.subdevs[IDX_SENSOR]->ctrl_handler);
+}
+
+static int fimc_capture_set_default_format(struct fimc_dev *fimc);
+
+static int fimc_capture_open(struct file *file)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	int ret = -EBUSY;
+
+	dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	if (fimc_m2m_active(fimc))
+		goto unlock;
+
+	set_bit(ST_CAPT_BUSY, &fimc->state);
+	ret = pm_runtime_get_sync(&fimc->pdev->dev);
+	if (ret < 0)
+		goto unlock;
+
+	ret = v4l2_fh_open(file);
+	if (ret) {
+		pm_runtime_put(&fimc->pdev->dev);
+		goto unlock;
+	}
+
+	if (++fimc->vid_cap.refcnt == 1) {
+		ret = fimc_pipeline_initialize(&fimc->pipeline,
+				       &fimc->vid_cap.vfd->entity, true);
+
+		if (!ret && !fimc->vid_cap.user_subdev_api)
+			ret = fimc_capture_set_default_format(fimc);
+
+		if (!ret)
+			ret = fimc_capture_ctrls_create(fimc);
+
+		if (ret < 0) {
+			clear_bit(ST_CAPT_BUSY, &fimc->state);
+			pm_runtime_put_sync(&fimc->pdev->dev);
+			fimc->vid_cap.refcnt--;
+			v4l2_fh_release(file);
+		}
+	}
+unlock:
+	mutex_unlock(&fimc->lock);
+	return ret;
+}
+
+static int fimc_capture_close(struct file *file)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	int ret;
+
+	dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	if (--fimc->vid_cap.refcnt == 0) {
+		clear_bit(ST_CAPT_BUSY, &fimc->state);
+		fimc_stop_capture(fimc, false);
+		fimc_pipeline_shutdown(&fimc->pipeline);
+		clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
+	}
+
+	pm_runtime_put(&fimc->pdev->dev);
+
+	if (fimc->vid_cap.refcnt == 0) {
+		vb2_queue_release(&fimc->vid_cap.vbq);
+		fimc_ctrls_delete(fimc->vid_cap.ctx);
+	}
+
+	ret = v4l2_fh_release(file);
+
+	mutex_unlock(&fimc->lock);
+	return ret;
+}
+
+static unsigned int fimc_capture_poll(struct file *file,
+				      struct poll_table_struct *wait)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return POLLERR;
+
+	ret = vb2_poll(&fimc->vid_cap.vbq, file, wait);
+	mutex_unlock(&fimc->lock);
+
+	return ret;
+}
+
+static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	ret = vb2_mmap(&fimc->vid_cap.vbq, vma);
+	mutex_unlock(&fimc->lock);
+
+	return ret;
+}
+
+static const struct v4l2_file_operations fimc_capture_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fimc_capture_open,
+	.release	= fimc_capture_close,
+	.poll		= fimc_capture_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= fimc_capture_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
+						u32 *width, u32 *height,
+						u32 *code, u32 *fourcc, int pad)
+{
+	bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_variant *var = fimc->variant;
+	struct fimc_pix_limit *pl = var->pix_limit;
+	struct fimc_frame *dst = &ctx->d_frame;
+	u32 depth, min_w, max_w, min_h, align_h = 3;
+	u32 mask = FMT_FLAGS_CAM;
+	struct fimc_fmt *ffmt;
+
+	/* Color conversion from/to JPEG is not supported */
+	if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE &&
+	    fimc_fmt_is_jpeg(ctx->s_frame.fmt->color))
+		*code = V4L2_MBUS_FMT_JPEG_1X8;
+
+	if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad != FIMC_SD_PAD_SINK)
+		mask |= FMT_FLAGS_M2M;
+
+	ffmt = fimc_find_format(fourcc, code, mask, 0);
+	if (WARN_ON(!ffmt))
+		return NULL;
+	if (code)
+		*code = ffmt->mbus_code;
+	if (fourcc)
+		*fourcc = ffmt->fourcc;
+
+	if (pad == FIMC_SD_PAD_SINK) {
+		max_w = fimc_fmt_is_jpeg(ffmt->color) ?
+			pl->scaler_dis_w : pl->scaler_en_w;
+		/* Apply the camera input interface pixel constraints */
+		v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4,
+				      height, max_t(u32, *height, 32),
+				      FIMC_CAMIF_MAX_HEIGHT,
+				      fimc_fmt_is_jpeg(ffmt->color) ? 3 : 1,
+				      0);
+		return ffmt;
+	}
+	/* Can't scale or crop in transparent (JPEG) transfer mode */
+	if (fimc_fmt_is_jpeg(ffmt->color)) {
+		*width  = ctx->s_frame.f_width;
+		*height = ctx->s_frame.f_height;
+		return ffmt;
+	}
+	/* Apply the scaler and the output DMA constraints */
+	max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
+	if (ctx->state & FIMC_COMPOSE) {
+		min_w = dst->offs_h + dst->width;
+		min_h = dst->offs_v + dst->height;
+	} else {
+		min_w = var->min_out_pixsize;
+		min_h = var->min_out_pixsize;
+	}
+	if (var->min_vsize_align == 1 && !rotation)
+		align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
+
+	depth = fimc_get_format_depth(ffmt);
+	v4l_bound_align_image(width, min_w, max_w,
+			      ffs(var->min_out_pixsize) - 1,
+			      height, min_h, FIMC_CAMIF_MAX_HEIGHT,
+			      align_h,
+			      64/(ALIGN(depth, 8)));
+
+	dbg("pad%d: code: 0x%x, %dx%d. dst fmt: %dx%d",
+	    pad, code ? *code : 0, *width, *height,
+	    dst->f_width, dst->f_height);
+
+	return ffmt;
+}
+
+static void fimc_capture_try_selection(struct fimc_ctx *ctx,
+				       struct v4l2_rect *r,
+				       int target)
+{
+	bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_variant *var = fimc->variant;
+	struct fimc_pix_limit *pl = var->pix_limit;
+	struct fimc_frame *sink = &ctx->s_frame;
+	u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
+	u32 align_sz = 0, align_h = 4;
+	u32 max_sc_h, max_sc_v;
+
+	/* In JPEG transparent transfer mode cropping is not supported */
+	if (fimc_fmt_is_jpeg(ctx->d_frame.fmt->color)) {
+		r->width  = sink->f_width;
+		r->height = sink->f_height;
+		r->left   = r->top = 0;
+		return;
+	}
+	if (target == V4L2_SEL_TGT_COMPOSE) {
+		if (ctx->rotation != 90 && ctx->rotation != 270)
+			align_h = 1;
+		max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
+		max_sc_v = min(SCALER_MAX_VRATIO, 1 << (ffs(sink->height) - 1));
+		min_sz = var->min_out_pixsize;
+	} else {
+		u32 depth = fimc_get_format_depth(sink->fmt);
+		align_sz = 64/ALIGN(depth, 8);
+		min_sz = var->min_inp_pixsize;
+		min_w = min_h = min_sz;
+		max_sc_h = max_sc_v = 1;
+	}
+	/*
+	 * For the compose rectangle the following constraints must be met:
+	 * - it must fit in the sink pad format rectangle (f_width/f_height);
+	 * - maximum downscaling ratio is 64;
+	 * - maximum crop size depends if the rotator is used or not;
+	 * - the sink pad format width/height must be a multiple of 4 times the
+	 *   prescaler ratios determined by the sink pad size and source pad
+	 *   crop; the prescaler ratio is returned by fimc_get_scaler_factor().
+	 */
+	max_w = min_t(u32,
+		      rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
+		      rotate ? sink->f_height : sink->f_width);
+	max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
+
+	if (target == V4L2_SEL_TGT_COMPOSE) {
+		min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
+		min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
+		if (rotate) {
+			swap(max_sc_h, max_sc_v);
+			swap(min_w, min_h);
+		}
+	}
+	v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
+			      &r->height, min_h, max_h, align_h,
+			      align_sz);
+	/* Adjust left/top if crop/compose rectangle is out of bounds */
+	r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
+	r->top  = clamp_t(u32, r->top, 0, sink->f_height - r->height);
+	r->left = round_down(r->left, var->hor_offs_align);
+
+	dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+	    target, r->left, r->top, r->width, r->height,
+	    sink->f_width, sink->f_height);
+}
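
To make the bounds computed in fimc_capture_try_selection() concrete: for the compose target, the smallest allowed rectangle is roughly the sink frame divided by the maximum scaler ratio, before the minimum output pixel size and alignment clamps are applied. The sink size below is hypothetical; only the 64x downscaling limit comes from the comment above.

	/* Rough numeric illustration of the compose lower bound (hypothetical sink size). */
	static void example_compose_lower_bound(void)
	{
		const unsigned int sink_w = 1920, sink_h = 1080;
		const unsigned int max_ratio = 64;		/* max downscaling per the comment */
		unsigned int min_w = sink_w / max_ratio;	/* 30 */
		unsigned int min_h = sink_h / max_ratio;	/* 16, integer division */

		/* The driver additionally clamps to max_w/max_h and min_out_pixsize. */
		(void)min_w;
		(void)min_h;
	}
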
+
+/*
+ * The video node ioctl operations
+ */
+static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
+					struct v4l2_capability *cap)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+	strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+	cap->bus_info[0] = 0;
+	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+
+	return 0;
+}
+
+static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
+				    struct v4l2_fmtdesc *f)
+{
+	struct fimc_fmt *fmt;
+
+	fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M,
+			       f->index);
+	if (!fmt)
+		return -EINVAL;
+	strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+	f->pixelformat = fmt->fourcc;
+	if (fmt->fourcc == V4L2_PIX_FMT_JPEG)
+		f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+	return 0;
+}
+
+/**
+ * fimc_pipeline_try_format - negotiate and/or set formats at pipeline
+ *                            elements
+ * @ctx: FIMC capture context
+ * @tfmt: media bus format to try/set on subdevs
+ * @fmt_id: fimc pixel format id corresponding to returned @tfmt (output)
+ * @set: true to set format on subdevs, false to try only
+ */
+static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
+				    struct v4l2_mbus_framefmt *tfmt,
+				    struct fimc_fmt **fmt_id,
+				    bool set)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+	struct v4l2_subdev *csis = fimc->pipeline.subdevs[IDX_CSIS];
+	struct v4l2_subdev_format sfmt;
+	struct v4l2_mbus_framefmt *mf = &sfmt.format;
+	struct fimc_fmt *ffmt = NULL;
+	int ret, i = 0;
+
+	if (WARN_ON(!sd || !tfmt))
+		return -EINVAL;
+
+	memset(&sfmt, 0, sizeof(sfmt));
+	sfmt.format = *tfmt;
+
+	sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+	while (1) {
+		ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL,
+					FMT_FLAGS_CAM, i++);
+		if (ffmt == NULL) {
+			/*
+			 * Notify user space if no common pixel code
+			 * exists for the host and the sensor.
+			 */
+			return -EINVAL;
+		}
+		mf->code = tfmt->code = ffmt->mbus_code;
+
+		ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt);
+		if (ret)
+			return ret;
+		if (mf->code != tfmt->code) {
+			mf->code = 0;
+			continue;
+		}
+		if (mf->width != tfmt->width || mf->height != tfmt->height) {
+			u32 fcc = ffmt->fourcc;
+			tfmt->width  = mf->width;
+			tfmt->height = mf->height;
+			ffmt = fimc_capture_try_format(ctx,
+					       &tfmt->width, &tfmt->height,
+					       NULL, &fcc, FIMC_SD_PAD_SOURCE);
+			if (ffmt && ffmt->mbus_code)
+				mf->code = ffmt->mbus_code;
+			if (mf->width != tfmt->width ||
+			    mf->height != tfmt->height)
+				continue;
+			tfmt->code = mf->code;
+		}
+		if (csis)
+			ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
+
+		if (mf->code == tfmt->code &&
+		    mf->width == tfmt->width && mf->height == tfmt->height)
+			break;
+	}
+
+	if (fmt_id && ffmt)
+		*fmt_id = ffmt;
+	*tfmt = *mf;
+
+	dbg("code: 0x%x, %dx%d, %p", mf->code, mf->width, mf->height, ffmt);
+	return 0;
+}
+
+static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
+				 struct v4l2_format *f)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+
+	return fimc_fill_format(&ctx->d_frame, f);
+}
+
+static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
+				   struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct v4l2_mbus_framefmt mf;
+	struct fimc_fmt *ffmt = NULL;
+
+	if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
+		fimc_capture_try_format(ctx, &pix->width, &pix->height,
+					NULL, &pix->pixelformat,
+					FIMC_SD_PAD_SINK);
+		ctx->s_frame.f_width  = pix->width;
+		ctx->s_frame.f_height = pix->height;
+	}
+	ffmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
+				       NULL, &pix->pixelformat,
+				       FIMC_SD_PAD_SOURCE);
+	if (!ffmt)
+		return -EINVAL;
+
+	if (!fimc->vid_cap.user_subdev_api) {
+		mf.width  = pix->width;
+		mf.height = pix->height;
+		mf.code   = ffmt->mbus_code;
+		fimc_md_graph_lock(fimc);
+		fimc_pipeline_try_format(ctx, &mf, &ffmt, false);
+		fimc_md_graph_unlock(fimc);
+
+		pix->width	 = mf.width;
+		pix->height	 = mf.height;
+		if (ffmt)
+			pix->pixelformat = ffmt->fourcc;
+	}
+
+	fimc_adjust_mplane_format(ffmt, pix->width, pix->height, pix);
+	return 0;
+}
+
+static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx, bool jpeg)
+{
+	ctx->scaler.enabled = !jpeg;
+	fimc_ctrls_activate(ctx, !jpeg);
+
+	if (jpeg)
+		set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+	else
+		clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state);
+}
+
+static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
+{
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+	struct v4l2_mbus_framefmt *mf = &fimc->vid_cap.mf;
+	struct fimc_frame *ff = &ctx->d_frame;
+	struct fimc_fmt *s_fmt = NULL;
+	int ret = 0, i;
+
+	if (vb2_is_busy(&fimc->vid_cap.vbq))
+		return -EBUSY;
+
+	/* Pre-configure format at camera interface input, for JPEG only */
+	if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
+		fimc_capture_try_format(ctx, &pix->width, &pix->height,
+					NULL, &pix->pixelformat,
+					FIMC_SD_PAD_SINK);
+		ctx->s_frame.f_width  = pix->width;
+		ctx->s_frame.f_height = pix->height;
+	}
+	/* Try the format at the scaler and the DMA output */
+	ff->fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height,
+					  NULL, &pix->pixelformat,
+					  FIMC_SD_PAD_SOURCE);
+	if (!ff->fmt)
+		return -EINVAL;
+
+	/* Update RGB Alpha control state and value range */
+	fimc_alpha_ctrl_update(ctx);
+
+	/* Try to match format at the host and the sensor */
+	if (!fimc->vid_cap.user_subdev_api) {
+		mf->code   = ff->fmt->mbus_code;
+		mf->width  = pix->width;
+		mf->height = pix->height;
+
+		fimc_md_graph_lock(fimc);
+		ret = fimc_pipeline_try_format(ctx, mf, &s_fmt, true);
+		fimc_md_graph_unlock(fimc);
+		if (ret)
+			return ret;
+		pix->width  = mf->width;
+		pix->height = mf->height;
+	}
+
+	fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
+	for (i = 0; i < ff->fmt->colplanes; i++)
+		ff->payload[i] = pix->plane_fmt[i].sizeimage;
+
+	set_frame_bounds(ff, pix->width, pix->height);
+	/* Reset the composition rectangle if not yet configured */
+	if (!(ctx->state & FIMC_COMPOSE))
+		set_frame_crop(ff, 0, 0, pix->width, pix->height);
+
+	fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ff->fmt->color));
+
+	/* Reset cropping and set format at the camera interface input */
+	if (!fimc->vid_cap.user_subdev_api) {
+		ctx->s_frame.fmt = s_fmt;
+		set_frame_bounds(&ctx->s_frame, pix->width, pix->height);
+		set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height);
+	}
+
+	return ret;
+}
+
+static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
+				 struct v4l2_format *f)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	return fimc_capture_set_format(fimc, f);
+}
+
+static int fimc_cap_enum_input(struct file *file, void *priv,
+			       struct v4l2_input *i)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+
+	if (i->index != 0)
+		return -EINVAL;
+
+	i->type = V4L2_INPUT_TYPE_CAMERA;
+	if (sd)
+		strlcpy(i->name, sd->name, sizeof(i->name));
+	return 0;
+}
+
+static int fimc_cap_s_input(struct file *file, void *priv, unsigned int i)
+{
+	return i == 0 ? i : -EINVAL;
+}
+
+static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+/**
+ * fimc_pipeline_validate - check for formats inconsistencies
+ *                          between source and sink pad of each link
+ *
+ * Return 0 if all formats match or -EPIPE otherwise.
+ */
+static int fimc_pipeline_validate(struct fimc_dev *fimc)
+{
+	struct v4l2_subdev_format sink_fmt, src_fmt;
+	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+	struct v4l2_subdev *sd;
+	struct media_pad *pad;
+	int ret;
+
+	/* Start with the video capture node pad */
+	pad = media_entity_remote_source(&vid_cap->vd_pad);
+	if (pad == NULL)
+		return -EPIPE;
+	/* FIMC.{N} subdevice */
+	sd = media_entity_to_v4l2_subdev(pad->entity);
+
+	while (1) {
+		/* Retrieve format at the sink pad */
+		pad = &sd->entity.pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+		/* Don't call FIMC subdev operation to avoid nested locking */
+		if (sd == &fimc->vid_cap.subdev) {
+			struct fimc_frame *ff = &vid_cap->ctx->s_frame;
+			sink_fmt.format.width = ff->f_width;
+			sink_fmt.format.height = ff->f_height;
+			sink_fmt.format.code = ff->fmt ? ff->fmt->mbus_code : 0;
+		} else {
+			sink_fmt.pad = pad->index;
+			sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+			ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
+			if (ret < 0 && ret != -ENOIOCTLCMD)
+				return -EPIPE;
+		}
+		/* Retrieve format at the source pad */
+		pad = media_entity_remote_source(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		sd = media_entity_to_v4l2_subdev(pad->entity);
+		src_fmt.pad = pad->index;
+		src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+		if (ret < 0 && ret != -ENOIOCTLCMD)
+			return -EPIPE;
+
+		if (src_fmt.format.width != sink_fmt.format.width ||
+		    src_fmt.format.height != sink_fmt.format.height ||
+		    src_fmt.format.code != sink_fmt.format.code)
+			return -EPIPE;
+	}
+	return 0;
+}
+
+static int fimc_cap_streamon(struct file *file, void *priv,
+			     enum v4l2_buf_type type)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct fimc_pipeline *p = &fimc->pipeline;
+	struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
+	int ret;
+
+	if (fimc_capture_active(fimc))
+		return -EBUSY;
+
+	ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+	if (ret < 0)
+		return ret;
+
+	if (fimc->vid_cap.user_subdev_api) {
+		ret = fimc_pipeline_validate(fimc);
+		if (ret < 0) {
+			media_entity_pipeline_stop(&sd->entity);
+			return ret;
+		}
+	}
+	return vb2_streamon(&fimc->vid_cap.vbq, type);
+}
+
+static int fimc_cap_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+	int ret;
+
+	ret = vb2_streamoff(&fimc->vid_cap.vbq, type);
+	if (ret == 0)
+		media_entity_pipeline_stop(&sd->entity);
+	return ret;
+}
+
+static int fimc_cap_reqbufs(struct file *file, void *priv,
+			    struct v4l2_requestbuffers *reqbufs)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	int ret = vb2_reqbufs(&fimc->vid_cap.vbq, reqbufs);
+
+	if (!ret)
+		fimc->vid_cap.reqbufs_count = reqbufs->count;
+	return ret;
+}
+
+static int fimc_cap_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	return vb2_querybuf(&fimc->vid_cap.vbq, buf);
+}
+
+static int fimc_cap_qbuf(struct file *file, void *priv,
+			  struct v4l2_buffer *buf)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	return vb2_qbuf(&fimc->vid_cap.vbq, buf);
+}
+
+static int fimc_cap_dqbuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	return vb2_dqbuf(&fimc->vid_cap.vbq, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int fimc_cap_create_bufs(struct file *file, void *priv,
+				struct v4l2_create_buffers *create)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	return vb2_create_bufs(&fimc->vid_cap.vbq, create);
+}
+
+static int fimc_cap_prepare_buf(struct file *file, void *priv,
+				struct v4l2_buffer *b)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+
+	return vb2_prepare_buf(&fimc->vid_cap.vbq, b);
+}
+
+static int fimc_cap_g_selection(struct file *file, void *fh,
+				struct v4l2_selection *s)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct fimc_frame *f = &ctx->s_frame;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		f = &ctx->d_frame;
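+		/* fall through */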
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = f->o_width;
+		s->r.height = f->o_height;
+		return 0;
+
+	case V4L2_SEL_TGT_COMPOSE:
+		f = &ctx->d_frame;
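+		/* fall through */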
+	case V4L2_SEL_TGT_CROP:
+		s->r.left = f->offs_h;
+		s->r.top = f->offs_v;
+		s->r.width = f->width;
+		s->r.height = f->height;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+	if (a->left < b->left || a->top < b->top)
+		return 0;
+	if (a->left + a->width > b->left + b->width)
+		return 0;
+	if (a->top + a->height > b->top + b->height)
+		return 0;
+
+	return 1;
+}
+
+static int fimc_cap_s_selection(struct file *file, void *fh,
+				struct v4l2_selection *s)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct v4l2_rect rect = s->r;
+	struct fimc_frame *f;
+	unsigned long flags;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return -EINVAL;
+
+	if (s->target == V4L2_SEL_TGT_COMPOSE)
+		f = &ctx->d_frame;
+	else if (s->target == V4L2_SEL_TGT_CROP)
+		f = &ctx->s_frame;
+	else
+		return -EINVAL;
+
+	fimc_capture_try_selection(ctx, &rect, s->target);
+
+	if (s->flags & V4L2_SEL_FLAG_LE &&
+	    !enclosed_rectangle(&rect, &s->r))
+		return -ERANGE;
+
+	if (s->flags & V4L2_SEL_FLAG_GE &&
+	    !enclosed_rectangle(&s->r, &rect))
+		return -ERANGE;
+
+	s->r = rect;
+	spin_lock_irqsave(&fimc->slock, flags);
+	set_frame_crop(f, s->r.left, s->r.top, s->r.width,
+		       s->r.height);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+	return 0;
+}
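
The V4L2_SEL_FLAG_LE/GE handling above lets user space constrain how the adjusted rectangle may differ from the requested one. A hedged user-space sketch of issuing such a request against this capture node; the file descriptor and rectangle values are placeholders.

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Ask for a crop rectangle and require the adjusted result to enclose
	 * it (GE); otherwise VIDIOC_S_SELECTION fails with ERANGE as above. */
	static int request_crop_ge(int fd)
	{
		struct v4l2_selection sel = {
			.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
			.target	= V4L2_SEL_TGT_CROP,
			.flags	= V4L2_SEL_FLAG_GE,
			.r	= { .left = 0, .top = 0, .width = 640, .height = 480 },
		};

		return ioctl(fd, VIDIOC_S_SELECTION, &sel);
	}
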
+
+static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
+	.vidioc_querycap		= fimc_vidioc_querycap_capture,
+
+	.vidioc_enum_fmt_vid_cap_mplane	= fimc_cap_enum_fmt_mplane,
+	.vidioc_try_fmt_vid_cap_mplane	= fimc_cap_try_fmt_mplane,
+	.vidioc_s_fmt_vid_cap_mplane	= fimc_cap_s_fmt_mplane,
+	.vidioc_g_fmt_vid_cap_mplane	= fimc_cap_g_fmt_mplane,
+
+	.vidioc_reqbufs			= fimc_cap_reqbufs,
+	.vidioc_querybuf		= fimc_cap_querybuf,
+
+	.vidioc_qbuf			= fimc_cap_qbuf,
+	.vidioc_dqbuf			= fimc_cap_dqbuf,
+
+	.vidioc_prepare_buf		= fimc_cap_prepare_buf,
+	.vidioc_create_bufs		= fimc_cap_create_bufs,
+
+	.vidioc_streamon		= fimc_cap_streamon,
+	.vidioc_streamoff		= fimc_cap_streamoff,
+
+	.vidioc_g_selection		= fimc_cap_g_selection,
+	.vidioc_s_selection		= fimc_cap_s_selection,
+
+	.vidioc_enum_input		= fimc_cap_enum_input,
+	.vidioc_s_input			= fimc_cap_s_input,
+	.vidioc_g_input			= fimc_cap_g_input,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+
+	if (media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+		return -EINVAL;
+
+	if (WARN_ON(fimc == NULL))
+		return 0;
+
+	dbg("%s --> %s, flags: 0x%x. input: 0x%x",
+	    local->entity->name, remote->entity->name, flags,
+	    fimc->vid_cap.input);
+
+	if (flags & MEDIA_LNK_FL_ENABLED) {
+		if (fimc->vid_cap.input != 0)
+			return -EBUSY;
+		fimc->vid_cap.input = sd->grp_id;
+		return 0;
+	}
+
+	fimc->vid_cap.input = 0;
+	return 0;
+}
+
+static const struct media_entity_operations fimc_sd_media_ops = {
+	.link_setup = fimc_link_setup,
+};
+
+/**
+ * fimc_sensor_notify - v4l2_device notification from a sensor subdev
+ * @sd: pointer to a subdev generating the notification
+ * @notification: the notification type, must be S5P_FIMC_TX_END_NOTIFY
+ * @arg: pointer to a u32 integer that stores the frame payload value
+ *
+ * The End Of Frame notification is sent by the sensor subdev in its still
+ * capture mode. If the sensor generates only a single VSYNC at the beginning
+ * of a frame transmission, FIMC does not issue the LastIrq (end of frame)
+ * interrupt, and this notification is used instead to complete the frame
+ * capture and return a buffer to user space. Subdev drivers should call this
+ * notification from their last 'End of frame capture' interrupt.
+ */
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+			void *arg)
+{
+	struct fimc_sensor_info	*sensor;
+	struct fimc_vid_buffer *buf;
+	struct fimc_md *fmd;
+	struct fimc_dev *fimc;
+	unsigned long flags;
+
+	if (sd == NULL)
+		return;
+
+	sensor = v4l2_get_subdev_hostdata(sd);
+	fmd = entity_to_fimc_mdev(&sd->entity);
+
+	spin_lock_irqsave(&fmd->slock, flags);
+	fimc = sensor ? sensor->host : NULL;
+
+	if (fimc && arg && notification == S5P_FIMC_TX_END_NOTIFY &&
+	    test_bit(ST_CAPT_PEND, &fimc->state)) {
+		unsigned long irq_flags;
+		spin_lock_irqsave(&fimc->slock, irq_flags);
+		if (!list_empty(&fimc->vid_cap.active_buf_q)) {
+			buf = list_entry(fimc->vid_cap.active_buf_q.next,
+					 struct fimc_vid_buffer, list);
+			vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
+		}
+		fimc_capture_irq_handler(fimc, 1);
+		fimc_deactivate_capture(fimc);
+		spin_unlock_irqrestore(&fimc->slock, irq_flags);
+	}
+	spin_unlock_irqrestore(&fmd->slock, flags);
+}
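
As the kernel-doc above notes, sensor subdev drivers are expected to issue this notification from their final 'end of frame capture' interrupt. A minimal sketch of what that might look like on the sensor side; the interrupt handler and payload variable are hypothetical, while v4l2_subdev_notify() and S5P_FIMC_TX_END_NOTIFY are the existing hooks this function is wired to.

	#include <linux/interrupt.h>
	#include <media/v4l2-subdev.h>
	#include <media/s5p_fimc.h>

	/* Hypothetical sensor end-of-frame interrupt handler. */
	static irqreturn_t example_sensor_eof_isr(int irq, void *priv)
	{
		struct v4l2_subdev *sd = priv;
		u32 payload = 0;	/* bytes received for the captured frame */

		/* Routed to fimc_sensor_notify() via v4l2_dev->notify. */
		v4l2_subdev_notify(sd, S5P_FIMC_TX_END_NOTIFY, &payload);

		return IRQ_HANDLED;
	}
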
+
+static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+				      struct v4l2_subdev_fh *fh,
+				      struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct fimc_fmt *fmt;
+
+	fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index);
+	if (!fmt)
+		return -EINVAL;
+	code->code = fmt->mbus_code;
+	return 0;
+}
+
+static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_format *fmt)
+{
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct v4l2_mbus_framefmt *mf;
+	struct fimc_frame *ff;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		fmt->format = *mf;
+		return 0;
+	}
+	mf = &fmt->format;
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	ff = fmt->pad == FIMC_SD_PAD_SINK ? &ctx->s_frame : &ctx->d_frame;
+
+	mutex_lock(&fimc->lock);
+	/* The pixel code is the same on both the input and output pads */
+	if (!WARN_ON(ctx->s_frame.fmt == NULL))
+		mf->code = ctx->s_frame.fmt->mbus_code;
+	mf->width  = ff->f_width;
+	mf->height = ff->f_height;
+	mutex_unlock(&fimc->lock);
+
+	return 0;
+}
+
+static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_format *fmt)
+{
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *mf = &fmt->format;
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct fimc_frame *ff;
+	struct fimc_fmt *ffmt;
+
+	dbg("pad%d: code: 0x%x, %dx%d",
+	    fmt->pad, mf->code, mf->width, mf->height);
+
+	if (fmt->pad == FIMC_SD_PAD_SOURCE &&
+	    vb2_is_busy(&fimc->vid_cap.vbq))
+		return -EBUSY;
+
+	mutex_lock(&fimc->lock);
+	ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height,
+				       &mf->code, NULL, fmt->pad);
+	mutex_unlock(&fimc->lock);
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		*mf = fmt->format;
+		return 0;
+	}
+	/* Update RGB Alpha control state and value range */
+	fimc_alpha_ctrl_update(ctx);
+
+	fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ffmt->color));
+
+	ff = fmt->pad == FIMC_SD_PAD_SINK ?
+		&ctx->s_frame : &ctx->d_frame;
+
+	mutex_lock(&fimc->lock);
+	set_frame_bounds(ff, mf->width, mf->height);
+	fimc->vid_cap.mf = *mf;
+	ff->fmt = ffmt;
+
+	/* Reset the crop rectangle if required. */
+	if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE)))
+		set_frame_crop(ff, 0, 0, mf->width, mf->height);
+
+	if (fmt->pad == FIMC_SD_PAD_SINK)
+		ctx->state &= ~FIMC_COMPOSE;
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_fh *fh,
+				     struct v4l2_subdev_selection *sel)
+{
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct fimc_frame *f = &ctx->s_frame;
+	struct v4l2_rect *r = &sel->r;
+	struct v4l2_rect *try_sel;
+
+	if (sel->pad != FIMC_SD_PAD_SINK)
+		return -EINVAL;
+
+	mutex_lock(&fimc->lock);
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		f = &ctx->d_frame;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		r->width = f->o_width;
+		r->height = f->o_height;
+		r->left = 0;
+		r->top = 0;
+		mutex_unlock(&fimc->lock);
+		return 0;
+
+	case V4L2_SEL_TGT_CROP:
+		try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+		f = &ctx->d_frame;
+		break;
+	default:
+		mutex_unlock(&fimc->lock);
+		return -EINVAL;
+	}
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+		sel->r = *try_sel;
+	} else {
+		r->left = f->offs_h;
+		r->top = f->offs_v;
+		r->width = f->width;
+		r->height = f->height;
+	}
+
+	dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+	    sel->target, r->left, r->top, r->width, r->height,
+	    f->f_width, f->f_height);
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_fh *fh,
+				     struct v4l2_subdev_selection *sel)
+{
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+	struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+	struct fimc_frame *f = &ctx->s_frame;
+	struct v4l2_rect *r = &sel->r;
+	struct v4l2_rect *try_sel;
+	unsigned long flags;
+
+	if (sel->pad != FIMC_SD_PAD_SINK)
+		return -EINVAL;
+
+	mutex_lock(&fimc->lock);
+	fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP);
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		f = &ctx->d_frame;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		r->width = f->o_width;
+		r->height = f->o_height;
+		r->left = 0;
+		r->top = 0;
+		mutex_unlock(&fimc->lock);
+		return 0;
+
+	case V4L2_SEL_TGT_CROP:
+		try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+		f = &ctx->d_frame;
+		break;
+	default:
+		mutex_unlock(&fimc->lock);
+		return -EINVAL;
+	}
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+		*try_sel = sel->r;
+	} else {
+		spin_lock_irqsave(&fimc->slock, flags);
+		set_frame_crop(f, r->left, r->top, r->width, r->height);
+		set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+		spin_unlock_irqrestore(&fimc->slock, flags);
+		if (sel->target == V4L2_SEL_TGT_COMPOSE)
+			ctx->state |= FIMC_COMPOSE;
+	}
+
+	dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
+	    r->width, r->height);
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
+	.enum_mbus_code = fimc_subdev_enum_mbus_code,
+	.get_selection = fimc_subdev_get_selection,
+	.set_selection = fimc_subdev_set_selection,
+	.get_fmt = fimc_subdev_get_fmt,
+	.set_fmt = fimc_subdev_set_fmt,
+};
+
+static struct v4l2_subdev_ops fimc_subdev_ops = {
+	.pad = &fimc_subdev_pad_ops,
+};
+
+/* Set default format at the sensor and host interface */
+static int fimc_capture_set_default_format(struct fimc_dev *fimc)
+{
+	struct v4l2_format fmt = {
+		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+		.fmt.pix_mp = {
+			.width		= 640,
+			.height		= 480,
+			.pixelformat	= V4L2_PIX_FMT_YUYV,
+			.field		= V4L2_FIELD_NONE,
+			.colorspace	= V4L2_COLORSPACE_JPEG,
+		},
+	};
+
+	return fimc_capture_set_format(fimc, &fmt);
+}
+
+/* fimc->lock must be already initialized */
+static int fimc_register_capture_device(struct fimc_dev *fimc,
+				 struct v4l2_device *v4l2_dev)
+{
+	struct video_device *vfd;
+	struct fimc_vid_cap *vid_cap;
+	struct fimc_ctx *ctx;
+	struct vb2_queue *q;
+	int ret = -ENOMEM;
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->fimc_dev	 = fimc;
+	ctx->in_path	 = FIMC_IO_CAMERA;
+	ctx->out_path	 = FIMC_IO_DMA;
+	ctx->state	 = FIMC_CTX_CAP;
+	ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+	ctx->d_frame.fmt = ctx->s_frame.fmt;
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+		goto err_vd_alloc;
+	}
+
+	snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.capture", fimc->id);
+
+	vfd->fops	= &fimc_capture_fops;
+	vfd->ioctl_ops	= &fimc_capture_ioctl_ops;
+	vfd->v4l2_dev	= v4l2_dev;
+	vfd->minor	= -1;
+	vfd->release	= video_device_release;
+	vfd->lock	= &fimc->lock;
+
+	video_set_drvdata(vfd, fimc);
+
+	vid_cap = &fimc->vid_cap;
+	vid_cap->vfd = vfd;
+	vid_cap->active_buf_cnt = 0;
+	vid_cap->reqbufs_count  = 0;
+	vid_cap->refcnt = 0;
+
+	INIT_LIST_HEAD(&vid_cap->pending_buf_q);
+	INIT_LIST_HEAD(&vid_cap->active_buf_q);
+	vid_cap->ctx = ctx;
+
+	q = &fimc->vid_cap.vbq;
+	memset(q, 0, sizeof(*q));
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->drv_priv = fimc->vid_cap.ctx;
+	q->ops = &fimc_capture_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct fimc_vid_buffer);
+
+	vb2_queue_init(q);
+
+	vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+	ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
+	if (ret)
+		goto err_ent;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto err_vd;
+
+	v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+		  vfd->name, video_device_node_name(vfd));
+
+	vfd->ctrl_handler = &ctx->ctrls.handler;
+	return 0;
+
+err_vd:
+	media_entity_cleanup(&vfd->entity);
+err_ent:
+	video_device_release(vfd);
+err_vd_alloc:
+	kfree(ctx);
+	return ret;
+}
+
+static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
+{
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+	int ret;
+
+	ret = fimc_register_m2m_device(fimc, sd->v4l2_dev);
+	if (ret)
+		return ret;
+
+	ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
+	if (ret)
+		fimc_unregister_m2m_device(fimc);
+
+	return ret;
+}
+
+static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
+{
+	struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+
+	if (fimc == NULL)
+		return;
+
+	fimc_unregister_m2m_device(fimc);
+
+	if (fimc->vid_cap.vfd) {
+		media_entity_cleanup(&fimc->vid_cap.vfd->entity);
+		video_unregister_device(fimc->vid_cap.vfd);
+		fimc->vid_cap.vfd = NULL;
+	}
+
+	kfree(fimc->vid_cap.ctx);
+	fimc->vid_cap.ctx = NULL;
+}
+
+static const struct v4l2_subdev_internal_ops fimc_capture_sd_internal_ops = {
+	.registered = fimc_capture_subdev_registered,
+	.unregistered = fimc_capture_subdev_unregistered,
+};
+
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc)
+{
+	struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+	int ret;
+
+	v4l2_subdev_init(sd, &fimc_subdev_ops);
+	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id);
+
+	fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+				fimc->vid_cap.sd_pads, 0);
+	if (ret)
+		return ret;
+
+	sd->entity.ops = &fimc_sd_media_ops;
+	sd->internal_ops = &fimc_capture_sd_internal_ops;
+	v4l2_set_subdevdata(sd, fimc);
+	return 0;
+}
+
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc)
+{
+	struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	v4l2_set_subdevdata(sd, NULL);
+}
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
new file mode 100644
index 0000000..1a44540
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -0,0 +1,1239 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (CAMIF) driver
+ *
+ * Copyright (C) 2010-2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "fimc-mdevice.h"
+
+static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+	"sclk_fimc", "fimc"
+};
+
+static struct fimc_fmt fimc_formats[] = {
+	{
+		.name		= "RGB565",
+		.fourcc		= V4L2_PIX_FMT_RGB565,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_RGB565,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "BGR666",
+		.fourcc		= V4L2_PIX_FMT_BGR666,
+		.depth		= { 32 },
+		.color		= FIMC_FMT_RGB666,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "ARGB8888, 32 bpp",
+		.fourcc		= V4L2_PIX_FMT_RGB32,
+		.depth		= { 32 },
+		.color		= FIMC_FMT_RGB888,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.flags		= FMT_FLAGS_M2M | FMT_HAS_ALPHA,
+	}, {
+		.name		= "ARGB1555",
+		.fourcc		= V4L2_PIX_FMT_RGB555,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_RGB555,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.flags		= FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+	}, {
+		.name		= "ARGB4444",
+		.fourcc		= V4L2_PIX_FMT_RGB444,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_RGB444,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.flags		= FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+	}, {
+		.name		= "YUV 4:2:2 packed, YCbYCr",
+		.fourcc		= V4L2_PIX_FMT_YUYV,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_YCBYCR422,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
+		.flags		= FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+	}, {
+		.name		= "YUV 4:2:2 packed, CbYCrY",
+		.fourcc		= V4L2_PIX_FMT_UYVY,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_CBYCRY422,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_UYVY8_2X8,
+		.flags		= FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+	}, {
+		.name		= "YUV 4:2:2 packed, CrYCbY",
+		.fourcc		= V4L2_PIX_FMT_VYUY,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_CRYCBY422,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_VYUY8_2X8,
+		.flags		= FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+	}, {
+		.name		= "YUV 4:2:2 packed, YCrYCb",
+		.fourcc		= V4L2_PIX_FMT_YVYU,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_YCRYCB422,
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_YVYU8_2X8,
+		.flags		= FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+	}, {
+		.name		= "YUV 4:2:2 planar, Y/Cb/Cr",
+		.fourcc		= V4L2_PIX_FMT_YUV422P,
+		.depth		= { 12 },
+		.color		= FIMC_FMT_YCBYCR422,
+		.memplanes	= 1,
+		.colplanes	= 3,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:2 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV16,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_YCBYCR422,
+		.memplanes	= 1,
+		.colplanes	= 2,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:2 planar, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_NV61,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_YCRYCB422,
+		.memplanes	= 1,
+		.colplanes	= 2,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:0 planar, YCbCr",
+		.fourcc		= V4L2_PIX_FMT_YUV420,
+		.depth		= { 12 },
+		.color		= FIMC_FMT_YCBCR420,
+		.memplanes	= 1,
+		.colplanes	= 3,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:0 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV12,
+		.depth		= { 12 },
+		.color		= FIMC_FMT_YCBCR420,
+		.memplanes	= 1,
+		.colplanes	= 2,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:0 non-contig. 2p, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV12M,
+		.color		= FIMC_FMT_YCBCR420,
+		.depth		= { 8, 4 },
+		.memplanes	= 2,
+		.colplanes	= 2,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
+		.fourcc		= V4L2_PIX_FMT_YUV420M,
+		.color		= FIMC_FMT_YCBCR420,
+		.depth		= { 8, 2, 2 },
+		.memplanes	= 3,
+		.colplanes	= 3,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "YUV 4:2:0 non-contig. 2p, tiled",
+		.fourcc		= V4L2_PIX_FMT_NV12MT,
+		.color		= FIMC_FMT_YCBCR420,
+		.depth		= { 8, 4 },
+		.memplanes	= 2,
+		.colplanes	= 2,
+		.flags		= FMT_FLAGS_M2M,
+	}, {
+		.name		= "JPEG encoded data",
+		.fourcc		= V4L2_PIX_FMT_JPEG,
+		.color		= FIMC_FMT_JPEG,
+		.depth		= { 8 },
+		.memplanes	= 1,
+		.colplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_JPEG_1X8,
+		.flags		= FMT_FLAGS_CAM,
+	},
+};
+
+struct fimc_fmt *fimc_get_format(unsigned int index)
+{
+	if (index >= ARRAY_SIZE(fimc_formats))
+		return NULL;
+
+	return &fimc_formats[index];
+}
+
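+/*
+ * Check that a source/destination size pair is within the scaler range.
+ * With the scaler disabled only a 1:1 copy is accepted; otherwise the
+ * downscaling ratio must stay below SCALER_MAX_HRATIO/SCALER_MAX_VRATIO.
+ * Rotation by 90 or 270 degrees swaps the destination width and height.
+ */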
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+			    int dw, int dh, int rotation)
+{
+	if (rotation == 90 || rotation == 270)
+		swap(dw, dh);
+
+	if (!ctx->scaler.enabled)
+		return (sw == dw && sh == dh) ? 0 : -EINVAL;
+
+	if ((sw >= SCALER_MAX_HRATIO * dw) || (sh >= SCALER_MAX_VRATIO * dh))
+		return -EINVAL;
+
+	return 0;
+}
+
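+/*
+ * Find the prescaler setting for a source/target length pair: the ratio is
+ * the largest power of two (up to 32) satisfying src >= tar * ratio, and the
+ * shift is its exponent.  Downscaling by a factor of 64 or more is rejected.
+ */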
+static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+	u32 sh = 6;
+
+	if (src >= 64 * tar)
+		return -EINVAL;
+
+	while (sh--) {
+		u32 tmp = 1 << sh;
+		if (src >= tar * tmp) {
+			*shift = sh, *ratio = tmp;
+			return 0;
+		}
+	}
+	*shift = 0, *ratio = 1;
+	return 0;
+}
+
+int fimc_set_scaler_info(struct fimc_ctx *ctx)
+{
+	struct fimc_variant *variant = ctx->fimc_dev->variant;
+	struct device *dev = &ctx->fimc_dev->pdev->dev;
+	struct fimc_scaler *sc = &ctx->scaler;
+	struct fimc_frame *s_frame = &ctx->s_frame;
+	struct fimc_frame *d_frame = &ctx->d_frame;
+	int tx, ty, sx, sy;
+	int ret;
+
+	if (ctx->rotation == 90 || ctx->rotation == 270) {
+		ty = d_frame->width;
+		tx = d_frame->height;
+	} else {
+		tx = d_frame->width;
+		ty = d_frame->height;
+	}
+	if (tx <= 0 || ty <= 0) {
+		dev_err(dev, "Invalid target size: %dx%d", tx, ty);
+		return -EINVAL;
+	}
+
+	sx = s_frame->width;
+	sy = s_frame->height;
+	if (sx <= 0 || sy <= 0) {
+		dev_err(dev, "Invalid source size: %dx%d", sx, sy);
+		return -EINVAL;
+	}
+	sc->real_width = sx;
+	sc->real_height = sy;
+
+	ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+	if (ret)
+		return ret;
+
+	ret = fimc_get_scaler_factor(sy, ty,  &sc->pre_vratio, &sc->vfactor);
+	if (ret)
+		return ret;
+
+	sc->pre_dst_width = sx / sc->pre_hratio;
+	sc->pre_dst_height = sy / sc->pre_vratio;
+
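+	/*
+	 * The main scaler ratio covers the scaling left after the prescaler:
+	 * source size over (target size << pre-scale shift), expressed as a
+	 * fixed-point fraction with 14 fractional bits when the extended
+	 * main scaler (CIEXTEN) is present, 8 bits otherwise.
+	 */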
+	if (variant->has_mainscaler_ext) {
+		sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+		sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+	} else {
+		sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+		sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+
+	}
+
+	sc->scaleup_h = (tx >= sx) ? 1 : 0;
+	sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+	/*
+	 * Enable the transparent DMA copy mode when the source and
+	 * destination have identical color format and size.
+	 */
+	if (s_frame->fmt->color == d_frame->fmt->color
+		&& s_frame->width == d_frame->width
+		&& s_frame->height == d_frame->height)
+		sc->copy_mode = 1;
+	else
+		sc->copy_mode = 0;
+
+	return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *priv)
+{
+	struct fimc_dev *fimc = priv;
+	struct fimc_ctx *ctx;
+
+	fimc_hw_clear_irq(fimc);
+
+	spin_lock(&fimc->slock);
+
+	if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
+		if (test_and_clear_bit(ST_M2M_SUSPENDING, &fimc->state)) {
+			set_bit(ST_M2M_SUSPENDED, &fimc->state);
+			wake_up(&fimc->irq_queue);
+			goto out;
+		}
+		ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
+		if (ctx != NULL) {
+			spin_unlock(&fimc->slock);
+			fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+			if (ctx->state & FIMC_CTX_SHUT) {
+				ctx->state &= ~FIMC_CTX_SHUT;
+				wake_up(&fimc->irq_queue);
+			}
+			return IRQ_HANDLED;
+		}
+	} else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
+		int last_buf = test_bit(ST_CAPT_JPEG, &fimc->state) &&
+				fimc->vid_cap.reqbufs_count == 1;
+		fimc_capture_irq_handler(fimc, !last_buf);
+	}
+out:
+	spin_unlock(&fimc->slock);
+	return IRQ_HANDLED;
+}
+
+/* The color format (colplanes, memplanes) must be already configured. */
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
+		      struct fimc_frame *frame, struct fimc_addr *paddr)
+{
+	int ret = 0;
+	u32 pix_size;
+
+	if (vb == NULL || frame == NULL)
+		return -EINVAL;
+
+	pix_size = frame->width * frame->height;
+
+	dbg("memplanes= %d, colplanes= %d, pix_size= %d",
+		frame->fmt->memplanes, frame->fmt->colplanes, pix_size);
+
+	paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+	if (frame->fmt->memplanes == 1) {
+		switch (frame->fmt->colplanes) {
+		case 1:
+			paddr->cb = 0;
+			paddr->cr = 0;
+			break;
+		case 2:
+			/* decompose Y into Y/Cb */
+			paddr->cb = (u32)(paddr->y + pix_size);
+			paddr->cr = 0;
+			break;
+		case 3:
+			paddr->cb = (u32)(paddr->y + pix_size);
+			/* decompose Y into Y/Cb/Cr */
+			if (FIMC_FMT_YCBCR420 == frame->fmt->color)
+				paddr->cr = (u32)(paddr->cb
+						+ (pix_size >> 2));
+			else /* 422 */
+				paddr->cr = (u32)(paddr->cb
+						+ (pix_size >> 1));
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		if (frame->fmt->memplanes >= 2)
+			paddr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+
+		if (frame->fmt->memplanes == 3)
+			paddr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
+	}
+
+	dbg("PHYS_ADDR: y= 0x%X  cb= 0x%X cr= 0x%X ret= %d",
+	    paddr->y, paddr->cb, paddr->cr, ret);
+
+	return ret;
+}
+
+/* Set order for 1 and 2 plane YCBCR 4:2:2 formats. */
+void fimc_set_yuv_order(struct fimc_ctx *ctx)
+{
+	/* The only mode supported by the SoC. */
+	ctx->in_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+	ctx->out_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+
+	/* Set order for 1 plane input formats. */
+	switch (ctx->s_frame.fmt->color) {
+	case FIMC_FMT_YCRYCB422:
+		ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CBYCRY;
+		break;
+	case FIMC_FMT_CBYCRY422:
+		ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCRYCB;
+		break;
+	case FIMC_FMT_CRYCBY422:
+		ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCBYCR;
+		break;
+	case FIMC_FMT_YCBYCR422:
+	default:
+		ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CRYCBY;
+		break;
+	}
+	dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
+
+	switch (ctx->d_frame.fmt->color) {
+	case FIMC_FMT_YCRYCB422:
+		ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CBYCRY;
+		break;
+	case FIMC_FMT_CBYCRY422:
+		ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCRYCB;
+		break;
+	case FIMC_FMT_CRYCBY422:
+		ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCBYCR;
+		break;
+	case FIMC_FMT_YCBYCR422:
+	default:
+		ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CRYCBY;
+		break;
+	}
+	dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
+}
+
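+/*
+ * Translate the frame's pixel offsets into per-plane DMA offsets.  On IP
+ * revisions without pixel-granular horizontal offsets (!pix_hoff) the luma
+ * horizontal offset is converted to bytes, and the chroma offsets are halved
+ * horizontally for 3-plane formats and vertically for 4:2:0 formats.
+ */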
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+{
+	struct fimc_variant *variant = ctx->fimc_dev->variant;
+	u32 i, depth = 0;
+
+	for (i = 0; i < f->fmt->colplanes; i++)
+		depth += f->fmt->depth[i];
+
+	f->dma_offset.y_h = f->offs_h;
+	if (!variant->pix_hoff)
+		f->dma_offset.y_h *= (depth >> 3);
+
+	f->dma_offset.y_v = f->offs_v;
+
+	f->dma_offset.cb_h = f->offs_h;
+	f->dma_offset.cb_v = f->offs_v;
+
+	f->dma_offset.cr_h = f->offs_h;
+	f->dma_offset.cr_v = f->offs_v;
+
+	if (!variant->pix_hoff) {
+		if (f->fmt->colplanes == 3) {
+			f->dma_offset.cb_h >>= 1;
+			f->dma_offset.cr_h >>= 1;
+		}
+		if (f->fmt->color == FIMC_FMT_YCBCR420) {
+			f->dma_offset.cb_v >>= 1;
+			f->dma_offset.cr_v >>= 1;
+		}
+	}
+
+	dbg("in_offset: color= %d, y_h= %d, y_v= %d",
+	    f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
+}
+
+static int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
+{
+	struct fimc_effect *effect = &ctx->effect;
+
+	switch (colorfx) {
+	case V4L2_COLORFX_NONE:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+		break;
+	case V4L2_COLORFX_BW:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+		effect->pat_cb = 128;
+		effect->pat_cr = 128;
+		break;
+	case V4L2_COLORFX_SEPIA:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+		effect->pat_cb = 115;
+		effect->pat_cr = 145;
+		break;
+	case V4L2_COLORFX_NEGATIVE:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_NEGATIVE;
+		break;
+	case V4L2_COLORFX_EMBOSS:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_EMBOSSING;
+		break;
+	case V4L2_COLORFX_ART_FREEZE:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_ARTFREEZE;
+		break;
+	case V4L2_COLORFX_SILHOUETTE:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_SILHOUETTE;
+		break;
+	case V4L2_COLORFX_SET_CBCR:
+		effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+		effect->pat_cb = ctx->ctrls.colorfx_cbcr->val >> 8;
+		effect->pat_cr = ctx->ctrls.colorfx_cbcr->val & 0xff;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * V4L2 controls handling
+ */
+#define ctrl_to_ctx(__ctrl) \
+	container_of((__ctrl)->handler, struct fimc_ctx, ctrls.handler)
+
+static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_variant *variant = fimc->variant;
+	unsigned int flags = FIMC_DST_FMT | FIMC_SRC_FMT;
+	int ret = 0;
+
+	if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_HFLIP:
+		ctx->hflip = ctrl->val;
+		break;
+
+	case V4L2_CID_VFLIP:
+		ctx->vflip = ctrl->val;
+		break;
+
+	case V4L2_CID_ROTATE:
+		if (fimc_capture_pending(fimc) ||
+		    (ctx->state & flags) == flags) {
+			ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+					ctx->s_frame.height, ctx->d_frame.width,
+					ctx->d_frame.height, ctrl->val);
+			if (ret)
+				return -EINVAL;
+		}
+		if ((ctrl->val == 90 || ctrl->val == 270) &&
+		    !variant->has_out_rot)
+			return -EINVAL;
+
+		ctx->rotation = ctrl->val;
+		break;
+
+	case V4L2_CID_ALPHA_COMPONENT:
+		ctx->d_frame.alpha = ctrl->val;
+		break;
+
+	case V4L2_CID_COLORFX:
+		ret = fimc_set_color_effect(ctx, ctrl->val);
+		if (ret)
+			return ret;
+		break;
+	}
+
+	ctx->state |= FIMC_PARAMS;
+	set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+	return 0;
+}
+
+static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct fimc_ctx *ctx = ctrl_to_ctx(ctrl);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+	ret = __fimc_s_ctrl(ctx, ctrl);
+	spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
+	.s_ctrl = fimc_s_ctrl,
+};
+
+int fimc_ctrls_create(struct fimc_ctx *ctx)
+{
+	struct fimc_variant *variant = ctx->fimc_dev->variant;
+	unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
+	struct fimc_ctrls *ctrls = &ctx->ctrls;
+	struct v4l2_ctrl_handler *handler = &ctrls->handler;
+
+	if (ctx->ctrls.ready)
+		return 0;
+
+	v4l2_ctrl_handler_init(handler, 6);
+
+	ctrls->rotate = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+					V4L2_CID_ROTATE, 0, 270, 90, 0);
+	ctrls->hflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+					V4L2_CID_HFLIP, 0, 1, 1, 0);
+	ctrls->vflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+					V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+	if (variant->has_alpha)
+		ctrls->alpha = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+					V4L2_CID_ALPHA_COMPONENT,
+					0, max_alpha, 1, 0);
+	else
+		ctrls->alpha = NULL;
+
+	ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, &fimc_ctrl_ops,
+				V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+				~0x983f, V4L2_COLORFX_NONE);
+
+	ctrls->colorfx_cbcr = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+				V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+
+	ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+
+	if (!handler->error) {
+		v4l2_ctrl_cluster(2, &ctrls->colorfx);
+		ctrls->ready = true;
+	}
+
+	return handler->error;
+}
+
+void fimc_ctrls_delete(struct fimc_ctx *ctx)
+{
+	struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+	if (ctrls->ready) {
+		v4l2_ctrl_handler_free(&ctrls->handler);
+		ctrls->ready = false;
+		ctrls->alpha = NULL;
+	}
+}
+
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
+{
+	unsigned int has_alpha = ctx->d_frame.fmt->flags & FMT_HAS_ALPHA;
+	struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+	if (!ctrls->ready)
+		return;
+
+	mutex_lock(ctrls->handler.lock);
+	v4l2_ctrl_activate(ctrls->rotate, active);
+	v4l2_ctrl_activate(ctrls->hflip, active);
+	v4l2_ctrl_activate(ctrls->vflip, active);
+	v4l2_ctrl_activate(ctrls->colorfx, active);
+	if (ctrls->alpha)
+		v4l2_ctrl_activate(ctrls->alpha, active && has_alpha);
+
+	if (active) {
+		fimc_set_color_effect(ctx, ctrls->colorfx->cur.val);
+		ctx->rotation = ctrls->rotate->val;
+		ctx->hflip    = ctrls->hflip->val;
+		ctx->vflip    = ctrls->vflip->val;
+	} else {
+		ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+		ctx->rotation = 0;
+		ctx->hflip    = 0;
+		ctx->vflip    = 0;
+	}
+	mutex_unlock(ctrls->handler.lock);
+}
+
+/* Update maximum value of the alpha color control */
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct v4l2_ctrl *ctrl = ctx->ctrls.alpha;
+
+	if (ctrl == NULL || !fimc->variant->has_alpha)
+		return;
+
+	v4l2_ctrl_lock(ctrl);
+	ctrl->maximum = fimc_get_alpha_mask(ctx->d_frame.fmt);
+
+	if (ctrl->cur.val > ctrl->maximum)
+		ctrl->cur.val = ctrl->maximum;
+
+	v4l2_ctrl_unlock(ctrl);
+}
+
+int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+	int i;
+
+	pixm->width = frame->o_width;
+	pixm->height = frame->o_height;
+	pixm->field = V4L2_FIELD_NONE;
+	pixm->pixelformat = frame->fmt->fourcc;
+	pixm->colorspace = V4L2_COLORSPACE_JPEG;
+	pixm->num_planes = frame->fmt->memplanes;
+
+	for (i = 0; i < pixm->num_planes; ++i) {
+		int bpl = frame->f_width;
+		if (frame->fmt->colplanes == 1) /* packed formats */
+			bpl = (bpl * frame->fmt->depth[0]) / 8;
+		pixm->plane_fmt[i].bytesperline = bpl;
+		pixm->plane_fmt[i].sizeimage = (frame->o_width *
+			frame->o_height * frame->fmt->depth[i]) / 8;
+	}
+	return 0;
+}
+
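+/*
+ * Initialize the frame geometry from a user supplied multi-planar format.
+ * The full (virtual) line width is derived from the first plane's
+ * bytesperline, converted back to pixels for packed (single colour plane)
+ * formats.
+ */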
+void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+
+	frame->f_width  = pixm->plane_fmt[0].bytesperline;
+	if (frame->fmt->colplanes == 1)
+		frame->f_width = (frame->f_width * 8) / frame->fmt->depth[0];
+	frame->f_height	= pixm->height;
+	frame->width    = pixm->width;
+	frame->height   = pixm->height;
+	frame->o_width  = pixm->width;
+	frame->o_height = pixm->height;
+	frame->offs_h   = 0;
+	frame->offs_v   = 0;
+}
+
+/**
+ * fimc_adjust_mplane_format - adjust bytesperline/sizeimage for each plane
+ * @fmt: fimc pixel format description (input)
+ * @width: requested pixel width
+ * @height: requested pixel height
+ * @pix: multi-plane format to adjust
+ */
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+			       struct v4l2_pix_format_mplane *pix)
+{
+	u32 bytesperline = 0;
+	int i;
+
+	pix->colorspace	= V4L2_COLORSPACE_JPEG;
+	pix->field = V4L2_FIELD_NONE;
+	pix->num_planes = fmt->memplanes;
+	pix->pixelformat = fmt->fourcc;
+	pix->height = height;
+	pix->width = width;
+
+	for (i = 0; i < pix->num_planes; ++i) {
+		struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+		u32 bpl = plane_fmt->bytesperline;
+
+		if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
+			bpl = pix->width; /* Planar */
+
+		if (fmt->colplanes == 1 && /* Packed */
+		    (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
+			bpl = (pix->width * fmt->depth[0]) / 8;
+
+		if (i == 0) /* Same bytesperline for each plane. */
+			bytesperline = bpl;
+
+		plane_fmt->bytesperline = bytesperline;
+		plane_fmt->sizeimage = max((pix->width * pix->height *
+				   fmt->depth[i]) / 8, plane_fmt->sizeimage);
+	}
+}
+
+/**
+ * fimc_find_format - lookup fimc color format by fourcc or media bus format
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @mask: the color flags to match
+ * @index: offset in the fimc_formats array, ignored if negative
+ */
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
+				  unsigned int mask, int index)
+{
+	struct fimc_fmt *fmt, *def_fmt = NULL;
+	unsigned int i;
+	int id = 0;
+
+	if (index >= (int)ARRAY_SIZE(fimc_formats))
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+		fmt = &fimc_formats[i];
+		if (!(fmt->flags & mask))
+			continue;
+		if (pixelformat && fmt->fourcc == *pixelformat)
+			return fmt;
+		if (mbus_code && fmt->mbus_code == *mbus_code)
+			return fmt;
+		if (index == id)
+			def_fmt = fmt;
+		id++;
+	}
+	return def_fmt;
+}
+
+static void fimc_clk_put(struct fimc_dev *fimc)
+{
+	int i;
+	for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
+		if (IS_ERR_OR_NULL(fimc->clock[i]))
+			continue;
+		clk_unprepare(fimc->clock[i]);
+		clk_put(fimc->clock[i]);
+		fimc->clock[i] = NULL;
+	}
+}
+
+static int fimc_clk_get(struct fimc_dev *fimc)
+{
+	int i, ret;
+
+	for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
+		fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
+		if (IS_ERR(fimc->clock[i]))
+			goto err;
+		ret = clk_prepare(fimc->clock[i]);
+		if (ret < 0) {
+			clk_put(fimc->clock[i]);
+			fimc->clock[i] = NULL;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	fimc_clk_put(fimc);
+	dev_err(&fimc->pdev->dev, "failed to get clock: %s\n",
+		fimc_clocks[i]);
+	return -ENXIO;
+}
+
+static int fimc_m2m_suspend(struct fimc_dev *fimc)
+{
+	unsigned long flags;
+	int timeout;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	if (!fimc_m2m_pending(fimc)) {
+		spin_unlock_irqrestore(&fimc->slock, flags);
+		return 0;
+	}
+	clear_bit(ST_M2M_SUSPENDED, &fimc->state);
+	set_bit(ST_M2M_SUSPENDING, &fimc->state);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	timeout = wait_event_timeout(fimc->irq_queue,
+			     test_bit(ST_M2M_SUSPENDED, &fimc->state),
+			     FIMC_SHUTDOWN_TIMEOUT);
+
+	clear_bit(ST_M2M_SUSPENDING, &fimc->state);
+	return timeout == 0 ? -EAGAIN : 0;
+}
+
+static int fimc_m2m_resume(struct fimc_dev *fimc)
+{
+	struct fimc_ctx *ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	/* Clear for full H/W setup in first run after resume */
+	ctx = fimc->m2m.ctx;
+	fimc->m2m.ctx = NULL;
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
+		fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+	return 0;
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+	struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
+	struct s5p_platform_fimc *pdata;
+	struct fimc_dev *fimc;
+	struct resource *res;
+	int ret = 0;
+
+	if (pdev->id >= drv_data->num_entities) {
+		dev_err(&pdev->dev, "Invalid platform device id: %d\n",
+			pdev->id);
+		return -EINVAL;
+	}
+
+	fimc = devm_kzalloc(&pdev->dev, sizeof(*fimc), GFP_KERNEL);
+	if (!fimc)
+		return -ENOMEM;
+
+	fimc->id = pdev->id;
+
+	fimc->variant = drv_data->variant[fimc->id];
+	fimc->pdev = pdev;
+	pdata = pdev->dev.platform_data;
+	fimc->pdata = pdata;
+
+	init_waitqueue_head(&fimc->irq_queue);
+	spin_lock_init(&fimc->slock);
+	mutex_init(&fimc->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fimc->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (fimc->regs == NULL) {
+		dev_err(&pdev->dev, "Failed to obtain io memory\n");
+		return -ENOENT;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Failed to get IRQ resource\n");
+		return -ENXIO;
+	}
+
+	ret = fimc_clk_get(fimc);
+	if (ret)
+		return ret;
+	clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
+	clk_enable(fimc->clock[CLK_BUS]);
+
+	ret = devm_request_irq(&pdev->dev, res->start, fimc_irq_handler,
+			       0, dev_name(&pdev->dev), fimc);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
+		goto err_clk;
+	}
+
+	ret = fimc_initialize_capture_subdev(fimc);
+	if (ret)
+		goto err_clk;
+
+	platform_set_drvdata(pdev, fimc);
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		goto err_sd;
+	/* Initialize contiguous memory allocator */
+	fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(fimc->alloc_ctx)) {
+		ret = PTR_ERR(fimc->alloc_ctx);
+		goto err_pm;
+	}
+
+	dev_dbg(&pdev->dev, "FIMC.%d registered successfully\n", fimc->id);
+
+	pm_runtime_put(&pdev->dev);
+	return 0;
+err_pm:
+	pm_runtime_put(&pdev->dev);
+err_sd:
+	fimc_unregister_capture_subdev(fimc);
+err_clk:
+	fimc_clk_put(fimc);
+	return ret;
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+	struct fimc_dev *fimc =	dev_get_drvdata(dev);
+
+	dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+	/* Enable clocks and perform basic initialization */
+	clk_enable(fimc->clock[CLK_GATE]);
+	fimc_hw_reset(fimc);
+
+	/* Resume the capture or mem-to-mem device */
+	if (fimc_capture_busy(fimc))
+		return fimc_capture_resume(fimc);
+
+	return fimc_m2m_resume(fimc);
+}
+
+static int fimc_runtime_suspend(struct device *dev)
+{
+	struct fimc_dev *fimc =	dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (fimc_capture_busy(fimc))
+		ret = fimc_capture_suspend(fimc);
+	else
+		ret = fimc_m2m_suspend(fimc);
+	if (!ret)
+		clk_disable(fimc->clock[CLK_GATE]);
+
+	dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_resume(struct device *dev)
+{
+	struct fimc_dev *fimc =	dev_get_drvdata(dev);
+	unsigned long flags;
+
+	dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+	/* Do not resume if the device was idle before system suspend */
+	spin_lock_irqsave(&fimc->slock, flags);
+	if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+	    (!fimc_m2m_active(fimc) && !fimc_capture_busy(fimc))) {
+		spin_unlock_irqrestore(&fimc->slock, flags);
+		return 0;
+	}
+	fimc_hw_reset(fimc);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	if (fimc_capture_busy(fimc))
+		return fimc_capture_resume(fimc);
+
+	return fimc_m2m_resume(fimc);
+}
+
+static int fimc_suspend(struct device *dev)
+{
+	struct fimc_dev *fimc =	dev_get_drvdata(dev);
+
+	dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
+
+	if (test_and_set_bit(ST_LPM, &fimc->state))
+		return 0;
+	if (fimc_capture_busy(fimc))
+		return fimc_capture_suspend(fimc);
+
+	return fimc_m2m_suspend(fimc);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+	struct fimc_dev *fimc = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	fimc_unregister_capture_subdev(fimc);
+	vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+
+	clk_disable(fimc->clock[CLK_BUS]);
+	fimc_clk_put(fimc);
+
+	dev_info(&pdev->dev, "driver unloaded\n");
+	return 0;
+}
+
+/* Image pixel limits, similar across several FIMC HW revisions. */
+static struct fimc_pix_limit s5p_pix_limit[4] = {
+	[0] = {
+		.scaler_en_w	= 3264,
+		.scaler_dis_w	= 8192,
+		.in_rot_en_h	= 1920,
+		.in_rot_dis_w	= 8192,
+		.out_rot_en_w	= 1920,
+		.out_rot_dis_w	= 4224,
+	},
+	[1] = {
+		.scaler_en_w	= 4224,
+		.scaler_dis_w	= 8192,
+		.in_rot_en_h	= 1920,
+		.in_rot_dis_w	= 8192,
+		.out_rot_en_w	= 1920,
+		.out_rot_dis_w	= 4224,
+	},
+	[2] = {
+		.scaler_en_w	= 1920,
+		.scaler_dis_w	= 8192,
+		.in_rot_en_h	= 1280,
+		.in_rot_dis_w	= 8192,
+		.out_rot_en_w	= 1280,
+		.out_rot_dis_w	= 1920,
+	},
+	[3] = {
+		.scaler_en_w	= 1920,
+		.scaler_dis_w	= 8192,
+		.in_rot_en_h	= 1366,
+		.in_rot_dis_w	= 8192,
+		.out_rot_en_w	= 1366,
+		.out_rot_dis_w	= 1920,
+	},
+};
+
+static struct fimc_variant fimc0_variant_s5p = {
+	.has_inp_rot	 = 1,
+	.has_out_rot	 = 1,
+	.has_cam_if	 = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
+	.out_buf_count	 = 4,
+	.pix_limit	 = &s5p_pix_limit[0],
+};
+
+static struct fimc_variant fimc2_variant_s5p = {
+	.has_cam_if	 = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
+	.out_buf_count	 = 4,
+	.pix_limit	 = &s5p_pix_limit[1],
+};
+
+static struct fimc_variant fimc0_variant_s5pv210 = {
+	.pix_hoff	 = 1,
+	.has_inp_rot	 = 1,
+	.has_out_rot	 = 1,
+	.has_cam_if	 = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
+	.out_buf_count	 = 4,
+	.pix_limit	 = &s5p_pix_limit[1],
+};
+
+static struct fimc_variant fimc1_variant_s5pv210 = {
+	.pix_hoff	 = 1,
+	.has_inp_rot	 = 1,
+	.has_out_rot	 = 1,
+	.has_cam_if	 = 1,
+	.has_mainscaler_ext = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 1,
+	.min_vsize_align = 1,
+	.out_buf_count	 = 4,
+	.pix_limit	 = &s5p_pix_limit[2],
+};
+
+static struct fimc_variant fimc2_variant_s5pv210 = {
+	.has_cam_if	 = 1,
+	.pix_hoff	 = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 8,
+	.min_vsize_align = 16,
+	.out_buf_count	 = 4,
+	.pix_limit	 = &s5p_pix_limit[2],
+};
+
+static struct fimc_variant fimc0_variant_exynos4 = {
+	.pix_hoff	 = 1,
+	.has_inp_rot	 = 1,
+	.has_out_rot	 = 1,
+	.has_cam_if	 = 1,
+	.has_cistatus2	 = 1,
+	.has_mainscaler_ext = 1,
+	.has_alpha	 = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 2,
+	.min_vsize_align = 1,
+	.out_buf_count	 = 32,
+	.pix_limit	 = &s5p_pix_limit[1],
+};
+
+static struct fimc_variant fimc3_variant_exynos4 = {
+	.pix_hoff	 = 1,
+	.has_cam_if	 = 1,
+	.has_cistatus2	 = 1,
+	.has_mainscaler_ext = 1,
+	.has_alpha	 = 1,
+	.min_inp_pixsize = 16,
+	.min_out_pixsize = 16,
+	.hor_offs_align	 = 2,
+	.min_vsize_align = 1,
+	.out_buf_count	 = 32,
+	.pix_limit	 = &s5p_pix_limit[3],
+};
+
+/* S5PC100 */
+static struct fimc_drvdata fimc_drvdata_s5p = {
+	.variant = {
+		[0] = &fimc0_variant_s5p,
+		[1] = &fimc0_variant_s5p,
+		[2] = &fimc2_variant_s5p,
+	},
+	.num_entities = 3,
+	.lclk_frequency = 133000000UL,
+};
+
+/* S5PV210, S5PC110 */
+static struct fimc_drvdata fimc_drvdata_s5pv210 = {
+	.variant = {
+		[0] = &fimc0_variant_s5pv210,
+		[1] = &fimc1_variant_s5pv210,
+		[2] = &fimc2_variant_s5pv210,
+	},
+	.num_entities = 3,
+	.lclk_frequency = 166000000UL,
+};
+
+/* EXYNOS4210, S5PV310, S5PC210 */
+static struct fimc_drvdata fimc_drvdata_exynos4 = {
+	.variant = {
+		[0] = &fimc0_variant_exynos4,
+		[1] = &fimc0_variant_exynos4,
+		[2] = &fimc0_variant_exynos4,
+		[3] = &fimc3_variant_exynos4,
+	},
+	.num_entities = 4,
+	.lclk_frequency = 166000000UL,
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+	{
+		.name		= "s5p-fimc",
+		.driver_data	= (unsigned long)&fimc_drvdata_s5p,
+	}, {
+		.name		= "s5pv210-fimc",
+		.driver_data	= (unsigned long)&fimc_drvdata_s5pv210,
+	}, {
+		.name		= "exynos4-fimc",
+		.driver_data	= (unsigned long)&fimc_drvdata_exynos4,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+static struct platform_driver fimc_driver = {
+	.probe		= fimc_probe,
+	.remove		= __devexit_p(fimc_remove),
+	.id_table	= fimc_driver_ids,
+	.driver = {
+		.name	= FIMC_MODULE_NAME,
+		.owner	= THIS_MODULE,
+		.pm     = &fimc_pm_ops,
+	}
+};
+
+int __init fimc_register_driver(void)
+{
+	return platform_driver_register(&fimc_driver);
+}
+
+void __exit fimc_unregister_driver(void)
+{
+	platform_driver_unregister(&fimc_driver);
+}
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.h b/drivers/media/platform/s5p-fimc/fimc-core.h
new file mode 100644
index 0000000..808ccc6
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-core.h
@@ -0,0 +1,713 @@
+/*
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_CORE_H_
+#define FIMC_CORE_H_
+
+/*#define DEBUG*/
+
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/io.h>
+#include <asm/sizes.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5p_fimc.h>
+
+#define dbg(fmt, args...) \
+	pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##args)
+
+/* Time to wait for next frame VSYNC interrupt while stopping operation. */
+#define FIMC_SHUTDOWN_TIMEOUT	((100*HZ)/1000)
+#define MAX_FIMC_CLOCKS		2
+#define FIMC_MODULE_NAME	"s5p-fimc"
+#define FIMC_MAX_DEVS		4
+#define FIMC_MAX_OUT_BUFS	4
+#define SCALER_MAX_HRATIO	64
+#define SCALER_MAX_VRATIO	64
+#define DMA_MIN_SIZE		8
+#define FIMC_CAMIF_MAX_HEIGHT	0x2000
+
+/* indices to the clocks array */
+enum {
+	CLK_BUS,
+	CLK_GATE,
+};
+
+enum fimc_dev_flags {
+	ST_LPM,
+	/* m2m node */
+	ST_M2M_RUN,
+	ST_M2M_PEND,
+	ST_M2M_SUSPENDING,
+	ST_M2M_SUSPENDED,
+	/* capture node */
+	ST_CAPT_PEND,
+	ST_CAPT_RUN,
+	ST_CAPT_STREAM,
+	ST_CAPT_ISP_STREAM,
+	ST_CAPT_SUSPENDED,
+	ST_CAPT_SHUT,
+	ST_CAPT_BUSY,
+	ST_CAPT_APPLY_CFG,
+	ST_CAPT_JPEG,
+};
+
+#define fimc_m2m_active(dev) test_bit(ST_M2M_RUN, &(dev)->state)
+#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+
+#define fimc_capture_running(dev) test_bit(ST_CAPT_RUN, &(dev)->state)
+#define fimc_capture_pending(dev) test_bit(ST_CAPT_PEND, &(dev)->state)
+#define fimc_capture_busy(dev) test_bit(ST_CAPT_BUSY, &(dev)->state)
+
+enum fimc_datapath {
+	FIMC_IO_NONE,
+	FIMC_IO_CAMERA,
+	FIMC_IO_DMA,
+	FIMC_IO_LCDFIFO,
+	FIMC_IO_WRITEBACK,
+	FIMC_IO_ISP,
+};
+
+enum fimc_color_fmt {
+	FIMC_FMT_RGB444 = 0x10,
+	FIMC_FMT_RGB555,
+	FIMC_FMT_RGB565,
+	FIMC_FMT_RGB666,
+	FIMC_FMT_RGB888,
+	FIMC_FMT_RGB30_LOCAL,
+	FIMC_FMT_YCBCR420 = 0x20,
+	FIMC_FMT_YCBYCR422,
+	FIMC_FMT_YCRYCB422,
+	FIMC_FMT_CBYCRY422,
+	FIMC_FMT_CRYCBY422,
+	FIMC_FMT_YCBCR444_LOCAL,
+	FIMC_FMT_JPEG = 0x40,
+	FIMC_FMT_RAW8 = 0x80,
+	FIMC_FMT_RAW10,
+	FIMC_FMT_RAW12,
+};
+
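+/* The pixel format class is encoded in bits 4..7 of the enum values above. */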
+#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
+#define fimc_fmt_is_jpeg(x) (!!((x) & 0x40))
+
+#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
+			(__strt) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+
+/* The hardware context state. */
+#define	FIMC_PARAMS		(1 << 0)
+#define	FIMC_SRC_FMT		(1 << 3)
+#define	FIMC_DST_FMT		(1 << 4)
+#define	FIMC_COMPOSE		(1 << 5)
+#define	FIMC_CTX_M2M		(1 << 16)
+#define	FIMC_CTX_CAP		(1 << 17)
+#define	FIMC_CTX_SHUT		(1 << 18)
+
+/* Image conversion flags */
+#define	FIMC_IN_DMA_ACCESS_TILED	(1 << 0)
+#define	FIMC_IN_DMA_ACCESS_LINEAR	(0 << 0)
+#define	FIMC_OUT_DMA_ACCESS_TILED	(1 << 1)
+#define	FIMC_OUT_DMA_ACCESS_LINEAR	(0 << 1)
+#define	FIMC_SCAN_MODE_PROGRESSIVE	(0 << 2)
+#define	FIMC_SCAN_MODE_INTERLACED	(1 << 2)
+/*
+ * YCbCr data dynamic range for RGB-YUV color conversion.
+ * Y/Cb/Cr: (0 ~ 255)
+ */
+#define	FIMC_COLOR_RANGE_WIDE		(0 << 3)
+/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
+#define	FIMC_COLOR_RANGE_NARROW		(1 << 3)
+
+/**
+ * struct fimc_fmt - the driver's internal color format data
+ * @mbus_code: Media Bus pixel code, -1 if not applicable
+ * @name: format description
+ * @fourcc: the fourcc code for this format, 0 if not applicable
+ * @color: the corresponding fimc_color_fmt
+ * @memplanes: number of physically non-contiguous data planes
+ * @colplanes: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @flags: flags indicating which operation modes the format applies to
+ */
+struct fimc_fmt {
+	enum v4l2_mbus_pixelcode mbus_code;
+	char	*name;
+	u32	fourcc;
+	u32	color;
+	u16	memplanes;
+	u16	colplanes;
+	u8	depth[VIDEO_MAX_PLANES];
+	u16	flags;
+#define FMT_FLAGS_CAM		(1 << 0)
+#define FMT_FLAGS_M2M_IN	(1 << 1)
+#define FMT_FLAGS_M2M_OUT	(1 << 2)
+#define FMT_FLAGS_M2M		(1 << 1 | 1 << 2)
+#define FMT_HAS_ALPHA		(1 << 3)
+};
+
+/**
+ * struct fimc_dma_offset - pixel offset information for DMA
+ * @y_h:	y value horizontal offset
+ * @y_v:	y value vertical offset
+ * @cb_h:	cb value horizontal offset
+ * @cb_v:	cb value vertical offset
+ * @cr_h:	cr value horizontal offset
+ * @cr_v:	cr value vertical offset
+ */
+struct fimc_dma_offset {
+	int	y_h;
+	int	y_v;
+	int	cb_h;
+	int	cb_v;
+	int	cr_h;
+	int	cr_v;
+};
+
+/**
+ * struct fimc_effect - color effect information
+ * @type:	effect type
+ * @pat_cb:	cb value when type is "arbitrary"
+ * @pat_cr:	cr value when type is "arbitrary"
+ */
+struct fimc_effect {
+	u32	type;
+	u8	pat_cb;
+	u8	pat_cr;
+};
+
+/**
+ * struct fimc_scaler - the configuration data for the FIMC internal scaler
+ * @scaleup_h:		flag indicating scaling up horizontally
+ * @scaleup_v:		flag indicating scaling up vertically
+ * @copy_mode:		flag indicating transparent DMA transfer (no scaling
+ *			and color format conversion)
+ * @enabled:		flag indicating if the scaler is used
+ * @hfactor:		horizontal shift factor
+ * @vfactor:		vertical shift factor
+ * @pre_hratio:		horizontal ratio of the prescaler
+ * @pre_vratio:		vertical ratio of the prescaler
+ * @pre_dst_width:	the prescaler's destination width
+ * @pre_dst_height:	the prescaler's destination height
+ * @main_hratio:	the main scaler's horizontal ratio
+ * @main_vratio:	the main scaler's vertical ratio
+ * @real_width:		source width in pixels (width - offset)
+ * @real_height:	source height in pixels (height - offset)
+ */
+struct fimc_scaler {
+	unsigned int scaleup_h:1;
+	unsigned int scaleup_v:1;
+	unsigned int copy_mode:1;
+	unsigned int enabled:1;
+	u32	hfactor;
+	u32	vfactor;
+	u32	pre_hratio;
+	u32	pre_vratio;
+	u32	pre_dst_width;
+	u32	pre_dst_height;
+	u32	main_hratio;
+	u32	main_vratio;
+	u32	real_width;
+	u32	real_height;
+};
+
+/**
+ * struct fimc_addr - the FIMC physical address set for DMA
+ * @y:	 luminance plane physical address
+ * @cb:	 Cb plane physical address
+ * @cr:	 Cr plane physical address
+ */
+struct fimc_addr {
+	u32	y;
+	u32	cb;
+	u32	cr;
+};
+
+/**
+ * struct fimc_vid_buffer - the driver's video buffer
+ * @vb:    v4l videobuf buffer
+ * @list:  linked list structure for buffer queue
+ * @paddr: precalculated physical address set
+ * @index: buffer index for the output DMA engine
+ */
+struct fimc_vid_buffer {
+	struct vb2_buffer	vb;
+	struct list_head	list;
+	struct fimc_addr	paddr;
+	int			index;
+};
+
+/**
+ * struct fimc_frame - source/target frame properties
+ * @f_width:	image full width (virtual screen size)
+ * @f_height:	image full height (virtual screen size)
+ * @o_width:	original image width as set by S_FMT
+ * @o_height:	original image height as set by S_FMT
+ * @offs_h:	image horizontal pixel offset
+ * @offs_v:	image vertical pixel offset
+ * @width:	image pixel width
+ * @height:	image pixel height
+ * @payload:	image size in bytes (w x h x bpp)
+ * @paddr:	image frame buffer physical addresses
+ * @dma_offset:	DMA offset in bytes
+ * @fmt:	fimc color format pointer
+ */
+struct fimc_frame {
+	u32	f_width;
+	u32	f_height;
+	u32	o_width;
+	u32	o_height;
+	u32	offs_h;
+	u32	offs_v;
+	u32	width;
+	u32	height;
+	unsigned long		payload[VIDEO_MAX_PLANES];
+	struct fimc_addr	paddr;
+	struct fimc_dma_offset	dma_offset;
+	struct fimc_fmt		*fmt;
+	u8			alpha;
+};
+
+/**
+ * struct fimc_m2m_device - v4l2 memory-to-memory device data
+ * @vfd: the video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx: hardware context data
+ * @refcnt: the reference counter
+ */
+struct fimc_m2m_device {
+	struct video_device	*vfd;
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct fimc_ctx		*ctx;
+	int			refcnt;
+};
+
+#define FIMC_SD_PAD_SINK	0
+#define FIMC_SD_PAD_SOURCE	1
+#define FIMC_SD_PADS_NUM	2
+
+/**
+ * struct fimc_vid_cap - camera capture device information
+ * @ctx: hardware context data
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @vfd: video device node for camera capture mode
+ * @subdev: subdev exposing the FIMC processing block
+ * @vd_pad: fimc video capture node pad
+ * @sd_pads: fimc video processing block pads
+ * @mf: media bus format at the FIMC camera input (and the scaler output) pad
+ * @pending_buf_q: the pending buffer queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vbq: the capture video buffer queue
+ * @active_buf_cnt: number of video buffers scheduled in hardware
+ * @buf_index: index for managing the output DMA buffers
+ * @frame_count: the frame counter for statistics
+ * @reqbufs_count: the number of buffers requested in REQBUFS ioctl
+ * @input_index: input (camera sensor) index
+ * @refcnt: driver's private reference counter
+ * @input: capture input type, grp_id of the attached subdev
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ */
+struct fimc_vid_cap {
+	struct fimc_ctx			*ctx;
+	struct vb2_alloc_ctx		*alloc_ctx;
+	struct video_device		*vfd;
+	struct v4l2_subdev		subdev;
+	struct media_pad		vd_pad;
+	struct v4l2_mbus_framefmt	mf;
+	struct media_pad		sd_pads[FIMC_SD_PADS_NUM];
+	struct list_head		pending_buf_q;
+	struct list_head		active_buf_q;
+	struct vb2_queue		vbq;
+	int				active_buf_cnt;
+	int				buf_index;
+	unsigned int			frame_count;
+	unsigned int			reqbufs_count;
+	int				input_index;
+	int				refcnt;
+	u32				input;
+	bool				user_subdev_api;
+};
+
+/**
+ *  struct fimc_pix_limit - image pixel size limits in various IP configurations
+ *
+ *  @scaler_en_w: max input pixel width when the scaler is enabled
+ *  @scaler_dis_w: max input pixel width when the scaler is disabled
+ *  @in_rot_en_h: max input height when the input rotator is on
+ *  @in_rot_dis_w: max input width when the input rotator is off
+ *  @out_rot_en_w: max output width with the output rotator on
+ *  @out_rot_dis_w: max output width with the output rotator off
+ */
+struct fimc_pix_limit {
+	u16 scaler_en_w;
+	u16 scaler_dis_w;
+	u16 in_rot_en_h;
+	u16 in_rot_dis_w;
+	u16 out_rot_en_w;
+	u16 out_rot_dis_w;
+};
+
+/**
+ * struct fimc_variant - FIMC device variant information
+ * @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
+ * @has_inp_rot: set if the IP has an input rotator
+ * @has_out_rot: set if the IP has an output rotator
+ * @has_cistatus2: 1 if CISTATUS2 register is present in this IP revision
+ * @has_mainscaler_ext: 1 if extended mainscaler ratios in CIEXTEN register
+ *			 are present in this IP revision
+ * @has_cam_if: set if this instance has a camera input interface
+ * @has_alpha: set if the output DMA pixel formats support an alpha component
+ * @pix_limit: pixel size constraints for the scaler
+ * @min_inp_pixsize: minimum input pixel size
+ * @min_out_pixsize: minimum output pixel size
+ * @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
+ * @out_buf_count: the number of buffers in output DMA sequence
+ */
+struct fimc_variant {
+	unsigned int	pix_hoff:1;
+	unsigned int	has_inp_rot:1;
+	unsigned int	has_out_rot:1;
+	unsigned int	has_cistatus2:1;
+	unsigned int	has_mainscaler_ext:1;
+	unsigned int	has_cam_if:1;
+	unsigned int	has_alpha:1;
+	struct fimc_pix_limit *pix_limit;
+	u16		min_inp_pixsize;
+	u16		min_out_pixsize;
+	u16		hor_offs_align;
+	u16		min_vsize_align;
+	u16		out_buf_count;
+};
+
+/**
+ * struct fimc_drvdata - per device type driver data
+ * @variant: variant information for this device
+ * @num_entities: number of fimc instances available in a SoC
+ * @lclk_frequency: local bus clock frequency
+ */
+struct fimc_drvdata {
+	struct fimc_variant *variant[FIMC_MAX_DEVS];
+	int num_entities;
+	unsigned long lclk_frequency;
+};
+
+#define fimc_get_drvdata(_pdev) \
+	((struct fimc_drvdata *) platform_get_device_id(_pdev)->driver_data)
+
+struct fimc_ctx;
+
+/**
+ * struct fimc_dev - abstraction for FIMC entity
+ * @slock:	the spinlock protecting this data structure
+ * @lock:	the mutex protecting this data structure
+ * @pdev:	pointer to the FIMC platform device
+ * @pdata:	pointer to the device platform data
+ * @variant:	the IP variant information
+ * @id:		FIMC device index (0..FIMC_MAX_DEVS)
+ * @clock:	clocks required for FIMC operation
+ * @regs:	the mapped hardware registers
+ * @irq_queue:	interrupt handler waitqueue
+ * @v4l2_dev:	root v4l2_device
+ * @m2m:	memory-to-memory V4L2 device information
+ * @vid_cap:	camera capture device information
+ * @state:	flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx:	videobuf2 memory allocator context
+ * @pipeline:	fimc video capture pipeline data structure
+ */
+struct fimc_dev {
+	spinlock_t			slock;
+	struct mutex			lock;
+	struct platform_device		*pdev;
+	struct s5p_platform_fimc	*pdata;
+	struct fimc_variant		*variant;
+	u16				id;
+	struct clk			*clock[MAX_FIMC_CLOCKS];
+	void __iomem			*regs;
+	wait_queue_head_t		irq_queue;
+	struct v4l2_device		*v4l2_dev;
+	struct fimc_m2m_device		m2m;
+	struct fimc_vid_cap		vid_cap;
+	unsigned long			state;
+	struct vb2_alloc_ctx		*alloc_ctx;
+	struct fimc_pipeline		pipeline;
+};
+
+/**
+ * struct fimc_ctrls - v4l2 controls structure
+ * @handler: the control handler
+ * @colorfx: image effect control
+ * @colorfx_cbcr: Cb/Cr coefficients control
+ * @rotate: image rotation control
+ * @hflip: horizontal flip control
+ * @vflip: vertical flip control
+ * @alpha: RGB alpha control
+ * @ready: true if @handler is initialized
+ */
+struct fimc_ctrls {
+	struct v4l2_ctrl_handler handler;
+	struct {
+		struct v4l2_ctrl *colorfx;
+		struct v4l2_ctrl *colorfx_cbcr;
+	};
+	struct v4l2_ctrl *rotate;
+	struct v4l2_ctrl *hflip;
+	struct v4l2_ctrl *vflip;
+	struct v4l2_ctrl *alpha;
+	bool ready;
+};
+
+/**
+ * struct fimc_ctx - the device context data
+ * @s_frame:		source frame properties
+ * @d_frame:		destination frame properties
+ * @out_order_1p:	output 1-plane YCBCR order
+ * @out_order_2p:	output 2-plane YCBCR order
+ * @in_order_1p:	input 1-plane YCBCR order
+ * @in_order_2p:	input 2-plane YCBCR order
+ * @in_path:		input mode (DMA or camera)
+ * @out_path:		output mode (DMA or FIFO)
+ * @scaler:		image scaler properties
+ * @effect:		image effect
+ * @rotation:		image clockwise rotation in degrees
+ * @hflip:		indicates image horizontal flip if set
+ * @vflip:		indicates image vertical flip if set
+ * @flags:		additional flags for image conversion
+ * @state:		flags to keep track of user configuration
+ * @fimc_dev:		the FIMC device this context applies to
+ * @m2m_ctx:		memory-to-memory device context
+ * @fh:			v4l2 file handle
+ * @ctrls:		v4l2 controls structure
+ */
+struct fimc_ctx {
+	struct fimc_frame	s_frame;
+	struct fimc_frame	d_frame;
+	u32			out_order_1p;
+	u32			out_order_2p;
+	u32			in_order_1p;
+	u32			in_order_2p;
+	enum fimc_datapath	in_path;
+	enum fimc_datapath	out_path;
+	struct fimc_scaler	scaler;
+	struct fimc_effect	effect;
+	int			rotation;
+	unsigned int		hflip:1;
+	unsigned int		vflip:1;
+	u32			flags;
+	u32			state;
+	struct fimc_dev		*fimc_dev;
+	struct v4l2_m2m_ctx	*m2m_ctx;
+	struct v4l2_fh		fh;
+	struct fimc_ctrls	ctrls;
+};
+
+#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
+
+static inline void set_frame_bounds(struct fimc_frame *f, u32 width, u32 height)
+{
+	f->o_width  = width;
+	f->o_height = height;
+	f->f_width  = width;
+	f->f_height = height;
+}
+
+static inline void set_frame_crop(struct fimc_frame *f,
+				  u32 left, u32 top, u32 width, u32 height)
+{
+	f->offs_h = left;
+	f->offs_v = top;
+	f->width  = width;
+	f->height = height;
+}
+
+static inline u32 fimc_get_format_depth(struct fimc_fmt *ff)
+{
+	u32 i, depth = 0;
+
+	if (ff != NULL)
+		for (i = 0; i < ff->colplanes; i++)
+			depth += ff->depth[i];
+	return depth;
+}
+
+static inline bool fimc_capture_active(struct fimc_dev *fimc)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	ret = !!(fimc->state & (1 << ST_CAPT_RUN) ||
+		 fimc->state & (1 << ST_CAPT_PEND));
+	spin_unlock_irqrestore(&fimc->slock, flags);
+	return ret;
+}
+
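+/* Context state bit helpers; fimc_dev->slock serializes updates of ctx->state. */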
+static inline void fimc_ctx_state_set(u32 state, struct fimc_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+	ctx->state |= state;
+	spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+}
+
+static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
+	ret = (ctx->state & mask) == mask;
+	spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
+	return ret;
+}
+
+static inline int tiled_fmt(struct fimc_fmt *fmt)
+{
+	return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
+}
+
+/* Return the alpha component bit mask */
+static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
+{
+	switch (fmt->color) {
+	case FIMC_FMT_RGB444:	return 0x0f;
+	case FIMC_FMT_RGB555:	return 0x01;
+	case FIMC_FMT_RGB888:	return 0xff;
+	default:		return 0;
+	}
+}
+
+static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
+					       enum v4l2_buf_type type)
+{
+	struct fimc_frame *frame;
+
+	if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
+		if (fimc_ctx_state_is_set(FIMC_CTX_M2M, ctx))
+			frame = &ctx->s_frame;
+		else
+			return ERR_PTR(-EINVAL);
+	} else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
+		frame = &ctx->d_frame;
+	} else {
+		v4l2_err(ctx->fimc_dev->v4l2_dev,
+			"Wrong buffer/video queue type (%d)\n", type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return frame;
+}
+
+/* -----------------------------------------------------*/
+/* fimc-core.c */
+int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
+				struct v4l2_fmtdesc *f);
+int fimc_ctrls_create(struct fimc_ctx *ctx);
+void fimc_ctrls_delete(struct fimc_ctx *ctx);
+void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
+int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
+			       struct v4l2_pix_format_mplane *pix);
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
+				  unsigned int mask, int index);
+struct fimc_fmt *fimc_get_format(unsigned int index);
+
+int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+			    int dw, int dh, int rotation);
+int fimc_set_scaler_info(struct fimc_ctx *ctx);
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
+		      struct fimc_frame *frame, struct fimc_addr *paddr);
+void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
+void fimc_set_yuv_order(struct fimc_ctx *ctx);
+void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f);
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+			     struct v4l2_device *v4l2_dev);
+void fimc_unregister_m2m_device(struct fimc_dev *fimc);
+int fimc_register_driver(void);
+void fimc_unregister_driver(void);
+
+/* -----------------------------------------------------*/
+/* fimc-m2m.c */
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state);
+
+/* -----------------------------------------------------*/
+/* fimc-capture.c					*/
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc);
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc);
+int fimc_capture_ctrls_create(struct fimc_dev *fimc);
+void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+			void *arg);
+int fimc_capture_suspend(struct fimc_dev *fimc);
+int fimc_capture_resume(struct fimc_dev *fimc);
+
+/*
+ * Buffer list manipulation functions. Must be called with fimc.slock held.
+ */
+
+/**
+ * fimc_active_queue_add - add buffer to the capture active buffers queue
+ * @buf: buffer to add to the active buffers list
+ */
+static inline void fimc_active_queue_add(struct fimc_vid_cap *vid_cap,
+					 struct fimc_vid_buffer *buf)
+{
+	list_add_tail(&buf->list, &vid_cap->active_buf_q);
+	vid_cap->active_buf_cnt++;
+}
+
+/**
+ * fimc_active_queue_pop - pop buffer from the capture active buffers queue
+ *
+ * The caller must assure the active_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_active_queue_pop(
+				    struct fimc_vid_cap *vid_cap)
+{
+	struct fimc_vid_buffer *buf;
+	buf = list_entry(vid_cap->active_buf_q.next,
+			 struct fimc_vid_buffer, list);
+	list_del(&buf->list);
+	vid_cap->active_buf_cnt--;
+	return buf;
+}
+
+/**
+ * fimc_pending_queue_add - add buffer to the capture pending buffers queue
+ * @buf: buffer to add to the pending buffers list
+ */
+static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
+					  struct fimc_vid_buffer *buf)
+{
+	list_add_tail(&buf->list, &vid_cap->pending_buf_q);
+}
+
+/**
+ * fimc_pending_queue_pop - pop buffer from the capture pending buffers queue
+ *
+ * The caller must assure the pending_buf_q list is not empty.
+ */
+static inline struct fimc_vid_buffer *fimc_pending_queue_pop(
+				     struct fimc_vid_cap *vid_cap)
+{
+	struct fimc_vid_buffer *buf;
+	buf = list_entry(vid_cap->pending_buf_q.next,
+			struct fimc_vid_buffer, list);
+	list_del(&buf->list);
+	return buf;
+}
+
+#endif /* FIMC_CORE_H_ */
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
new file mode 100644
index 0000000..f996e94
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c
@@ -0,0 +1,300 @@
+/*
+ * Register interface file for EXYNOS FIMC-LITE (camera interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-lite-reg.h"
+#include "fimc-lite.h"
+#include "fimc-core.h"
+
+#define FLITE_RESET_TIMEOUT 50 /* in ms */
+
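+/*
+ * Software reset sequence: request the reset, poll for the reset-ready
+ * flag within FLITE_RESET_TIMEOUT, then assert the reset bit.
+ */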
+void flite_hw_reset(struct fimc_lite *dev)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(FLITE_RESET_TIMEOUT);
+	u32 cfg;
+
+	cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+	cfg |= FLITE_REG_CIGCTRL_SWRST_REQ;
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+	while (time_is_after_jiffies(end)) {
+		cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+		if (cfg & FLITE_REG_CIGCTRL_SWRST_RDY)
+			break;
+		usleep_range(1000, 5000);
+	}
+
+	cfg |= FLITE_REG_CIGCTRL_SWRST;
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_clear_pending_irq(struct fimc_lite *dev)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS);
+	cfg &= ~FLITE_REG_CISTATUS_IRQ_CAM;
+	writel(cfg, dev->regs + FLITE_REG_CISTATUS);
+}
+
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev)
+{
+	u32 intsrc = readl(dev->regs + FLITE_REG_CISTATUS);
+	return intsrc & FLITE_REG_CISTATUS_IRQ_MASK;
+}
+
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS2);
+	cfg &= ~FLITE_REG_CISTATUS2_LASTCAPEND;
+	writel(cfg, dev->regs + FLITE_REG_CISTATUS2);
+}
+
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
+{
+	u32 cfg, intsrc;
+
+	/* Select interrupts to be enabled for each output mode */
+	if (dev->out_path == FIMC_IO_DMA) {
+		intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+			 FLITE_REG_CIGCTRL_IRQ_LASTEN |
+			 FLITE_REG_CIGCTRL_IRQ_STARTEN;
+	} else {
+		/* An output to the FIMC-IS */
+		intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+			 FLITE_REG_CIGCTRL_IRQ_LASTEN;
+	}
+
+	cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+	cfg |= FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK;
+	cfg &= ~intsrc;
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_capture_start(struct fimc_lite *dev)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+	cfg |= FLITE_REG_CIIMGCPT_IMGCPTEN;
+	writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+void flite_hw_capture_stop(struct fimc_lite *dev)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+	cfg &= ~FLITE_REG_CIIMGCPT_IMGCPTEN;
+	writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+/*
+ * Test pattern (color bars) enable/disable. External sensor
+ * pixel clock must be active for the test pattern to work.
+ */
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+	if (on)
+		cfg |= FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+	else
+		cfg &= ~FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
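+/* Each entry: { media bus code, CISRCSIZE 4:2:2 input order bits, CIGCTRL format bits } */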
+static const u32 src_pixfmt_map[8][3] = {
+	{ V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
+	  FLITE_REG_CIGCTRL_YUV422_1P },
+	{ V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
+	  FLITE_REG_CIGCTRL_YUV422_1P },
+	{ V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
+	  FLITE_REG_CIGCTRL_YUV422_1P },
+	{ V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
+	  FLITE_REG_CIGCTRL_YUV422_1P },
+	{ V4L2_MBUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
+	{ V4L2_MBUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
+	{ V4L2_MBUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
+	{ V4L2_MBUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
+};
+
+/* Set camera input pixel format and resolution */
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
+{
+	enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
+	int i = ARRAY_SIZE(src_pixfmt_map);
+	u32 cfg;
+
+	while (--i >= 0) {
+		if (src_pixfmt_map[i][0] == pixelcode)
+			break;
+	}
+
+	if (i < 0) {
+		i = 0;
+		v4l2_err(dev->vfd,
+			 "Unsupported pixel code, falling back to %#08x\n",
+			 src_pixfmt_map[i][0]);
+	}
+
+	cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+	cfg &= ~FLITE_REG_CIGCTRL_FMT_MASK;
+	cfg |= src_pixfmt_map[i][2];
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+	cfg = readl(dev->regs + FLITE_REG_CISRCSIZE);
+	cfg &= ~(FLITE_REG_CISRCSIZE_ORDER422_MASK |
+		 FLITE_REG_CISRCSIZE_SIZE_CAM_MASK);
+	cfg |= (f->f_width << 16) | f->f_height;
+	cfg |= src_pixfmt_map[i][1];
+	writel(cfg, dev->regs + FLITE_REG_CISRCSIZE);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f)
+{
+	u32 hoff2, voff2;
+	u32 cfg;
+
+	cfg = readl(dev->regs + FLITE_REG_CIWDOFST);
+	cfg &= ~FLITE_REG_CIWDOFST_OFST_MASK;
+	cfg |= (f->rect.left << 16) | f->rect.top;
+	cfg |= FLITE_REG_CIWDOFST_WINOFSEN;
+	writel(cfg, dev->regs + FLITE_REG_CIWDOFST);
+
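+	/* Offsets from the right and bottom edges of the input frame */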
+	hoff2 = f->f_width - f->rect.width - f->rect.left;
+	voff2 = f->f_height - f->rect.height - f->rect.top;
+
+	cfg = (hoff2 << 16) | voff2;
+	writel(cfg, dev->regs + FLITE_REG_CIWDOFST2);
+}
+
+/* Select camera port (A, B) */
+static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CIGENERAL);
+	if (id == 0)
+		cfg &= ~FLITE_REG_CIGENERAL_CAM_B;
+	else
+		cfg |= FLITE_REG_CIGENERAL_CAM_B;
+	writel(cfg, dev->regs + FLITE_REG_CIGENERAL);
+}
+
+/* Select serial or parallel bus, camera port (A,B) and set signals polarity */
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+			     struct s5p_fimc_isp_info *s_info)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+	unsigned int flags = s_info->flags;
+
+	if (s_info->bus_type != FIMC_MIPI_CSI2) {
+		cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
+			 FLITE_REG_CIGCTRL_INVPOLPCLK |
+			 FLITE_REG_CIGCTRL_INVPOLVSYNC |
+			 FLITE_REG_CIGCTRL_INVPOLHREF);
+
+		if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+			cfg |= FLITE_REG_CIGCTRL_INVPOLPCLK;
+
+		if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+			cfg |= FLITE_REG_CIGCTRL_INVPOLVSYNC;
+
+		if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+			cfg |= FLITE_REG_CIGCTRL_INVPOLHREF;
+	} else {
+		cfg |= FLITE_REG_CIGCTRL_SELCAM_MIPI;
+	}
+
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+	flite_hw_set_camera_port(dev, s_info->mux_id);
+}
+
+static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+{
+	static const u32 pixcode[4][2] = {
+		{ V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
+		{ V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
+		{ V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
+		{ V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
+	};
+	u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+	int i = ARRAY_SIZE(pixcode);
+
+	while (--i >= 0)
+		if (pixcode[i][0] == dev->fmt->mbus_code)
+			break;
+	/* Fall back to the first (YCbYCr) entry if the code is not found */
+	if (i < 0)
+		i = 0;
+	cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
+	writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f)
+{
+	u32 cfg;
+
+	/* Maximum output pixel size */
+	cfg = readl(dev->regs + FLITE_REG_CIOCAN);
+	cfg &= ~FLITE_REG_CIOCAN_MASK;
+	cfg = (f->f_height << 16) | f->f_width;
+	writel(cfg, dev->regs + FLITE_REG_CIOCAN);
+
+	/* DMA offsets */
+	cfg = readl(dev->regs + FLITE_REG_CIOOFF);
+	cfg &= ~FLITE_REG_CIOOFF_MASK;
+	cfg |= (f->rect.top << 16) | f->rect.left;
+	writel(cfg, dev->regs + FLITE_REG_CIOOFF);
+}
+
+/* Enable/disable output DMA, set output pixel size and offsets (composition) */
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+			     bool enable)
+{
+	u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+
+	if (!enable) {
+		cfg |= FLITE_REG_CIGCTRL_ODMA_DISABLE;
+		writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+		return;
+	}
+
+	cfg &= ~FLITE_REG_CIGCTRL_ODMA_DISABLE;
+	writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+	flite_hw_set_out_order(dev, f);
+	flite_hw_set_dma_window(dev, f);
+}
+
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
+{
+	struct {
+		u32 offset;
+		const char * const name;
+	} registers[] = {
+		{ 0x00, "CISRCSIZE" },
+		{ 0x04, "CIGCTRL" },
+		{ 0x08, "CIIMGCPT" },
+		{ 0x0c, "CICPTSEQ" },
+		{ 0x10, "CIWDOFST" },
+		{ 0x14, "CIWDOFST2" },
+		{ 0x18, "CIODMAFMT" },
+		{ 0x20, "CIOCAN" },
+		{ 0x24, "CIOOFF" },
+		{ 0x30, "CIOSA" },
+		{ 0x40, "CISTATUS" },
+		{ 0x44, "CISTATUS2" },
+		{ 0xf0, "CITHOLD" },
+		{ 0xfc, "CIGENERAL" },
+	};
+	u32 i;
+
+	pr_info("--- %s ---\n", label);
+	for (i = 0; i < ARRAY_SIZE(registers); i++) {
+		u32 cfg = readl(dev->regs + registers[i].offset);
+		pr_info("%s: %s:\t0x%08x\n", __func__, registers[i].name, cfg);
+	}
+}
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.h b/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
new file mode 100644
index 0000000..adb9e9e
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_REG_H_
+#define FIMC_LITE_REG_H_
+
+#include "fimc-lite.h"
+
+/* Camera Source size */
+#define FLITE_REG_CISRCSIZE			0x00
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR	(0 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB	(1 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY	(2 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY	(3 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_MASK	(0x3 << 14)
+#define FLITE_REG_CISRCSIZE_SIZE_CAM_MASK	(0x3fff << 16 | 0x3fff)
+
+/* Global control */
+#define FLITE_REG_CIGCTRL			0x04
+#define FLITE_REG_CIGCTRL_YUV422_1P		(0x1e << 24)
+#define FLITE_REG_CIGCTRL_RAW8			(0x2a << 24)
+#define FLITE_REG_CIGCTRL_RAW10			(0x2b << 24)
+#define FLITE_REG_CIGCTRL_RAW12			(0x2c << 24)
+#define FLITE_REG_CIGCTRL_RAW14			(0x2d << 24)
+/* User defined formats. x = 0...15 */
+#define FLITE_REG_CIGCTRL_USER(x)		((0x30 + x - 1) << 24)
+#define FLITE_REG_CIGCTRL_FMT_MASK		(0x3f << 24)
+#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE	(1 << 21)
+#define FLITE_REG_CIGCTRL_ODMA_DISABLE		(1 << 20)
+#define FLITE_REG_CIGCTRL_SWRST_REQ		(1 << 19)
+#define FLITE_REG_CIGCTRL_SWRST_RDY		(1 << 18)
+#define FLITE_REG_CIGCTRL_SWRST			(1 << 17)
+#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR	(1 << 15)
+#define FLITE_REG_CIGCTRL_INVPOLPCLK		(1 << 14)
+#define FLITE_REG_CIGCTRL_INVPOLVSYNC		(1 << 13)
+#define FLITE_REG_CIGCTRL_INVPOLHREF		(1 << 12)
+/* Interrupts mask bits (1 disables an interrupt) */
+#define FLITE_REG_CIGCTRL_IRQ_LASTEN		(1 << 8)
+#define FLITE_REG_CIGCTRL_IRQ_ENDEN		(1 << 7)
+#define FLITE_REG_CIGCTRL_IRQ_STARTEN		(1 << 6)
+#define FLITE_REG_CIGCTRL_IRQ_OVFEN		(1 << 5)
+#define FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK	(0xf << 5)
+#define FLITE_REG_CIGCTRL_SELCAM_MIPI		(1 << 3)
+
+/* Image Capture Enable */
+#define FLITE_REG_CIIMGCPT			0x08
+#define FLITE_REG_CIIMGCPT_IMGCPTEN		(1 << 31)
+#define FLITE_REG_CIIMGCPT_CPT_FREN		(1 << 25)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FRCNT	(1 << 18)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FREN		(0 << 18)
+
+/* Capture Sequence */
+#define FLITE_REG_CICPTSEQ			0x0c
+
+/* Camera Window Offset */
+#define FLITE_REG_CIWDOFST			0x10
+#define FLITE_REG_CIWDOFST_WINOFSEN		(1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVIY		(1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVFICB		(1 << 15)
+#define FLITE_REG_CIWDOFST_CLROVFICR		(1 << 14)
+#define FLITE_REG_CIWDOFST_OFST_MASK		((0x1fff << 16) | 0x1fff)
+
+/* Camera Window Offset2 */
+#define FLITE_REG_CIWDOFST2			0x14
+
+/* Camera Output DMA Format */
+#define FLITE_REG_CIODMAFMT			0x18
+#define FLITE_REG_CIODMAFMT_RAW_CON		(1 << 15)
+#define FLITE_REG_CIODMAFMT_PACK12		(1 << 14)
+#define FLITE_REG_CIODMAFMT_CRYCBY		(0 << 4)
+#define FLITE_REG_CIODMAFMT_CBYCRY		(1 << 4)
+#define FLITE_REG_CIODMAFMT_YCRYCB		(2 << 4)
+#define FLITE_REG_CIODMAFMT_YCBYCR		(3 << 4)
+#define FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK	(0x3 << 4)
+
+/* Camera Output Canvas */
+#define FLITE_REG_CIOCAN			0x20
+#define FLITE_REG_CIOCAN_MASK			((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Offset */
+#define FLITE_REG_CIOOFF			0x24
+#define FLITE_REG_CIOOFF_MASK			((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Start Address */
+#define FLITE_REG_CIOSA				0x30
+
+/* Camera Status */
+#define FLITE_REG_CISTATUS			0x40
+#define FLITE_REG_CISTATUS_MIPI_VVALID		(1 << 22)
+#define FLITE_REG_CISTATUS_MIPI_HVALID		(1 << 21)
+#define FLITE_REG_CISTATUS_MIPI_DVALID		(1 << 20)
+#define FLITE_REG_CISTATUS_ITU_VSYNC		(1 << 14)
+#define FLITE_REG_CISTATUS_ITU_HREFF		(1 << 13)
+#define FLITE_REG_CISTATUS_OVFIY		(1 << 10)
+#define FLITE_REG_CISTATUS_OVFICB		(1 << 9)
+#define FLITE_REG_CISTATUS_OVFICR		(1 << 8)
+#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW	(1 << 7)
+#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND	(1 << 6)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART	(1 << 5)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND	(1 << 4)
+#define FLITE_REG_CISTATUS_IRQ_CAM		(1 << 0)
+#define FLITE_REG_CISTATUS_IRQ_MASK		(0xf << 4)
+
+/* Camera Status2 */
+#define FLITE_REG_CISTATUS2			0x44
+#define FLITE_REG_CISTATUS2_LASTCAPEND		(1 << 1)
+#define FLITE_REG_CISTATUS2_FRMEND		(1 << 0)
+
+/* Qos Threshold */
+#define FLITE_REG_CITHOLD			0xf0
+#define FLITE_REG_CITHOLD_W_QOS_EN		(1 << 30)
+
+/* Camera General Purpose */
+#define FLITE_REG_CIGENERAL			0xfc
+/* b0: 1 - camera B, 0 - camera A */
+#define FLITE_REG_CIGENERAL_CAM_B		(1 << 0)
+
+/* ----------------------------------------------------------------------------
+ * Function declarations
+ */
+void flite_hw_reset(struct fimc_lite *dev);
+void flite_hw_clear_pending_irq(struct fimc_lite *dev);
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev);
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev);
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
+void flite_hw_capture_start(struct fimc_lite *dev);
+void flite_hw_capture_stop(struct fimc_lite *dev);
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+			     struct s5p_fimc_isp_info *s_info);
+void flite_hw_set_camera_polarity(struct fimc_lite *dev,
+				  struct s5p_fimc_isp_info *cam);
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
+
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+			     bool enable);
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on);
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label);
+
+static inline void flite_hw_set_output_addr(struct fimc_lite *dev, u32 paddr)
+{
+	writel(paddr, dev->regs + FLITE_REG_CIOSA);
+}
+#endif /* FIMC_LITE_REG_H_ */
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
new file mode 100644
index 0000000..c5b57e8
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -0,0 +1,1606 @@
+/*
+ * Samsung EXYNOS FIMC-LITE (camera host interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-mdevice.h"
+#include "fimc-core.h"
+#include "fimc-lite-reg.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static const struct fimc_fmt fimc_lite_formats[] = {
+	{
+		.name		= "YUV 4:2:2 packed, YCbYCr",
+		.fourcc		= V4L2_PIX_FMT_YUYV,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_YCBYCR422,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
+	}, {
+		.name		= "YUV 4:2:2 packed, CbYCrY",
+		.fourcc		= V4L2_PIX_FMT_UYVY,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_CBYCRY422,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_UYVY8_2X8,
+	}, {
+		.name		= "YUV 4:2:2 packed, CrYCbY",
+		.fourcc		= V4L2_PIX_FMT_VYUY,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_CRYCBY422,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_VYUY8_2X8,
+	}, {
+		.name		= "YUV 4:2:2 packed, YCrYCb",
+		.fourcc		= V4L2_PIX_FMT_YVYU,
+		.depth		= { 16 },
+		.color		= FIMC_FMT_YCRYCB422,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_YVYU8_2X8,
+	}, {
+		.name		= "RAW8 (GRBG)",
+		.fourcc		= V4L2_PIX_FMT_SGRBG8,
+		.depth		= { 8 },
+		.color		= FIMC_FMT_RAW8,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_SGRBG8_1X8,
+	}, {
+		.name		= "RAW10 (GRBG)",
+		.fourcc		= V4L2_PIX_FMT_SGRBG10,
+		.depth		= { 10 },
+		.color		= FIMC_FMT_RAW10,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_SGRBG10_1X10,
+	}, {
+		.name		= "RAW12 (GRBG)",
+		.fourcc		= V4L2_PIX_FMT_SGRBG12,
+		.depth		= { 12 },
+		.color		= FIMC_FMT_RAW12,
+		.memplanes	= 1,
+		.mbus_code	= V4L2_MBUS_FMT_SGRBG12_1X12,
+	},
+};
+
+/**
+ * fimc_lite_find_format - lookup fimc color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @index: index to the fimc_lite_formats array, ignored if negative
+ */
+static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
+					const u32 *mbus_code, int index)
+{
+	const struct fimc_fmt *fmt, *def_fmt = NULL;
+	unsigned int i;
+	int id = 0;
+
+	if (index >= (int)ARRAY_SIZE(fimc_lite_formats))
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(fimc_lite_formats); ++i) {
+		fmt = &fimc_lite_formats[i];
+		if (pixelformat && fmt->fourcc == *pixelformat)
+			return fmt;
+		if (mbus_code && fmt->mbus_code == *mbus_code)
+			return fmt;
+		if (index == id)
+			def_fmt = fmt;
+		id++;
+	}
+	return def_fmt;
+}
+
+static int fimc_lite_hw_init(struct fimc_lite *fimc)
+{
+	struct fimc_pipeline *pipeline = &fimc->pipeline;
+	struct fimc_sensor_info *sensor;
+	unsigned long flags;
+
+	if (pipeline->subdevs[IDX_SENSOR] == NULL)
+		return -ENXIO;
+
+	if (fimc->fmt == NULL)
+		return -EINVAL;
+
+	sensor = v4l2_get_subdev_hostdata(pipeline->subdevs[IDX_SENSOR]);
+	spin_lock_irqsave(&fimc->slock, flags);
+
+	flite_hw_set_camera_bus(fimc, sensor->pdata);
+	flite_hw_set_source_format(fimc, &fimc->inp_frame);
+	flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+	flite_hw_set_output_dma(fimc, &fimc->out_frame, true);
+	flite_hw_set_interrupt_mask(fimc);
+	flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+
+	if (debug > 0)
+		flite_hw_dump_regs(fimc, __func__);
+
+	spin_unlock_irqrestore(&fimc->slock, flags);
+	return 0;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
+{
+	struct flite_buffer *buf;
+	unsigned long flags;
+	bool streaming;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	streaming = fimc->state & (1 << ST_SENSOR_STREAM);
+
+	fimc->state &= ~(1 << ST_FLITE_RUN | 1 << ST_FLITE_OFF |
+			 1 << ST_FLITE_STREAM | 1 << ST_SENSOR_STREAM);
+	if (suspend)
+		fimc->state |= (1 << ST_FLITE_SUSPENDED);
+	else
+		fimc->state &= ~(1 << ST_FLITE_PENDING |
+				 1 << ST_FLITE_SUSPENDED);
+
+	/* Release unused buffers */
+	while (!suspend && !list_empty(&fimc->pending_buf_q)) {
+		buf = fimc_lite_pending_queue_pop(fimc);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+	/* If suspending put unused buffers onto pending queue */
+	while (!list_empty(&fimc->active_buf_q)) {
+		buf = fimc_lite_active_queue_pop(fimc);
+		if (suspend)
+			fimc_lite_pending_queue_add(fimc, buf);
+		else
+			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	flite_hw_reset(fimc);
+
+	if (!streaming)
+		return 0;
+
+	return fimc_pipeline_s_stream(&fimc->pipeline, 0);
+}
+
+static int fimc_lite_stop_capture(struct fimc_lite *fimc, bool suspend)
+{
+	unsigned long flags;
+
+	if (!fimc_lite_active(fimc))
+		return 0;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	set_bit(ST_FLITE_OFF, &fimc->state);
+	flite_hw_capture_stop(fimc);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	wait_event_timeout(fimc->irq_queue,
+			   !test_bit(ST_FLITE_OFF, &fimc->state),
+			   (2*HZ/10)); /* 200 ms */
+
+	return fimc_lite_reinit(fimc, suspend);
+}
+
+/* Must be called with fimc.slock spinlock held. */
+static void fimc_lite_config_update(struct fimc_lite *fimc)
+{
+	flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+	flite_hw_set_dma_window(fimc, &fimc->out_frame);
+	flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+	clear_bit(ST_FLITE_CONFIG, &fimc->state);
+}
+
+static irqreturn_t flite_irq_handler(int irq, void *priv)
+{
+	struct fimc_lite *fimc = priv;
+	struct flite_buffer *vbuf;
+	unsigned long flags;
+	struct timeval *tv;
+	struct timespec ts;
+	u32 intsrc;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+
+	intsrc = flite_hw_get_interrupt_source(fimc);
+	flite_hw_clear_pending_irq(fimc);
+
+	if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state)) {
+		wake_up(&fimc->irq_queue);
+		goto done;
+	}
+
+	if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW) {
+		clear_bit(ST_FLITE_RUN, &fimc->state);
+		fimc->events.data_overflow++;
+	}
+
+	if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND) {
+		flite_hw_clear_last_capture_end(fimc);
+		clear_bit(ST_FLITE_STREAM, &fimc->state);
+		wake_up(&fimc->irq_queue);
+	}
+
+	if (fimc->out_path != FIMC_IO_DMA)
+		goto done;
+
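+	/*
+	 * On frame start: return the completed active buffer to userspace
+	 * and program the next pending buffer's DMA address.
+	 */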
+	if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
+	    test_bit(ST_FLITE_RUN, &fimc->state) &&
+	    !list_empty(&fimc->active_buf_q) &&
+	    !list_empty(&fimc->pending_buf_q)) {
+		vbuf = fimc_lite_active_queue_pop(fimc);
+		ktime_get_ts(&ts);
+		tv = &vbuf->vb.v4l2_buf.timestamp;
+		tv->tv_sec = ts.tv_sec;
+		tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+		vbuf->vb.v4l2_buf.sequence = fimc->frame_count++;
+		vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+
+		vbuf = fimc_lite_pending_queue_pop(fimc);
+		flite_hw_set_output_addr(fimc, vbuf->paddr);
+		fimc_lite_active_queue_add(fimc, vbuf);
+	}
+
+	if (test_bit(ST_FLITE_CONFIG, &fimc->state))
+		fimc_lite_config_update(fimc);
+
+	if (list_empty(&fimc->pending_buf_q)) {
+		flite_hw_capture_stop(fimc);
+		clear_bit(ST_FLITE_STREAM, &fimc->state);
+	}
+done:
+	set_bit(ST_FLITE_RUN, &fimc->state);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+	return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct fimc_lite *fimc = q->drv_priv;
+	int ret;
+
+	fimc->frame_count = 0;
+
+	ret = fimc_lite_hw_init(fimc);
+	if (ret) {
+		fimc_lite_reinit(fimc, false);
+		return ret;
+	}
+
+	set_bit(ST_FLITE_PENDING, &fimc->state);
+
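+	/*
+	 * Start capturing immediately if buffers are already queued,
+	 * otherwise buffer_queue() kicks off the hardware later.
+	 */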
+	if (!list_empty(&fimc->active_buf_q) &&
+	    !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+		flite_hw_capture_start(fimc);
+
+		if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+			fimc_pipeline_s_stream(&fimc->pipeline, 1);
+	}
+	if (debug > 0)
+		flite_hw_dump_regs(fimc, __func__);
+
+	return 0;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+	struct fimc_lite *fimc = q->drv_priv;
+
+	if (!fimc_lite_active(fimc))
+		return -EINVAL;
+
+	return fimc_lite_stop_capture(fimc, false);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[], void *allocators[])
+{
+	const struct v4l2_pix_format_mplane *pixm = NULL;
+	struct fimc_lite *fimc = vq->drv_priv;
+	struct flite_frame *frame = &fimc->out_frame;
+	const struct fimc_fmt *fmt = fimc->fmt;
+	unsigned long wh;
+	int i;
+
+	if (pfmt) {
+		pixm = &pfmt->fmt.pix_mp;
+		fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, -1);
+		wh = pixm->width * pixm->height;
+	} else {
+		wh = frame->f_width * frame->f_height;
+	}
+
+	if (fmt == NULL)
+		return -EINVAL;
+
+	*num_planes = fmt->memplanes;
+
+	for (i = 0; i < fmt->memplanes; i++) {
+		unsigned int size = (wh * fmt->depth[i]) / 8;
+		if (pixm)
+			sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+		else
+			sizes[i] = size;
+		allocators[i] = fimc->alloc_ctx;
+	}
+
+	return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct fimc_lite *fimc = vq->drv_priv;
+	int i;
+
+	if (fimc->fmt == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < fimc->fmt->memplanes; i++) {
+		unsigned long size = fimc->payload[i];
+
+		if (vb2_plane_size(vb, i) < size) {
+			v4l2_err(fimc->vfd,
+				 "User buffer too small (%ld < %ld)\n",
+				 vb2_plane_size(vb, i), size);
+			return -EINVAL;
+		}
+		vb2_set_plane_payload(vb, i, size);
+	}
+
+	return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+	struct flite_buffer *buf
+		= container_of(vb, struct flite_buffer, vb);
+	struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
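+	/*
+	 * If the hardware is idle and no buffer is active yet, use this
+	 * buffer for the next frame directly; otherwise queue it as pending.
+	 */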
+	if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
+	    !test_bit(ST_FLITE_STREAM, &fimc->state) &&
+	    list_empty(&fimc->active_buf_q)) {
+		flite_hw_set_output_addr(fimc, buf->paddr);
+		fimc_lite_active_queue_add(fimc, buf);
+	} else {
+		fimc_lite_pending_queue_add(fimc, buf);
+	}
+
+	if (vb2_is_streaming(&fimc->vb_queue) &&
+	    !list_empty(&fimc->pending_buf_q) &&
+	    !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+		flite_hw_capture_start(fimc);
+		spin_unlock_irqrestore(&fimc->slock, flags);
+
+		if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+			fimc_pipeline_s_stream(&fimc->pipeline, 1);
+		return;
+	}
+	spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+	struct fimc_lite *fimc = vb2_get_drv_priv(vq);
+	mutex_lock(&fimc->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+	struct fimc_lite *fimc = vb2_get_drv_priv(vq);
+	mutex_unlock(&fimc->lock);
+}
+
+static const struct vb2_ops fimc_lite_qops = {
+	.queue_setup	 = queue_setup,
+	.buf_prepare	 = buffer_prepare,
+	.buf_queue	 = buffer_queue,
+	.wait_prepare	 = fimc_unlock,
+	.wait_finish	 = fimc_lock,
+	.start_streaming = start_streaming,
+	.stop_streaming	 = stop_streaming,
+};
+
+static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	memset(&fimc->events, 0, sizeof(fimc->events));
+	spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static int fimc_lite_open(struct file *file)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	set_bit(ST_FLITE_IN_USE, &fimc->state);
+	ret = pm_runtime_get_sync(&fimc->pdev->dev);
+	if (ret < 0)
+		goto done;
+
+	ret = v4l2_fh_open(file);
+	if (ret < 0)
+		goto done;
+
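+	/* Initialize the whole capture pipeline on the first open only */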
+	if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+		ret = fimc_pipeline_initialize(&fimc->pipeline,
+					       &fimc->vfd->entity, true);
+		if (ret < 0) {
+			pm_runtime_put_sync(&fimc->pdev->dev);
+			fimc->ref_count--;
+			v4l2_fh_release(file);
+			clear_bit(ST_FLITE_IN_USE, &fimc->state);
+		}
+
+		fimc_lite_clear_event_counters(fimc);
+	}
+done:
+	mutex_unlock(&fimc->lock);
+	return ret;
+}
+
+static int fimc_lite_close(struct file *file)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
+		clear_bit(ST_FLITE_IN_USE, &fimc->state);
+		fimc_lite_stop_capture(fimc, false);
+		fimc_pipeline_shutdown(&fimc->pipeline);
+		clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+	}
+
+	pm_runtime_put(&fimc->pdev->dev);
+
+	if (fimc->ref_count == 0)
+		vb2_queue_release(&fimc->vb_queue);
+
+	ret = v4l2_fh_release(file);
+
+	mutex_unlock(&fimc->lock);
+	return ret;
+}
+
+static unsigned int fimc_lite_poll(struct file *file,
+				   struct poll_table_struct *wait)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return POLLERR;
+
+	ret = vb2_poll(&fimc->vb_queue, file, wait);
+	mutex_unlock(&fimc->lock);
+
+	return ret;
+}
+
+static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	ret = vb2_mmap(&fimc->vb_queue, vma);
+	mutex_unlock(&fimc->lock);
+
+	return ret;
+}
+
+static const struct v4l2_file_operations fimc_lite_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fimc_lite_open,
+	.release	= fimc_lite_close,
+	.poll		= fimc_lite_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= fimc_lite_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static const struct fimc_fmt *fimc_lite_try_format(struct fimc_lite *fimc,
+					u32 *width, u32 *height,
+					u32 *code, u32 *fourcc, int pad)
+{
+	struct flite_variant *variant = fimc->variant;
+	const struct fimc_fmt *fmt;
+
+	fmt = fimc_lite_find_format(fourcc, code, 0);
+	if (WARN_ON(!fmt))
+		return NULL;
+
+	if (code)
+		*code = fmt->mbus_code;
+	if (fourcc)
+		*fourcc = fmt->fourcc;
+
+	if (pad == FLITE_SD_PAD_SINK) {
+		v4l_bound_align_image(width, 8, variant->max_width,
+				      ffs(variant->out_width_align) - 1,
+				      height, 0, variant->max_height, 0, 0);
+	} else {
+		v4l_bound_align_image(width, 8, fimc->inp_frame.rect.width,
+				      ffs(variant->out_width_align) - 1,
+				      height, 0, fimc->inp_frame.rect.height,
+				      0, 0);
+	}
+
+	v4l2_dbg(1, debug, &fimc->subdev, "code: 0x%x, %dx%d\n",
+		 code ? *code : 0, *width, *height);
+
+	return fmt;
+}
+
+static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+	struct flite_frame *frame = &fimc->inp_frame;
+
+	v4l_bound_align_image(&r->width, 0, frame->f_width, 0,
+			      &r->height, 0, frame->f_height, 0, 0);
+
+	/* Adjust left/top if cropping rectangle got out of bounds */
+	r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+	r->left = round_down(r->left, fimc->variant->win_hor_offs_align);
+	r->top  = clamp_t(u32, r->top, 0, frame->f_height - r->height);
+
+	v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d",
+		 r->left, r->top, r->width, r->height,
+		 frame->f_width, frame->f_height);
+}
+
+static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+	struct flite_frame *frame = &fimc->out_frame;
+	struct v4l2_rect *crop_rect = &fimc->inp_frame.rect;
+
+	/* Scaling is not supported so we enforce compose rectangle size
+	   same as size of the sink crop rectangle. */
+	r->width = crop_rect->width;
+	r->height = crop_rect->height;
+
+	/* Adjust left/top if the composing rectangle got out of bounds */
+	r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+	r->left = round_down(r->left, fimc->variant->out_hor_offs_align);
+	r->top  = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
+
+	v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d",
+		 r->left, r->top, r->width, r->height,
+		 frame->f_width, frame->f_height);
+}
+
+/*
+ * Video node ioctl operations
+ */
+static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
+					struct v4l2_capability *cap)
+{
+	strlcpy(cap->driver, FIMC_LITE_DRV_NAME, sizeof(cap->driver));
+	cap->bus_info[0] = 0;
+	cap->card[0] = 0;
+	cap->capabilities = V4L2_CAP_STREAMING;
+	return 0;
+}
+
+static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv,
+				     struct v4l2_fmtdesc *f)
+{
+	const struct fimc_fmt *fmt;
+
+	if (f->index >= ARRAY_SIZE(fimc_lite_formats))
+		return -EINVAL;
+
+	fmt = &fimc_lite_formats[f->index];
+	strlcpy(f->description, fmt->name, sizeof(f->description));
+	f->pixelformat = fmt->fourcc;
+
+	return 0;
+}
+
+static int fimc_lite_g_fmt_mplane(struct file *file, void *fh,
+				  struct v4l2_format *f)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+	struct v4l2_plane_pix_format *plane_fmt = &pixm->plane_fmt[0];
+	struct flite_frame *frame = &fimc->out_frame;
+	const struct fimc_fmt *fmt = fimc->fmt;
+
+	plane_fmt->bytesperline = (frame->f_width * fmt->depth[0]) / 8;
+	plane_fmt->sizeimage = plane_fmt->bytesperline * frame->f_height;
+
+	pixm->num_planes = fmt->memplanes;
+	pixm->pixelformat = fmt->fourcc;
+	pixm->width = frame->f_width;
+	pixm->height = frame->f_height;
+	pixm->field = V4L2_FIELD_NONE;
+	pixm->colorspace = V4L2_COLORSPACE_JPEG;
+	return 0;
+}
+
+static int fimc_lite_try_fmt(struct fimc_lite *fimc,
+			     struct v4l2_pix_format_mplane *pixm,
+			     const struct fimc_fmt **ffmt)
+{
+	struct flite_variant *variant = fimc->variant;
+	u32 bpl = pixm->plane_fmt[0].bytesperline;
+	const struct fimc_fmt *fmt;
+
+	fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, 0);
+	if (WARN_ON(fmt == NULL))
+		return -EINVAL;
+	if (ffmt)
+		*ffmt = fmt;
+	v4l_bound_align_image(&pixm->width, 8, variant->max_width,
+			      ffs(variant->out_width_align) - 1,
+			      &pixm->height, 0, variant->max_height, 0, 0);
+
+	if ((bpl == 0 || ((bpl * 8) / fmt->depth[0]) < pixm->width))
+		pixm->plane_fmt[0].bytesperline = (pixm->width *
+						   fmt->depth[0]) / 8;
+
+	if (pixm->plane_fmt[0].sizeimage == 0)
+		pixm->plane_fmt[0].sizeimage = (pixm->width * pixm->height *
+						fmt->depth[0]) / 8;
+	pixm->num_planes = fmt->memplanes;
+	pixm->pixelformat = fmt->fourcc;
+	pixm->colorspace = V4L2_COLORSPACE_JPEG;
+	pixm->field = V4L2_FIELD_NONE;
+	return 0;
+}
+
+static int fimc_lite_try_fmt_mplane(struct file *file, void *fh,
+				    struct v4l2_format *f)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+
+	return fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, NULL);
+}
+
+static int fimc_lite_s_fmt_mplane(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+	struct fimc_lite *fimc = video_drvdata(file);
+	struct flite_frame *frame = &fimc->out_frame;
+	const struct fimc_fmt *fmt = NULL;
+	int ret;
+
+	if (vb2_is_busy(&fimc->vb_queue))
+		return -EBUSY;
+
+	ret = fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, &fmt);
+	if (ret < 0)
+		return ret;
+
+	fimc->fmt = fmt;
+	fimc->payload[0] = max((pixm->width * pixm->height * fmt->depth[0]) / 8,
+			       pixm->plane_fmt[0].sizeimage);
+	frame->f_width = pixm->width;
+	frame->f_height = pixm->height;
+
+	return 0;
+}
+
+static int fimc_pipeline_validate(struct fimc_lite *fimc)
+{
+	struct v4l2_subdev *sd = &fimc->subdev;
+	struct v4l2_subdev_format sink_fmt, src_fmt;
+	struct media_pad *pad;
+	int ret;
+
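+	/* Walk the pipeline upstream and check that formats on each link match */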
+	while (1) {
+		/* Retrieve format at the sink pad */
+		pad = &sd->entity.pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+		/* Don't call FIMC subdev operation to avoid nested locking */
+		if (sd == &fimc->subdev) {
+			struct flite_frame *ff = &fimc->out_frame;
+			sink_fmt.format.width = ff->f_width;
+			sink_fmt.format.height = ff->f_height;
+			sink_fmt.format.code = fimc->fmt->mbus_code;
+		} else {
+			sink_fmt.pad = pad->index;
+			sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+			ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
+					       &sink_fmt);
+			if (ret < 0 && ret != -ENOIOCTLCMD)
+				return -EPIPE;
+		}
+		/* Retrieve format at the source pad */
+		pad = media_entity_remote_source(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		sd = media_entity_to_v4l2_subdev(pad->entity);
+		src_fmt.pad = pad->index;
+		src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+		if (ret < 0 && ret != -ENOIOCTLCMD)
+			return -EPIPE;
+
+		if (src_fmt.format.width != sink_fmt.format.width ||
+		    src_fmt.format.height != sink_fmt.format.height ||
+		    src_fmt.format.code != sink_fmt.format.code)
+			return -EPIPE;
+	}
+	return 0;
+}
+
+static int fimc_lite_streamon(struct file *file, void *priv,
+			      enum v4l2_buf_type type)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR];
+	struct fimc_pipeline *p = &fimc->pipeline;
+	int ret;
+
+	if (fimc_lite_active(fimc))
+		return -EBUSY;
+
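+	/* Claim the media pipeline and validate it before starting streaming */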
+	ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+	if (ret < 0)
+		return ret;
+
+	ret = fimc_pipeline_validate(fimc);
+	if (ret) {
+		media_entity_pipeline_stop(&sensor->entity);
+		return ret;
+	}
+
+	return vb2_streamon(&fimc->vb_queue, type);
+}
+
+static int fimc_lite_streamoff(struct file *file, void *priv,
+			       enum v4l2_buf_type type)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+	int ret;
+
+	ret = vb2_streamoff(&fimc->vb_queue, type);
+	if (ret == 0)
+		media_entity_pipeline_stop(&sd->entity);
+	return ret;
+}
+
+static int fimc_lite_reqbufs(struct file *file, void *priv,
+			     struct v4l2_requestbuffers *reqbufs)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	int ret;
+
+	reqbufs->count = max_t(u32, FLITE_REQ_BUFS_MIN, reqbufs->count);
+	ret = vb2_reqbufs(&fimc->vb_queue, reqbufs);
+	if (!ret)
+		fimc->reqbufs_count = reqbufs->count;
+
+	return ret;
+}
+
+static int fimc_lite_querybuf(struct file *file, void *priv,
+			      struct v4l2_buffer *buf)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+
+	return vb2_querybuf(&fimc->vb_queue, buf);
+}
+
+static int fimc_lite_qbuf(struct file *file, void *priv,
+			  struct v4l2_buffer *buf)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+
+	return vb2_qbuf(&fimc->vb_queue, buf);
+}
+
+static int fimc_lite_dqbuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+
+	return vb2_dqbuf(&fimc->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int fimc_lite_create_bufs(struct file *file, void *priv,
+				 struct v4l2_create_buffers *create)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+
+	return vb2_create_bufs(&fimc->vb_queue, create);
+}
+
+static int fimc_lite_prepare_buf(struct file *file, void *priv,
+				 struct v4l2_buffer *b)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+
+	return vb2_prepare_buf(&fimc->vb_queue, b);
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+	if (a->left < b->left || a->top < b->top)
+		return 0;
+	if (a->left + a->width > b->left + b->width)
+		return 0;
+	if (a->top + a->height > b->top + b->height)
+		return 0;
+
+	return 1;
+}
+
+static int fimc_lite_g_selection(struct file *file, void *fh,
+				 struct v4l2_selection *sel)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	struct flite_frame *f = &fimc->out_frame;
+
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = f->f_width;
+		sel->r.height = f->f_height;
+		return 0;
+
+	case V4L2_SEL_TGT_COMPOSE:
+		sel->r = f->rect;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int fimc_lite_s_selection(struct file *file, void *fh,
+				 struct v4l2_selection *sel)
+{
+	struct fimc_lite *fimc = video_drvdata(file);
+	struct flite_frame *f = &fimc->out_frame;
+	struct v4l2_rect rect = sel->r;
+	unsigned long flags;
+
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+	    sel->target != V4L2_SEL_TGT_COMPOSE)
+		return -EINVAL;
+
+	fimc_lite_try_compose(fimc, &rect);
+
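+	/*
+	 * V4L2_SEL_FLAG_LE requires the adjusted rectangle to be enclosed in
+	 * the requested one, V4L2_SEL_FLAG_GE requires it to cover it.
+	 */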
+	if ((sel->flags & V4L2_SEL_FLAG_LE) &&
+	    !enclosed_rectangle(&rect, &sel->r))
+		return -ERANGE;
+
+	if ((sel->flags & V4L2_SEL_FLAG_GE) &&
+	    !enclosed_rectangle(&sel->r, &rect))
+		return -ERANGE;
+
+	sel->r = rect;
+	spin_lock_irqsave(&fimc->slock, flags);
+	f->rect = rect;
+	set_bit(ST_FLITE_CONFIG, &fimc->state);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
+	.vidioc_querycap		= fimc_vidioc_querycap_capture,
+	.vidioc_enum_fmt_vid_cap_mplane	= fimc_lite_enum_fmt_mplane,
+	.vidioc_try_fmt_vid_cap_mplane	= fimc_lite_try_fmt_mplane,
+	.vidioc_s_fmt_vid_cap_mplane	= fimc_lite_s_fmt_mplane,
+	.vidioc_g_fmt_vid_cap_mplane	= fimc_lite_g_fmt_mplane,
+	.vidioc_g_selection		= fimc_lite_g_selection,
+	.vidioc_s_selection		= fimc_lite_s_selection,
+	.vidioc_reqbufs			= fimc_lite_reqbufs,
+	.vidioc_querybuf		= fimc_lite_querybuf,
+	.vidioc_prepare_buf		= fimc_lite_prepare_buf,
+	.vidioc_create_bufs		= fimc_lite_create_bufs,
+	.vidioc_qbuf			= fimc_lite_qbuf,
+	.vidioc_dqbuf			= fimc_lite_dqbuf,
+	.vidioc_streamon		= fimc_lite_streamon,
+	.vidioc_streamoff		= fimc_lite_streamoff,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_lite_link_setup(struct media_entity *entity,
+				const struct media_pad *local,
+				const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+	unsigned int remote_ent_type = media_entity_type(remote->entity);
+
+	if (WARN_ON(fimc == NULL))
+		return 0;
+
+	v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x",
+		 __func__, local->entity->name, remote->entity->name,
+		 flags, fimc->source_subdev_grp_id);
+
+	switch (local->index) {
+	case FIMC_SD_PAD_SINK:
+		if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV)
+			return -EINVAL;
+
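+		/* Only one enabled link to the sink pad is allowed at a time */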
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (fimc->source_subdev_grp_id != 0)
+				return -EBUSY;
+			fimc->source_subdev_grp_id = sd->grp_id;
+			return 0;
+		}
+
+		fimc->source_subdev_grp_id = 0;
+		break;
+
+	case FIMC_SD_PAD_SOURCE:
+		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+			fimc->out_path = FIMC_IO_NONE;
+			return 0;
+		}
+		if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
+			fimc->out_path = FIMC_IO_ISP;
+		else
+			fimc->out_path = FIMC_IO_DMA;
+		break;
+
+	default:
+		v4l2_err(sd, "Invalid pad index\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct media_entity_operations fimc_lite_subdev_media_ops = {
+	.link_setup = fimc_lite_link_setup,
+};
+
+static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+					   struct v4l2_subdev_fh *fh,
+					   struct v4l2_subdev_mbus_code_enum *code)
+{
+	const struct fimc_fmt *fmt;
+
+	fmt = fimc_lite_find_format(NULL, NULL, code->index);
+	if (!fmt)
+		return -EINVAL;
+	code->code = fmt->mbus_code;
+	return 0;
+}
+
+static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_fh *fh,
+				    struct v4l2_subdev_format *fmt)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *mf = &fmt->format;
+	struct flite_frame *f = &fimc->out_frame;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		fmt->format = *mf;
+		return 0;
+	}
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+	mutex_lock(&fimc->lock);
+	mf->code = fimc->fmt->mbus_code;
+
+	if (fmt->pad == FLITE_SD_PAD_SINK) {
+		/* full camera input frame size */
+		mf->width = f->f_width;
+		mf->height = f->f_height;
+	} else {
+		/* crop size */
+		mf->width = f->rect.width;
+		mf->height = f->rect.height;
+	}
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_fh *fh,
+				    struct v4l2_subdev_format *fmt)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *mf = &fmt->format;
+	struct flite_frame *sink = &fimc->inp_frame;
+	const struct fimc_fmt *ffmt;
+
+	v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d",
+		 fmt->pad, mf->code, mf->width, mf->height);
+
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mutex_lock(&fimc->lock);
+
+	if ((fimc->out_path == FIMC_IO_ISP && sd->entity.stream_count > 0) ||
+	    (fimc->out_path == FIMC_IO_DMA && vb2_is_busy(&fimc->vb_queue))) {
+		mutex_unlock(&fimc->lock);
+		return -EBUSY;
+	}
+
+	ffmt = fimc_lite_try_format(fimc, &mf->width, &mf->height,
+				    &mf->code, NULL, fmt->pad);
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		*mf = fmt->format;
+		mutex_unlock(&fimc->lock);
+		return 0;
+	}
+
+	if (fmt->pad == FLITE_SD_PAD_SINK) {
+		sink->f_width = mf->width;
+		sink->f_height = mf->height;
+		fimc->fmt = ffmt;
+		/* Set sink crop rectangle */
+		sink->rect.width = mf->width;
+		sink->rect.height = mf->height;
+		sink->rect.left = 0;
+		sink->rect.top = 0;
+		/* Reset source crop rectangle */
+		fimc->out_frame.rect = sink->rect;
+	} else {
+		/* Allow changing format only on sink pad */
+		mf->code = fimc->fmt->mbus_code;
+		mf->width = sink->rect.width;
+		mf->height = sink->rect.height;
+	}
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
+					  struct v4l2_subdev_fh *fh,
+					  struct v4l2_subdev_selection *sel)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+	struct flite_frame *f = &fimc->inp_frame;
+
+	if ((sel->target != V4L2_SEL_TGT_CROP &&
+	     sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+	     sel->pad != FLITE_SD_PAD_SINK)
+		return -EINVAL;
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+		sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+		return 0;
+	}
+
+	mutex_lock(&fimc->lock);
+	if (sel->target == V4L2_SEL_TGT_CROP) {
+		sel->r = f->rect;
+	} else {
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = f->f_width;
+		sel->r.height = f->f_height;
+	}
+	mutex_unlock(&fimc->lock);
+
+	v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+		 __func__, f->rect.left, f->rect.top, f->rect.width,
+		 f->rect.height, f->f_width, f->f_height);
+
+	return 0;
+}
+
+static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
+					  struct v4l2_subdev_fh *fh,
+					  struct v4l2_subdev_selection *sel)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+	struct flite_frame *f = &fimc->inp_frame;
+	int ret = 0;
+
+	if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != FLITE_SD_PAD_SINK)
+		return -EINVAL;
+
+	mutex_lock(&fimc->lock);
+	fimc_lite_try_crop(fimc, &sel->r);
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+		*v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+	} else {
+		unsigned long flags;
+		spin_lock_irqsave(&fimc->slock, flags);
+		f->rect = sel->r;
+		/* Same crop rectangle on the source pad */
+		fimc->out_frame.rect = sel->r;
+		set_bit(ST_FLITE_CONFIG, &fimc->state);
+		spin_unlock_irqrestore(&fimc->slock, flags);
+	}
+	mutex_unlock(&fimc->lock);
+
+	v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+		 __func__, f->rect.left, f->rect.top, f->rect.width,
+		 f->rect.height, f->f_width, f->f_height);
+
+	return ret;
+}
+
+static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+	if (fimc->out_path == FIMC_IO_DMA)
+		return -ENOIOCTLCMD;
+
+	/* TODO: */
+
+	return 0;
+}
+
+static int fimc_lite_subdev_s_power(struct v4l2_subdev *sd, int on)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+	if (fimc->out_path == FIMC_IO_DMA)
+		return -ENOIOCTLCMD;
+
+	/* TODO: */
+
+	return 0;
+}
+
+static int fimc_lite_log_status(struct v4l2_subdev *sd)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+	flite_hw_dump_regs(fimc, __func__);
+	return 0;
+}
+
+static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+	struct vb2_queue *q = &fimc->vb_queue;
+	struct video_device *vfd;
+	int ret;
+
+	fimc->fmt = &fimc_lite_formats[0];
+	fimc->out_path = FIMC_IO_DMA;
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(sd->v4l2_dev, "Failed to allocate video device\n");
+		return -ENOMEM;
+	}
+
+	snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
+		 fimc->index);
+
+	vfd->fops = &fimc_lite_fops;
+	vfd->ioctl_ops = &fimc_lite_ioctl_ops;
+	vfd->v4l2_dev = sd->v4l2_dev;
+	vfd->minor = -1;
+	vfd->release = video_device_release;
+	vfd->lock = &fimc->lock;
+	fimc->vfd = vfd;
+	fimc->ref_count = 0;
+	fimc->reqbufs_count = 0;
+
+	INIT_LIST_HEAD(&fimc->pending_buf_q);
+	INIT_LIST_HEAD(&fimc->active_buf_q);
+
+	memset(q, 0, sizeof(*q));
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->ops = &fimc_lite_qops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct flite_buffer);
+	q->drv_priv = fimc;
+
+	vb2_queue_init(q);
+
+	fimc->vd_pad.flags = MEDIA_PAD_FL_SINK;
+	ret = media_entity_init(&vfd->entity, 1, &fimc->vd_pad, 0);
+	if (ret)
+		goto err;
+
+	video_set_drvdata(vfd, fimc);
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto err_vd;
+
+	v4l2_info(sd->v4l2_dev, "Registered %s as /dev/%s\n",
+		  vfd->name, video_device_node_name(vfd));
+	return 0;
+
+ err_vd:
+	media_entity_cleanup(&vfd->entity);
+ err:
+	video_device_release(vfd);
+	return ret;
+}
+
+static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
+{
+	struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+	if (fimc == NULL)
+		return;
+
+	if (fimc->vfd) {
+		video_unregister_device(fimc->vfd);
+		media_entity_cleanup(&fimc->vfd->entity);
+		fimc->vfd = NULL;
+	}
+}
+
+static const struct v4l2_subdev_internal_ops fimc_lite_subdev_internal_ops = {
+	.registered = fimc_lite_subdev_registered,
+	.unregistered = fimc_lite_subdev_unregistered,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_lite_subdev_pad_ops = {
+	.enum_mbus_code = fimc_lite_subdev_enum_mbus_code,
+	.get_selection = fimc_lite_subdev_get_selection,
+	.set_selection = fimc_lite_subdev_set_selection,
+	.get_fmt = fimc_lite_subdev_get_fmt,
+	.set_fmt = fimc_lite_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
+	.s_stream = fimc_lite_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
+	.s_power = fimc_lite_subdev_s_power,
+	.log_status = fimc_lite_log_status,
+};
+
+static struct v4l2_subdev_ops fimc_lite_subdev_ops = {
+	.core = &fimc_lite_core_ops,
+	.video = &fimc_lite_subdev_video_ops,
+	.pad = &fimc_lite_subdev_pad_ops,
+};
+
+static int fimc_lite_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct fimc_lite *fimc = container_of(ctrl->handler, struct fimc_lite,
+					      ctrl_handler);
+	set_bit(ST_FLITE_CONFIG, &fimc->state);
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_lite_ctrl_ops = {
+	.s_ctrl	= fimc_lite_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fimc_lite_ctrl = {
+	.ops	= &fimc_lite_ctrl_ops,
+	.id	= V4L2_CTRL_CLASS_USER | 0x1001,
+	.type	= V4L2_CTRL_TYPE_BOOLEAN,
+	.name	= "Test Pattern 640x480",
+};
+
+static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
+{
+	struct v4l2_ctrl_handler *handler = &fimc->ctrl_handler;
+	struct v4l2_subdev *sd = &fimc->subdev;
+	int ret;
+
+	v4l2_subdev_init(sd, &fimc_lite_subdev_ops);
+	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
+
+	fimc->subdev_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	fimc->subdev_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+				fimc->subdev_pads, 0);
+	if (ret)
+		return ret;
+
+	v4l2_ctrl_handler_init(handler, 1);
+	fimc->test_pattern = v4l2_ctrl_new_custom(handler, &fimc_lite_ctrl,
+						  NULL);
+	if (handler->error) {
+		media_entity_cleanup(&sd->entity);
+		return handler->error;
+	}
+
+	sd->ctrl_handler = handler;
+	sd->internal_ops = &fimc_lite_subdev_internal_ops;
+	sd->entity.ops = &fimc_lite_subdev_media_ops;
+	v4l2_set_subdevdata(sd, fimc);
+
+	return 0;
+}
+
+static void fimc_lite_unregister_capture_subdev(struct fimc_lite *fimc)
+{
+	struct v4l2_subdev *sd = &fimc->subdev;
+
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	v4l2_ctrl_handler_free(&fimc->ctrl_handler);
+	v4l2_set_subdevdata(sd, NULL);
+}
+
+static void fimc_lite_clk_put(struct fimc_lite *fimc)
+{
+	if (IS_ERR_OR_NULL(fimc->clock))
+		return;
+
+	clk_unprepare(fimc->clock);
+	clk_put(fimc->clock);
+	fimc->clock = NULL;
+}
+
+static int fimc_lite_clk_get(struct fimc_lite *fimc)
+{
+	int ret;
+
+	fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
+	if (IS_ERR(fimc->clock))
+		return PTR_ERR(fimc->clock);
+
+	ret = clk_prepare(fimc->clock);
+	if (ret < 0) {
+		clk_put(fimc->clock);
+		fimc->clock = NULL;
+	}
+	return ret;
+}
+
+static int __devinit fimc_lite_probe(struct platform_device *pdev)
+{
+	struct flite_drvdata *drv_data = fimc_lite_get_drvdata(pdev);
+	struct fimc_lite *fimc;
+	struct resource *res;
+	int ret;
+
+	fimc = devm_kzalloc(&pdev->dev, sizeof(*fimc), GFP_KERNEL);
+	if (!fimc)
+		return -ENOMEM;
+
+	fimc->index = pdev->id;
+	fimc->variant = drv_data->variant[fimc->index];
+	fimc->pdev = pdev;
+
+	init_waitqueue_head(&fimc->irq_queue);
+	spin_lock_init(&fimc->slock);
+	mutex_init(&fimc->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fimc->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (fimc->regs == NULL) {
+		dev_err(&pdev->dev, "Failed to obtain io memory\n");
+		return -ENOENT;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Failed to get IRQ resource\n");
+		return -ENXIO;
+	}
+
+	ret = fimc_lite_clk_get(fimc);
+	if (ret)
+		return ret;
+
+	ret = devm_request_irq(&pdev->dev, res->start, flite_irq_handler,
+			       0, dev_name(&pdev->dev), fimc);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+		goto err_clk;
+	}
+
+	/* The video node will be created within the subdev's registered() op */
+	ret = fimc_lite_create_capture_subdev(fimc);
+	if (ret)
+		goto err_clk;
+
+	platform_set_drvdata(pdev, fimc);
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		goto err_sd;
+
+	fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(fimc->alloc_ctx)) {
+		ret = PTR_ERR(fimc->alloc_ctx);
+		goto err_pm;
+	}
+	pm_runtime_put(&pdev->dev);
+
+	dev_dbg(&pdev->dev, "FIMC-LITE.%d registered successfully\n",
+		fimc->index);
+	return 0;
+err_pm:
+	pm_runtime_put(&pdev->dev);
+err_sd:
+	fimc_lite_unregister_capture_subdev(fimc);
+err_clk:
+	fimc_lite_clk_put(fimc);
+	return ret;
+}
+
+static int fimc_lite_runtime_resume(struct device *dev)
+{
+	struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+	clk_enable(fimc->clock);
+	return 0;
+}
+
+static int fimc_lite_runtime_suspend(struct device *dev)
+{
+	struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+	clk_disable(fimc->clock);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_lite_resume(struct device *dev)
+{
+	struct fimc_lite *fimc = dev_get_drvdata(dev);
+	struct flite_buffer *buf;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+	    !test_bit(ST_FLITE_IN_USE, &fimc->state)) {
+		spin_unlock_irqrestore(&fimc->slock, flags);
+		return 0;
+	}
+	flite_hw_reset(fimc);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+
+	if (!test_and_clear_bit(ST_FLITE_SUSPENDED, &fimc->state))
+		return 0;
+
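+	/* Reinitialize the pipeline and requeue buffers that were pending */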
+	INIT_LIST_HEAD(&fimc->active_buf_q);
+	fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity, false);
+	fimc_lite_hw_init(fimc);
+	clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
+	for (i = 0; i < fimc->reqbufs_count; i++) {
+		if (list_empty(&fimc->pending_buf_q))
+			break;
+		buf = fimc_lite_pending_queue_pop(fimc);
+		buffer_queue(&buf->vb);
+	}
+	return 0;
+}
+
+static int fimc_lite_suspend(struct device *dev)
+{
+	struct fimc_lite *fimc = dev_get_drvdata(dev);
+	bool suspend = test_bit(ST_FLITE_IN_USE, &fimc->state);
+	int ret;
+
+	if (test_and_set_bit(ST_LPM, &fimc->state))
+		return 0;
+
+	ret = fimc_lite_stop_capture(fimc, suspend);
+	if (ret < 0 || !fimc_lite_active(fimc))
+		return ret;
+
+	return fimc_pipeline_shutdown(&fimc->pipeline);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __devexit fimc_lite_remove(struct platform_device *pdev)
+{
+	struct fimc_lite *fimc = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	fimc_lite_unregister_capture_subdev(fimc);
+	vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+	fimc_lite_clk_put(fimc);
+
+	dev_info(dev, "Driver unloaded\n");
+	return 0;
+}
+
+static struct flite_variant fimc_lite0_variant_exynos4 = {
+	.max_width		= 8192,
+	.max_height		= 8192,
+	.out_width_align	= 8,
+	.win_hor_offs_align	= 2,
+	.out_hor_offs_align	= 8,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static struct flite_drvdata fimc_lite_drvdata_exynos4 = {
+	.variant = {
+		[0] = &fimc_lite0_variant_exynos4,
+		[1] = &fimc_lite0_variant_exynos4,
+	},
+};
+
+static struct platform_device_id fimc_lite_driver_ids[] = {
+	{
+		.name		= "exynos-fimc-lite",
+		.driver_data	= (unsigned long)&fimc_lite_drvdata_exynos4,
+	},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, fimc_lite_driver_ids);
+
+static const struct dev_pm_ops fimc_lite_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimc_lite_suspend, fimc_lite_resume)
+	SET_RUNTIME_PM_OPS(fimc_lite_runtime_suspend, fimc_lite_runtime_resume,
+			   NULL)
+};
+
+static struct platform_driver fimc_lite_driver = {
+	.probe		= fimc_lite_probe,
+	.remove		= __devexit_p(fimc_lite_remove),
+	.id_table	= fimc_lite_driver_ids,
+	.driver = {
+		.name		= FIMC_LITE_DRV_NAME,
+		.owner		= THIS_MODULE,
+		.pm		= &fimc_lite_pm_ops,
+	}
+};
+module_platform_driver(fimc_lite_driver);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" FIMC_LITE_DRV_NAME);
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.h b/drivers/media/platform/s5p-fimc/fimc-lite.h
new file mode 100644
index 0000000..44424ee
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_H_
+#define FIMC_LITE_H_
+
+#include <asm/sizes.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-core.h"
+
+#define FIMC_LITE_DRV_NAME	"exynos-fimc-lite"
+#define FLITE_CLK_NAME		"flite"
+#define FIMC_LITE_MAX_DEVS	2
+#define FLITE_REQ_BUFS_MIN	2
+
+/* Bit index definitions for struct fimc_lite::state */
+enum {
+	ST_FLITE_LPM,
+	ST_FLITE_PENDING,
+	ST_FLITE_RUN,
+	ST_FLITE_STREAM,
+	ST_FLITE_SUSPENDED,
+	ST_FLITE_OFF,
+	ST_FLITE_IN_USE,
+	ST_FLITE_CONFIG,
+	ST_SENSOR_STREAM,
+};
+
+#define FLITE_SD_PAD_SINK	0
+#define FLITE_SD_PAD_SOURCE	1
+#define FLITE_SD_PADS_NUM	2
+
+struct flite_variant {
+	unsigned short max_width;
+	unsigned short max_height;
+	unsigned short out_width_align;
+	unsigned short win_hor_offs_align;
+	unsigned short out_hor_offs_align;
+};
+
+struct flite_drvdata {
+	struct flite_variant *variant[FIMC_LITE_MAX_DEVS];
+};
+
+#define fimc_lite_get_drvdata(_pdev) \
+	((struct flite_drvdata *) platform_get_device_id(_pdev)->driver_data)
+
+struct fimc_lite_events {
+	unsigned int data_overflow;
+};
+
+#define FLITE_MAX_PLANES	1
+
+/**
+ * struct flite_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ */
+struct flite_frame {
+	u16 f_width;
+	u16 f_height;
+	struct v4l2_rect rect;
+};
+
+/**
+ * struct flite_buffer - video buffer structure
+ * @vb:    vb2 buffer
+ * @list:  list head for the buffers queue
+ * @paddr: precalculated physical address
+ */
+struct flite_buffer {
+	struct vb2_buffer vb;
+	struct list_head list;
+	dma_addr_t paddr;
+};
+
+/**
+ * struct fimc_lite - fimc lite structure
+ * @pdev: pointer to FIMC-LITE platform device
+ * @variant: variant information for this IP
+ * @v4l2_dev: pointer to the top level v4l2_device
+ * @vfd: video device node
+ * @fh: v4l2 file handle
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @subdev: FIMC-LITE subdev
+ * @vd_pad: media (sink) pad for the capture video node
+ * @subdev_pads: the subdev media pads
+ * @ctrl_handler: v4l2 control handler
+ * @test_pattern: test pattern controls
+ * @index: FIMC-LITE platform device index
+ * @pipeline: video capture pipeline data structure
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @lock: mutex serializing video device and the subdev operations
+ * @clock: FIMC-LITE gate clock
+ * @regs: memory mapped io registers
+ * @irq_queue: interrupt handler waitqueue
+ * @fmt: pointer to color format description structure
+ * @payload: image size in bytes (w x h x bpp)
+ * @inp_frame: camera input frame structure
+ * @out_frame: DMA output frame structure
+ * @out_path: output data path (DMA or FIFO)
+ * @source_subdev_grp_id: source subdev group id
+ * @state: driver state flags
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffers queue
+ * @frame_count: the captured frames counter
+ * @reqbufs_count: the number of buffers requested with REQBUFS ioctl
+ * @ref_count: driver's private reference counter
+ * @events: event counters (currently only the data overflow count)
+ */
+struct fimc_lite {
+	struct platform_device	*pdev;
+	struct flite_variant	*variant;
+	struct v4l2_device	*v4l2_dev;
+	struct video_device	*vfd;
+	struct v4l2_fh		fh;
+	struct vb2_alloc_ctx	*alloc_ctx;
+	struct v4l2_subdev	subdev;
+	struct media_pad	vd_pad;
+	struct media_pad	subdev_pads[FLITE_SD_PADS_NUM];
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct v4l2_ctrl	*test_pattern;
+	u32			index;
+	struct fimc_pipeline	pipeline;
+
+	struct mutex		lock;
+	spinlock_t		slock;
+
+	struct clk		*clock;
+	void __iomem		*regs;
+	wait_queue_head_t	irq_queue;
+
+	const struct fimc_fmt	*fmt;
+	unsigned long		payload[FLITE_MAX_PLANES];
+	struct flite_frame	inp_frame;
+	struct flite_frame	out_frame;
+	enum fimc_datapath	out_path;
+	unsigned int		source_subdev_grp_id;
+
+	unsigned long		state;
+	struct list_head	pending_buf_q;
+	struct list_head	active_buf_q;
+	struct vb2_queue	vb_queue;
+	unsigned int		frame_count;
+	unsigned int		reqbufs_count;
+	int			ref_count;
+
+	struct fimc_lite_events	events;
+};
+
+static inline bool fimc_lite_active(struct fimc_lite *fimc)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&fimc->slock, flags);
+	ret = fimc->state & (1 << ST_FLITE_RUN) ||
+		fimc->state & (1 << ST_FLITE_PENDING);
+	spin_unlock_irqrestore(&fimc->slock, flags);
+	return ret;
+}
+
+static inline void fimc_lite_active_queue_add(struct fimc_lite *dev,
+					 struct flite_buffer *buf)
+{
+	list_add_tail(&buf->list, &dev->active_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_active_queue_pop(
+					struct fimc_lite *dev)
+{
+	struct flite_buffer *buf = list_entry(dev->active_buf_q.next,
+					      struct flite_buffer, list);
+	list_del(&buf->list);
+	return buf;
+}
+
+static inline void fimc_lite_pending_queue_add(struct fimc_lite *dev,
+					struct flite_buffer *buf)
+{
+	list_add_tail(&buf->list, &dev->pending_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_pending_queue_pop(
+					struct fimc_lite *dev)
+{
+	struct flite_buffer *buf = list_entry(dev->pending_buf_q.next,
+					      struct flite_buffer, list);
+	list_del(&buf->list);
+	return buf;
+}
+
+#endif /* FIMC_LITE_H_ */
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
new file mode 100644
index 0000000..c587011
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -0,0 +1,854 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "fimc-mdevice.h"
+
+
+static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+{
+	if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return FMT_FLAGS_M2M_IN;
+	else
+		return FMT_FLAGS_M2M_OUT;
+}
+
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
+{
+	struct vb2_buffer *src_vb, *dst_vb;
+
+	if (!ctx || !ctx->m2m_ctx)
+		return;
+
+	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+	if (src_vb && dst_vb) {
+		v4l2_m2m_buf_done(src_vb, vb_state);
+		v4l2_m2m_buf_done(dst_vb, vb_state);
+		v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
+				    ctx->m2m_ctx);
+	}
+}
+
+/* Complete the transaction which has been scheduled for execution. */
+static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	int ret;
+
+	if (!fimc_m2m_pending(fimc))
+		return 0;
+
+	fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
+
+	ret = wait_event_timeout(fimc->irq_queue,
+			   !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
+			   FIMC_SHUTDOWN_TIMEOUT);
+
+	return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct fimc_ctx *ctx = q->drv_priv;
+	int ret;
+
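+	/* pm_runtime_get_sync() returns 1 if the device was already active */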
+	ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+	return ret > 0 ? 0 : ret;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+	struct fimc_ctx *ctx = q->drv_priv;
+	int ret;
+
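+	/* Wait for the running job to finish; on timeout complete it with error */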
+	ret = fimc_m2m_shutdown(ctx);
+	if (ret == -ETIMEDOUT)
+		fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+	pm_runtime_put(&ctx->fimc_dev->pdev->dev);
+	return 0;
+}
+
+static void fimc_device_run(void *priv)
+{
+	struct vb2_buffer *vb = NULL;
+	struct fimc_ctx *ctx = priv;
+	struct fimc_frame *sf, *df;
+	struct fimc_dev *fimc;
+	unsigned long flags;
+	u32 ret;
+
+	if (WARN(!ctx, "Null context\n"))
+		return;
+
+	fimc = ctx->fimc_dev;
+	spin_lock_irqsave(&fimc->slock, flags);
+
+	set_bit(ST_M2M_PEND, &fimc->state);
+	sf = &ctx->s_frame;
+	df = &ctx->d_frame;
+
+	if (ctx->state & FIMC_PARAMS) {
+		/* Prepare the DMA offsets for scaler */
+		fimc_prepare_dma_offset(ctx, sf);
+		fimc_prepare_dma_offset(ctx, df);
+	}
+
+	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	ret = fimc_prepare_addr(ctx, vb, sf, &sf->paddr);
+	if (ret)
+		goto dma_unlock;
+
+	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	ret = fimc_prepare_addr(ctx, vb, df, &df->paddr);
+	if (ret)
+		goto dma_unlock;
+
+	/* Reconfigure hardware if the context has changed. */
+	if (fimc->m2m.ctx != ctx) {
+		ctx->state |= FIMC_PARAMS;
+		fimc->m2m.ctx = ctx;
+	}
+
+	if (ctx->state & FIMC_PARAMS) {
+		fimc_set_yuv_order(ctx);
+		fimc_hw_set_input_path(ctx);
+		fimc_hw_set_in_dma(ctx);
+		ret = fimc_set_scaler_info(ctx);
+		if (ret)
+			goto dma_unlock;
+		fimc_hw_set_prescaler(ctx);
+		fimc_hw_set_mainscaler(ctx);
+		fimc_hw_set_target_format(ctx);
+		fimc_hw_set_rotation(ctx);
+		fimc_hw_set_effect(ctx);
+		fimc_hw_set_out_dma(ctx);
+		if (fimc->variant->has_alpha)
+			fimc_hw_set_rgb_alpha(ctx);
+		fimc_hw_set_output_path(ctx);
+	}
+	fimc_hw_set_input_addr(fimc, &sf->paddr);
+	fimc_hw_set_output_addr(fimc, &df->paddr, -1);
+
+	fimc_activate_capture(ctx);
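+	/* Keep only the context type and format flags in the context state */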
+	ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
+		       FIMC_SRC_FMT | FIMC_DST_FMT);
+	fimc_hw_activate_input_dma(fimc, true);
+
+dma_unlock:
+	spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_job_abort(void *priv)
+{
+	fimc_m2m_shutdown(priv);
+}
+
+static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+			    unsigned int *num_buffers, unsigned int *num_planes,
+			    unsigned int sizes[], void *allocators[])
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+	struct fimc_frame *f;
+	int i;
+
+	f = ctx_get_frame(ctx, vq->type);
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+	/*
+	 * Return number of non-contiguous planes (plane buffers)
+	 * depending on the configured color format.
+	 */
+	if (!f->fmt)
+		return -EINVAL;
+
+	*num_planes = f->fmt->memplanes;
+	for (i = 0; i < f->fmt->memplanes; i++) {
+		sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
+		allocators[i] = ctx->fimc_dev->alloc_ctx;
+	}
+	return 0;
+}
+
+static int fimc_buf_prepare(struct vb2_buffer *vb)
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct fimc_frame *frame;
+	int i;
+
+	frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+	if (IS_ERR(frame))
+		return PTR_ERR(frame);
+
+	for (i = 0; i < frame->fmt->memplanes; i++)
+		vb2_set_plane_payload(vb, i, frame->payload[i]);
+
+	return 0;
+}
+
+static void fimc_buf_queue(struct vb2_buffer *vb)
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
+	if (ctx->m2m_ctx)
+		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_lock(&ctx->fimc_dev->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_unlock(&ctx->fimc_dev->lock);
+}
+
+static struct vb2_ops fimc_qops = {
+	.queue_setup	 = fimc_queue_setup,
+	.buf_prepare	 = fimc_buf_prepare,
+	.buf_queue	 = fimc_buf_queue,
+	.wait_prepare	 = fimc_unlock,
+	.wait_finish	 = fimc_lock,
+	.stop_streaming	 = stop_streaming,
+	.start_streaming = start_streaming,
+};
+
+/*
+ * V4L2 ioctl handlers
+ */
+static int fimc_m2m_querycap(struct file *file, void *fh,
+			     struct v4l2_capability *cap)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+
+	strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+	strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+	cap->bus_info[0] = 0;
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left only for backward compatibility
+	 * and are scheduled for removal.
+	 */
+	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
+		V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+	return 0;
+}
+
+static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+				    struct v4l2_fmtdesc *f)
+{
+	struct fimc_fmt *fmt;
+
+	fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
+			       f->index);
+	if (!fmt)
+		return -EINVAL;
+
+	strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+	f->pixelformat = fmt->fourcc;
+	return 0;
+}
+
+static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
+				 struct v4l2_format *f)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+	struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+
+	if (IS_ERR(frame))
+		return PTR_ERR(frame);
+
+	return fimc_fill_format(frame, f);
+}
+
+static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_variant *variant = fimc->variant;
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+	struct fimc_fmt *fmt;
+	u32 max_w, mod_x, mod_y;
+
+	if (!IS_M2M(f->type))
+		return -EINVAL;
+
+	dbg("w: %d, h: %d", pix->width, pix->height);
+
+	fmt = fimc_find_format(&pix->pixelformat, NULL,
+			       get_m2m_fmt_flags(f->type), 0);
+	if (WARN(fmt == NULL, "Pixel format lookup failed"))
+		return -EINVAL;
+
+	if (pix->field == V4L2_FIELD_ANY)
+		pix->field = V4L2_FIELD_NONE;
+	else if (pix->field != V4L2_FIELD_NONE)
+		return -EINVAL;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		max_w = variant->pix_limit->scaler_dis_w;
+		mod_x = ffs(variant->min_inp_pixsize) - 1;
+	} else {
+		max_w = variant->pix_limit->out_rot_dis_w;
+		mod_x = ffs(variant->min_out_pixsize) - 1;
+	}
+
+	if (tiled_fmt(fmt)) {
+		mod_x = 6; /* 64 x 32 pixels tile */
+		mod_y = 5;
+	} else {
+		if (variant->min_vsize_align == 1)
+			mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+		else
+			mod_y = ffs(variant->min_vsize_align) - 1;
+	}
+
+	v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
+		&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
+
+	fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
+	return 0;
+}
+
+static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
+				   struct v4l2_format *f)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	return fimc_try_fmt_mplane(ctx, f);
+}
+
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
+				 struct v4l2_format *f)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct vb2_queue *vq;
+	struct fimc_frame *frame;
+	struct v4l2_pix_format_mplane *pix;
+	int i, ret = 0;
+
+	ret = fimc_try_fmt_mplane(ctx, f);
+	if (ret)
+		return ret;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(fimc->m2m.vfd, "queue (%d) busy\n", f->type);
+		return -EBUSY;
+	}
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		frame = &ctx->s_frame;
+	else
+		frame = &ctx->d_frame;
+
+	pix = &f->fmt.pix_mp;
+	frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
+				      get_m2m_fmt_flags(f->type), 0);
+	if (!frame->fmt)
+		return -EINVAL;
+
+	/* Update RGB Alpha control state and value range */
+	fimc_alpha_ctrl_update(ctx);
+
+	for (i = 0; i < frame->fmt->colplanes; i++) {
+		frame->payload[i] =
+			(pix->width * pix->height * frame->fmt->depth[i]) / 8;
+	}
+
+	fimc_fill_frame(frame, f);
+
+	ctx->scaler.enabled = 1;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		fimc_ctx_state_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
+	else
+		fimc_ctx_state_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
+
+	dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
+
+	return 0;
+}
+
+static int fimc_m2m_reqbufs(struct file *file, void *fh,
+			    struct v4l2_requestbuffers *reqbufs)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int fimc_m2m_querybuf(struct file *file, void *fh,
+			     struct v4l2_buffer *buf)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_qbuf(struct file *file, void *fh,
+			 struct v4l2_buffer *buf)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_dqbuf(struct file *file, void *fh,
+			  struct v4l2_buffer *buf)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_streamon(struct file *file, void *fh,
+			     enum v4l2_buf_type type)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	/* The source and target color formats need to be set */
+	if (V4L2_TYPE_IS_OUTPUT(type)) {
+		if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
+			return -EINVAL;
+	} else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
+		return -EINVAL;
+	}
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_streamoff(struct file *file, void *fh,
+			    enum v4l2_buf_type type)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_cropcap(struct file *file, void *fh,
+			    struct v4l2_cropcap *cr)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+	struct fimc_frame *frame;
+
+	frame = ctx_get_frame(ctx, cr->type);
+	if (IS_ERR(frame))
+		return PTR_ERR(frame);
+
+	cr->bounds.left = 0;
+	cr->bounds.top = 0;
+	cr->bounds.width = frame->o_width;
+	cr->bounds.height = frame->o_height;
+	cr->defrect = cr->bounds;
+
+	return 0;
+}
+
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+	struct fimc_frame *frame;
+
+	frame = ctx_get_frame(ctx, cr->type);
+	if (IS_ERR(frame))
+		return PTR_ERR(frame);
+
+	cr->c.left = frame->offs_h;
+	cr->c.top = frame->offs_v;
+	cr->c.width = frame->width;
+	cr->c.height = frame->height;
+
+	return 0;
+}
+
+static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+{
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_frame *f;
+	u32 min_size, halign, depth = 0;
+	int i;
+
+	if (cr->c.top < 0 || cr->c.left < 0) {
+		v4l2_err(fimc->m2m.vfd,
+			"doesn't support negative values for top & left\n");
+		return -EINVAL;
+	}
+	if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		f = &ctx->d_frame;
+	else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		f = &ctx->s_frame;
+	else
+		return -EINVAL;
+
+	min_size = (f == &ctx->s_frame) ?
+		fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
+
+	/* Get pixel alignment constraints. */
+	if (fimc->variant->min_vsize_align == 1)
+		halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+	else
+		halign = ffs(fimc->variant->min_vsize_align) - 1;
+
+	for (i = 0; i < f->fmt->colplanes; i++)
+		depth += f->fmt->depth[i];
+
+	v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+			      ffs(min_size) - 1,
+			      &cr->c.height, min_size, f->o_height,
+			      halign, 64/(ALIGN(depth, 8)));
+
+	/* adjust left/top if cropping rectangle is out of bounds */
+	if (cr->c.left + cr->c.width > f->o_width)
+		cr->c.left = f->o_width - cr->c.width;
+	if (cr->c.top + cr->c.height > f->o_height)
+		cr->c.top = f->o_height - cr->c.height;
+
+	cr->c.left = round_down(cr->c.left, min_size);
+	cr->c.top  = round_down(cr->c.top, fimc->variant->hor_offs_align);
+
+	dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+	    cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+	    f->f_width, f->f_height);
+
+	return 0;
+}
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(fh);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	struct fimc_frame *f;
+	int ret;
+
+	ret = fimc_m2m_try_crop(ctx, cr);
+	if (ret)
+		return ret;
+
+	f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+		&ctx->s_frame : &ctx->d_frame;
+
+	/* Check to see if scaling ratio is within supported range */
+	if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
+		if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			ret = fimc_check_scaler_ratio(ctx, cr->c.width,
+					cr->c.height, ctx->d_frame.width,
+					ctx->d_frame.height, ctx->rotation);
+		} else {
+			ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+					ctx->s_frame.height, cr->c.width,
+					cr->c.height, ctx->rotation);
+		}
+		if (ret) {
+			v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
+			return -EINVAL;
+		}
+	}
+
+	f->offs_h = cr->c.left;
+	f->offs_v = cr->c.top;
+	f->width  = cr->c.width;
+	f->height = cr->c.height;
+
+	fimc_ctx_state_set(FIMC_PARAMS, ctx);
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
+	.vidioc_querycap		= fimc_m2m_querycap,
+	.vidioc_enum_fmt_vid_cap_mplane	= fimc_m2m_enum_fmt_mplane,
+	.vidioc_enum_fmt_vid_out_mplane	= fimc_m2m_enum_fmt_mplane,
+	.vidioc_g_fmt_vid_cap_mplane	= fimc_m2m_g_fmt_mplane,
+	.vidioc_g_fmt_vid_out_mplane	= fimc_m2m_g_fmt_mplane,
+	.vidioc_try_fmt_vid_cap_mplane	= fimc_m2m_try_fmt_mplane,
+	.vidioc_try_fmt_vid_out_mplane	= fimc_m2m_try_fmt_mplane,
+	.vidioc_s_fmt_vid_cap_mplane	= fimc_m2m_s_fmt_mplane,
+	.vidioc_s_fmt_vid_out_mplane	= fimc_m2m_s_fmt_mplane,
+	.vidioc_reqbufs			= fimc_m2m_reqbufs,
+	.vidioc_querybuf		= fimc_m2m_querybuf,
+	.vidioc_qbuf			= fimc_m2m_qbuf,
+	.vidioc_dqbuf			= fimc_m2m_dqbuf,
+	.vidioc_streamon		= fimc_m2m_streamon,
+	.vidioc_streamoff		= fimc_m2m_streamoff,
+	.vidioc_g_crop			= fimc_m2m_g_crop,
+	.vidioc_s_crop			= fimc_m2m_s_crop,
+	.vidioc_cropcap			= fimc_m2m_cropcap,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct fimc_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->ops = &fimc_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->ops = &fimc_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+	return vb2_queue_init(dst_vq);
+}
+
+static int fimc_m2m_open(struct file *file)
+{
+	struct fimc_dev *fimc = video_drvdata(file);
+	struct fimc_ctx *ctx;
+	int ret = -EBUSY;
+
+	dbg("pid: %d, state: 0x%lx, refcnt: %d",
+	    task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+	/*
+	 * Return if the corresponding video capture node
+	 * is already opened.
+	 */
+	if (fimc->vid_cap.refcnt > 0)
+		goto unlock;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+	v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
+	ctx->fimc_dev = fimc;
+
+	/* Default color format */
+	ctx->s_frame.fmt = fimc_get_format(0);
+	ctx->d_frame.fmt = fimc_get_format(0);
+
+	ret = fimc_ctrls_create(ctx);
+	if (ret)
+		goto error_fh;
+
+	/* Use separate control handler per file handle */
+	ctx->fh.ctrl_handler = &ctx->ctrls.handler;
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+
+	/* Setup the device context for memory-to-memory mode */
+	ctx->state = FIMC_CTX_M2M;
+	ctx->flags = 0;
+	ctx->in_path = FIMC_IO_DMA;
+	ctx->out_path = FIMC_IO_DMA;
+
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+	if (IS_ERR(ctx->m2m_ctx)) {
+		ret = PTR_ERR(ctx->m2m_ctx);
+		goto error_c;
+	}
+
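+	/* Flag the m2m device as active on the first open */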
+	if (fimc->m2m.refcnt++ == 0)
+		set_bit(ST_M2M_RUN, &fimc->state);
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+
+error_c:
+	fimc_ctrls_delete(ctx);
+error_fh:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+unlock:
+	mutex_unlock(&fimc->lock);
+	return ret;
+}
+
+static int fimc_m2m_release(struct file *file)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+
+	dbg("pid: %d, state: 0x%lx, refcnt= %d",
+		task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	fimc_ctrls_delete(ctx);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+
+	if (--fimc->m2m.refcnt <= 0)
+		clear_bit(ST_M2M_RUN, &fimc->state);
+	kfree(ctx);
+
+	mutex_unlock(&fimc->lock);
+	return 0;
+}
+
+static unsigned int fimc_m2m_poll(struct file *file,
+				  struct poll_table_struct *wait)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	mutex_unlock(&fimc->lock);
+
+	return ret;
+}
+
+
+static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+	struct fimc_dev *fimc = ctx->fimc_dev;
+	int ret;
+
+	if (mutex_lock_interruptible(&fimc->lock))
+		return -ERESTARTSYS;
+
+	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+	mutex_unlock(&fimc->lock);
+
+	return ret;
+}
+
+static const struct v4l2_file_operations fimc_m2m_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fimc_m2m_open,
+	.release	= fimc_m2m_release,
+	.poll		= fimc_m2m_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= fimc_m2m_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= fimc_device_run,
+	.job_abort	= fimc_job_abort,
+};
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+			     struct v4l2_device *v4l2_dev)
+{
+	struct video_device *vfd;
+	struct platform_device *pdev;
+	int ret = 0;
+
+	if (!fimc)
+		return -ENODEV;
+
+	pdev = fimc->pdev;
+	fimc->v4l2_dev = v4l2_dev;
+
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+		return -ENOMEM;
+	}
+
+	vfd->fops = &fimc_m2m_fops;
+	vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+	vfd->v4l2_dev = v4l2_dev;
+	vfd->minor = -1;
+	vfd->release = video_device_release;
+	vfd->lock = &fimc->lock;
+
+	snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
+	video_set_drvdata(vfd, fimc);
+
+	fimc->m2m.vfd = vfd;
+	fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(fimc->m2m.m2m_dev)) {
+		v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
+		ret = PTR_ERR(fimc->m2m.m2m_dev);
+		goto err_init;
+	}
+
+	ret = media_entity_init(&vfd->entity, 0, NULL, 0);
+	if (ret)
+		goto err_me;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto err_vd;
+
+	v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+		  vfd->name, video_device_node_name(vfd));
+	return 0;
+
+err_vd:
+	media_entity_cleanup(&vfd->entity);
+err_me:
+	v4l2_m2m_release(fimc->m2m.m2m_dev);
+err_init:
+	video_device_release(fimc->m2m.vfd);
+	return ret;
+}
+
+void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+{
+	if (!fimc)
+		return;
+
+	if (fimc->m2m.m2m_dev)
+		v4l2_m2m_release(fimc->m2m.m2m_dev);
+	if (fimc->m2m.vfd) {
+		media_entity_cleanup(&fimc->m2m.vfd->entity);
+		/* Can also be called if video device wasn't registered */
+		video_unregister_device(fimc->m2m.vfd);
+	}
+}
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
new file mode 100644
index 0000000..e65bb28
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -0,0 +1,1037 @@
+/*
+ * S5P/EXYNOS4 SoC series camera host interface media device driver
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/media-device.h>
+
+#include "fimc-core.h"
+#include "fimc-lite.h"
+#include "fimc-mdevice.h"
+#include "mipi-csis.h"
+
+static int __fimc_md_set_camclk(struct fimc_md *fmd,
+				struct fimc_sensor_info *s_info,
+				bool on);
+/**
+ * fimc_pipeline_prepare - update pipeline information with subdevice pointers
+ * @p: fimc pipeline to update
+ * @me: media entity terminating the pipeline
+ *
+ * Caller holds the graph mutex.
+ */
+void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me)
+{
+	struct media_pad *pad = &me->pads[0];
+	struct v4l2_subdev *sd;
+	int i;
+
+	for (i = 0; i < IDX_MAX; i++)
+		p->subdevs[i] = NULL;
+
+	while (1) {
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		/* source pad */
+		pad = media_entity_remote_source(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		sd = media_entity_to_v4l2_subdev(pad->entity);
+
+		switch (sd->grp_id) {
+		case SENSOR_GROUP_ID:
+			p->subdevs[IDX_SENSOR] = sd;
+			break;
+		case CSIS_GROUP_ID:
+			p->subdevs[IDX_CSIS] = sd;
+			break;
+		case FLITE_GROUP_ID:
+			p->subdevs[IDX_FLITE] = sd;
+			break;
+		case FIMC_GROUP_ID:
+			/* No need to control FIMC subdev through subdev ops */
+			break;
+		default:
+			pr_warn("%s: Unknown subdev grp_id: %#x\n",
+				__func__, sd->grp_id);
+		}
+		/* sink pad */
+		pad = &sd->entity.pads[0];
+	}
+}
+
+/**
+ * __subdev_set_power - change power state of a single subdev
+ * @sd: subdevice to change power state for
+ * @on: 1 to enable power or 0 to disable
+ *
+ * Return result of s_power subdev operation or -ENXIO if sd argument
+ * is NULL. Return 0 if the subdevice does not implement s_power.
+ */
+static int __subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+	int *use_count;
+	int ret;
+
+	if (sd == NULL)
+		return -ENXIO;
+
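+	/* Call s_power only on the first power-up and the last power-down */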
+	use_count = &sd->entity.use_count;
+	if (on && (*use_count)++ > 0)
+		return 0;
+	else if (!on && (*use_count == 0 || --(*use_count) > 0))
+		return 0;
+	ret = v4l2_subdev_call(sd, core, s_power, on);
+
+	return ret != -ENOIOCTLCMD ? ret : 0;
+}
+
+/**
+ * fimc_pipeline_s_power - change power state of all pipeline subdevs
+ * @p: fimc pipeline whose subdevs' power state is to be changed
+ * @state: true to power on, false to power off
+ *
+ * Needs to be called with the graph mutex held.
+ */
+int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state)
+{
+	unsigned int i;
+	int ret;
+
+	if (p->subdevs[IDX_SENSOR] == NULL)
+		return -ENXIO;
+
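+	/* Power subdevs up in reverse index order and down in direct order */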
+	for (i = 0; i < IDX_MAX; i++) {
+		unsigned int idx = state ? (IDX_MAX - 1) - i : i;
+
+		ret = __subdev_set_power(p->subdevs[idx], state);
+		if (ret < 0 && ret != -ENXIO)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * __fimc_pipeline_initialize - update the pipeline information, enable power
+ *                              of all pipeline subdevs and the sensor clock
+ * @p: fimc pipeline to initialize
+ * @me: media entity to start graph walk with
+ * @prep: true to acquire sensor (and csis) subdevs
+ *
+ * This function must be called with the graph mutex held.
+ */
+static int __fimc_pipeline_initialize(struct fimc_pipeline *p,
+				      struct media_entity *me, bool prep)
+{
+	int ret;
+
+	if (prep)
+		fimc_pipeline_prepare(p, me);
+
+	if (p->subdevs[IDX_SENSOR] == NULL)
+		return -EINVAL;
+
+	ret = fimc_md_set_camclk(p->subdevs[IDX_SENSOR], true);
+	if (ret)
+		return ret;
+
+	return fimc_pipeline_s_power(p, 1);
+}
+
+int fimc_pipeline_initialize(struct fimc_pipeline *p, struct media_entity *me,
+			     bool prep)
+{
+	int ret;
+
+	mutex_lock(&me->parent->graph_mutex);
+	ret = __fimc_pipeline_initialize(p, me, prep);
+	mutex_unlock(&me->parent->graph_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fimc_pipeline_initialize);
+
+/**
+ * __fimc_pipeline_shutdown - disable the sensor clock and pipeline power
+ * @p: fimc pipeline to shut down
+ *
+ * Disable power of all subdevs in the pipeline and turn off the external
+ * sensor clock.
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
+{
+	int ret = 0;
+
+	if (p->subdevs[IDX_SENSOR]) {
+		ret = fimc_pipeline_s_power(p, 0);
+		fimc_md_set_camclk(p->subdevs[IDX_SENSOR], false);
+	}
+	return ret == -ENXIO ? 0 : ret;
+}
+
+int fimc_pipeline_shutdown(struct fimc_pipeline *p)
+{
+	struct media_entity *me;
+	int ret;
+
+	if (!p || !p->subdevs[IDX_SENSOR])
+		return -EINVAL;
+
+	me = &p->subdevs[IDX_SENSOR]->entity;
+	mutex_lock(&me->parent->graph_mutex);
+	ret = __fimc_pipeline_shutdown(p);
+	mutex_unlock(&me->parent->graph_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fimc_pipeline_shutdown);
+
+/**
+ * fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
+ * @p: video pipeline structure
+ * @on: passed as the s_stream call argument
+ */
+int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
+{
+	int i, ret;
+
+	if (p->subdevs[IDX_SENSOR] == NULL)
+		return -ENODEV;
+
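+	/* Enable streaming in reverse index order, disable in direct order */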
+	for (i = 0; i < IDX_MAX; i++) {
+		unsigned int idx = on ? (IDX_MAX - 1) - i : i;
+
+		ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
+
+		if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fimc_pipeline_s_stream);
+
+/*
+ * Sensor subdevice helper functions
+ */
+static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
+				   struct fimc_sensor_info *s_info)
+{
+	struct i2c_adapter *adapter;
+	struct v4l2_subdev *sd = NULL;
+
+	if (!s_info || !fmd)
+		return NULL;
+
+	adapter = i2c_get_adapter(s_info->pdata->i2c_bus_num);
+	if (!adapter) {
+		v4l2_warn(&fmd->v4l2_dev,
+			  "Failed to get I2C adapter %d, deferring probe\n",
+			  s_info->pdata->i2c_bus_num);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+	sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
+				       s_info->pdata->board_info, NULL);
+	if (IS_ERR_OR_NULL(sd)) {
+		i2c_put_adapter(adapter);
+		v4l2_warn(&fmd->v4l2_dev,
+			  "Failed to acquire subdev %s, deferring probe\n",
+			  s_info->pdata->board_info->type);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+	v4l2_set_subdev_hostdata(sd, s_info);
+	sd->grp_id = SENSOR_GROUP_ID;
+
+	v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice %s\n",
+		  s_info->pdata->board_info->type);
+	return sd;
+}
+
+static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct i2c_adapter *adapter;
+
+	if (!client)
+		return;
+	v4l2_device_unregister_subdev(sd);
+	adapter = client->adapter;
+	i2c_unregister_device(client);
+	if (adapter)
+		i2c_put_adapter(adapter);
+}
+
+static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+{
+	struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
+	struct fimc_dev *fd = NULL;
+	int num_clients, ret, i;
+
+	/*
+	 * Runtime resume one of the FIMC entities to make sure
+	 * the sclk_cam clocks are not globally disabled.
+	 */
+	for (i = 0; !fd && i < ARRAY_SIZE(fmd->fimc); i++)
+		if (fmd->fimc[i])
+			fd = fmd->fimc[i];
+	if (!fd)
+		return -ENXIO;
+	ret = pm_runtime_get_sync(&fd->pdev->dev);
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(pdata->num_clients > ARRAY_SIZE(fmd->sensor));
+	num_clients = min_t(u32, pdata->num_clients, ARRAY_SIZE(fmd->sensor));
+
+	fmd->num_sensors = num_clients;
+	for (i = 0; i < num_clients; i++) {
+		struct v4l2_subdev *sd;
+
+		fmd->sensor[i].pdata = &pdata->isp_info[i];
+		ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], true);
+		if (ret)
+			break;
+		sd = fimc_md_register_sensor(fmd, &fmd->sensor[i]);
+		ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], false);
+
+		if (!IS_ERR(sd)) {
+			fmd->sensor[i].subdev = sd;
+		} else {
+			fmd->sensor[i].subdev = NULL;
+			ret = PTR_ERR(sd);
+			break;
+		}
+		if (ret)
+			break;
+	}
+	pm_runtime_put(&fd->pdev->dev);
+	return ret;
+}
+
+/*
+ * MIPI CSIS and FIMC platform devices registration.
+ */
+static int fimc_register_callback(struct device *dev, void *p)
+{
+	struct fimc_dev *fimc = dev_get_drvdata(dev);
+	struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+	struct fimc_md *fmd = p;
+	int ret = 0;
+
+	if (!fimc || !fimc->pdev)
+		return 0;
+
+	if (fimc->pdev->id < 0 || fimc->pdev->id >= FIMC_MAX_DEVS)
+		return 0;
+
+	fmd->fimc[fimc->pdev->id] = fimc;
+	sd->grp_id = FIMC_GROUP_ID;
+
+	ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+	if (ret) {
+		v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+			 fimc->id, ret);
+	}
+
+	return ret;
+}
+
+static int fimc_lite_register_callback(struct device *dev, void *p)
+{
+	struct fimc_lite *fimc = dev_get_drvdata(dev);
+	struct v4l2_subdev *sd = &fimc->subdev;
+	struct fimc_md *fmd = p;
+	int ret;
+
+	if (fimc == NULL)
+		return 0;
+
+	if (fimc->index >= FIMC_LITE_MAX_DEVS)
+		return 0;
+
+	fmd->fimc_lite[fimc->index] = fimc;
+	sd->grp_id = FLITE_GROUP_ID;
+
+	ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+	if (ret) {
+		v4l2_err(&fmd->v4l2_dev,
+			 "Failed to register FIMC-LITE.%d (%d)\n",
+			 fimc->index, ret);
+	}
+	return ret;
+}
+
+static int csis_register_callback(struct device *dev, void *p)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct platform_device *pdev;
+	struct fimc_md *fmd = p;
+	int id, ret;
+
+	if (!sd)
+		return 0;
+	pdev = v4l2_get_subdevdata(sd);
+	if (!pdev || pdev->id < 0 || pdev->id >= CSIS_MAX_ENTITIES)
+		return 0;
+	v4l2_info(sd, "csis%d sd: %s\n", pdev->id, sd->name);
+
+	id = pdev->id < 0 ? 0 : pdev->id;
+	fmd->csis[id].sd = sd;
+	sd->grp_id = CSIS_GROUP_ID;
+	ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+	if (ret)
+		v4l2_err(&fmd->v4l2_dev,
+			 "Failed to register CSIS subdevice: %d\n", ret);
+	return ret;
+}
+
+/**
+ * fimc_md_register_platform_entities - register FIMC and CSIS media entities
+ * @fmd: fimc media device
+ */
+static int fimc_md_register_platform_entities(struct fimc_md *fmd)
+{
+	struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
+	struct device_driver *driver;
+	int ret, i;
+
+	driver = driver_find(FIMC_MODULE_NAME, &platform_bus_type);
+	if (!driver) {
+		v4l2_warn(&fmd->v4l2_dev,
+			 "%s driver not found, deffering probe\n",
+			 FIMC_MODULE_NAME);
+		return -EPROBE_DEFER;
+	}
+
+	ret = driver_for_each_device(driver, NULL, fmd,
+				     fimc_register_callback);
+	if (ret)
+		return ret;
+
+	driver = driver_find(FIMC_LITE_DRV_NAME, &platform_bus_type);
+	if (driver && try_module_get(driver->owner)) {
+		ret = driver_for_each_device(driver, NULL, fmd,
+					     fimc_lite_register_callback);
+		if (ret)
+			return ret;
+		module_put(driver->owner);
+	}
+	/*
+	 * Check if there is any sensor on the MIPI-CSI2 bus and
+	 * if not skip the s5p-csis module loading.
+	 */
+	if (pdata == NULL)
+		return 0;
+	for (i = 0; i < pdata->num_clients; i++) {
+		if (pdata->isp_info[i].bus_type == FIMC_MIPI_CSI2) {
+			ret = 1;
+			break;
+		}
+	}
+	if (!ret)
+		return 0;
+
+	driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
+	if (!driver || !try_module_get(driver->owner)) {
+		v4l2_warn(&fmd->v4l2_dev,
+			 "%s driver not found, deffering probe\n",
+			 CSIS_DRIVER_NAME);
+		return -EPROBE_DEFER;
+	}
+
+	return driver_for_each_device(driver, NULL, fmd,
+				      csis_register_callback);
+}
+
+static void fimc_md_unregister_entities(struct fimc_md *fmd)
+{
+	int i;
+
+	for (i = 0; i < FIMC_MAX_DEVS; i++) {
+		if (fmd->fimc[i] == NULL)
+			continue;
+		v4l2_device_unregister_subdev(&fmd->fimc[i]->vid_cap.subdev);
+		fmd->fimc[i] = NULL;
+	}
+	for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+		if (fmd->fimc_lite[i] == NULL)
+			continue;
+		v4l2_device_unregister_subdev(&fmd->fimc_lite[i]->subdev);
+		fmd->fimc_lite[i] = NULL;
+	}
+	for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
+		if (fmd->csis[i].sd == NULL)
+			continue;
+		v4l2_device_unregister_subdev(fmd->csis[i].sd);
+		module_put(fmd->csis[i].sd->owner);
+		fmd->csis[i].sd = NULL;
+	}
+	for (i = 0; i < fmd->num_sensors; i++) {
+		if (fmd->sensor[i].subdev == NULL)
+			continue;
+		fimc_md_unregister_sensor(fmd->sensor[i].subdev);
+		fmd->sensor[i].subdev = NULL;
+	}
+}
+
+/**
+ * __fimc_md_create_fimc_sink_links - create links to all FIMC entities
+ * @fmd: fimc media device
+ * @source: the source entity to create links to all fimc entities from
+ * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
+ * @pad: the source entity pad index
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
+ */
+static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
+					    struct media_entity *source,
+					    struct v4l2_subdev *sensor,
+					    int pad, int link_mask)
+{
+	struct fimc_sensor_info *s_info;
+	struct media_entity *sink;
+	unsigned int flags = 0;
+	int ret, i;
+
+	for (i = 0; i < FIMC_MAX_DEVS; i++) {
+		if (!fmd->fimc[i])
+			continue;
+		/*
+		 * Some FIMC variants are not fitted with camera capture
+		 * interface. Skip creating a link from sensor for those.
+		 */
+		if (!fmd->fimc[i]->variant->has_cam_if)
+			continue;
+
+		flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
+
+		sink = &fmd->fimc[i]->vid_cap.subdev.entity;
+		ret = media_entity_create_link(source, pad, sink,
+					      FIMC_SD_PAD_SINK, flags);
+		if (ret)
+			return ret;
+
+		/* Notify FIMC capture subdev entity */
+		ret = media_entity_call(sink, link_setup, &sink->pads[0],
+					&source->pads[pad], flags);
+		if (ret)
+			break;
+
+		v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+			  source->name, flags ? '=' : '-', sink->name);
+
+		if (flags == 0 || sensor == NULL)
+			continue;
+		s_info = v4l2_get_subdev_hostdata(sensor);
+		if (!WARN_ON(s_info == NULL)) {
+			unsigned long irq_flags;
+			spin_lock_irqsave(&fmd->slock, irq_flags);
+			s_info->host = fmd->fimc[i];
+			spin_unlock_irqrestore(&fmd->slock, irq_flags);
+		}
+	}
+
+	for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+		if (!fmd->fimc_lite[i])
+			continue;
+
+		if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+			flags = MEDIA_LNK_FL_ENABLED;
+		else
+			flags = 0;
+
+		sink = &fmd->fimc_lite[i]->subdev.entity;
+		ret = media_entity_create_link(source, pad, sink,
+					       FLITE_SD_PAD_SINK, flags);
+		if (ret)
+			return ret;
+
+		/* Notify FIMC-LITE subdev entity */
+		ret = media_entity_call(sink, link_setup, &sink->pads[0],
+					&source->pads[pad], flags);
+		if (ret)
+			break;
+
+		v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+			  source->name, flags ? '=' : '-', sink->name);
+	}
+	return 0;
+}
+
+/* Create links from FIMC-LITE source pads to other entities */
+static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
+{
+	struct media_entity *source, *sink;
+	unsigned int flags = MEDIA_LNK_FL_ENABLED;
+	int i, ret = 0;
+
+	for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+		struct fimc_lite *fimc = fmd->fimc_lite[i];
+		if (fimc == NULL)
+			continue;
+		source = &fimc->subdev.entity;
+		sink = &fimc->vfd->entity;
+		/* FIMC-LITE's subdev and video node */
+		ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+					       sink, 0, flags);
+		if (ret)
+			break;
+		/* TODO: create links to other entities */
+	}
+
+	return ret;
+}
+
+/**
+ * fimc_md_create_links - create default links between registered entities
+ *
+ * Parallel interface sensor entities are connected directly to FIMC capture
+ * entities. The sensors using MIPI CSIS bus are connected through immutable
+ * link with CSI receiver entity specified by mux_id. Any registered CSIS
+ * entity has a link to each registered FIMC capture entity. Enabled links
+ * are created by default between each subsequent registered sensor and
+ * subsequent FIMC capture entity. The number of default active links is
+ * determined by the number of available sensors or FIMC entities,
+ * whichever is less.
+ */
+static int fimc_md_create_links(struct fimc_md *fmd)
+{
+	struct v4l2_subdev *sensor, *csis;
+	struct s5p_fimc_isp_info *pdata;
+	struct fimc_sensor_info *s_info;
+	struct media_entity *source, *sink;
+	int i, pad, fimc_id = 0, ret = 0;
+	u32 flags, link_mask = 0;
+
+	for (i = 0; i < fmd->num_sensors; i++) {
+		if (fmd->sensor[i].subdev == NULL)
+			continue;
+
+		sensor = fmd->sensor[i].subdev;
+		s_info = v4l2_get_subdev_hostdata(sensor);
+		if (!s_info || !s_info->pdata)
+			continue;
+
+		source = NULL;
+		pdata = s_info->pdata;
+
+		switch (pdata->bus_type) {
+		case FIMC_MIPI_CSI2:
+			if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
+				"Wrong CSI channel id: %d\n", pdata->mux_id))
+				return -EINVAL;
+
+			csis = fmd->csis[pdata->mux_id].sd;
+			if (WARN(csis == NULL,
+				 "MIPI-CSI interface specified "
+				 "but s5p-csis module is not loaded!\n"))
+				return -EINVAL;
+
+			ret = media_entity_create_link(&sensor->entity, 0,
+					      &csis->entity, CSIS_PAD_SINK,
+					      MEDIA_LNK_FL_IMMUTABLE |
+					      MEDIA_LNK_FL_ENABLED);
+			if (ret)
+				return ret;
+
+			v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]",
+				  sensor->entity.name, csis->entity.name);
+
+			source = NULL;
+			break;
+
+		case FIMC_ITU_601...FIMC_ITU_656:
+			source = &sensor->entity;
+			pad = 0;
+			break;
+
+		default:
+			v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
+				 pdata->bus_type);
+			return -EINVAL;
+		}
+		if (source == NULL)
+			continue;
+
+		link_mask = 1 << fimc_id++;
+		ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+						       pad, link_mask);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
+		if (fmd->csis[i].sd == NULL)
+			continue;
+		source = &fmd->csis[i].sd->entity;
+		pad = CSIS_PAD_SOURCE;
+
+		link_mask = 1 << fimc_id++;
+		ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
+						       pad, link_mask);
+	}
+
+	/* Create immutable links between each FIMC's subdev and video node */
+	flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
+	for (i = 0; i < FIMC_MAX_DEVS; i++) {
+		if (!fmd->fimc[i])
+			continue;
+		source = &fmd->fimc[i]->vid_cap.subdev.entity;
+		sink = &fmd->fimc[i]->vid_cap.vfd->entity;
+		ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+					      sink, 0, flags);
+		if (ret)
+			break;
+	}
+
+	return __fimc_md_create_flite_source_links(fmd);
+}
+
+/*
+ * The peripheral sensor clock management.
+ */
+static int fimc_md_get_clocks(struct fimc_md *fmd)
+{
+	char clk_name[32];
+	struct clk *clock;
+	int i;
+
+	for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
+		snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
+		clock = clk_get(NULL, clk_name);
+		if (IS_ERR_OR_NULL(clock)) {
+			v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s",
+				  clk_name);
+			return -ENXIO;
+		}
+		fmd->camclk[i].clock = clock;
+	}
+	return 0;
+}
+
+static void fimc_md_put_clocks(struct fimc_md *fmd)
+{
+	int i = FIMC_MAX_CAMCLKS;
+
+	while (--i >= 0) {
+		if (IS_ERR_OR_NULL(fmd->camclk[i].clock))
+			continue;
+		clk_put(fmd->camclk[i].clock);
+		fmd->camclk[i].clock = NULL;
+	}
+}
+
+static int __fimc_md_set_camclk(struct fimc_md *fmd,
+				struct fimc_sensor_info *s_info,
+				bool on)
+{
+	struct s5p_fimc_isp_info *pdata = s_info->pdata;
+	struct fimc_camclk_info *camclk;
+	int ret = 0;
+
+	if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
+		return -EINVAL;
+
+	camclk = &fmd->camclk[pdata->clk_id];
+
+	dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+	    pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
+
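+	/* The clock is reference counted; all users must request the same rate */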
+	if (on) {
+		if (camclk->use_count > 0 &&
+		    camclk->frequency != pdata->clk_frequency)
+			return -EINVAL;
+
+		if (camclk->use_count++ == 0) {
+			clk_set_rate(camclk->clock, pdata->clk_frequency);
+			camclk->frequency = pdata->clk_frequency;
+			ret = clk_enable(camclk->clock);
+			dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+			    clk_get_rate(camclk->clock));
+		}
+		return ret;
+	}
+
+	if (WARN_ON(camclk->use_count == 0))
+		return 0;
+
+	if (--camclk->use_count == 0) {
+		clk_disable(camclk->clock);
+		dbg("Disabled camclk %d", pdata->clk_id);
+	}
+	return ret;
+}
+
+/**
+ * fimc_md_set_camclk - peripheral sensor clock setup
+ * @sd: sensor subdev to configure sclk_cam clock for
+ * @on: 1 to enable or 0 to disable the clock
+ *
+ * There are 2 separate clock outputs available in the SoC for external
+ * image processors. These clocks are shared between all registered FIMC
+ * devices to which sensors can be attached, either directly or through
+ * the MIPI CSI receiver. The clock may be used by multiple sensors
+ * concurrently, provided they all request the same frequency.
+ * This function should only be called when the graph mutex is held.
+ */
+int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
+{
+	struct fimc_sensor_info *s_info = v4l2_get_subdev_hostdata(sd);
+	struct fimc_md *fmd = entity_to_fimc_mdev(&sd->entity);
+
+	return __fimc_md_set_camclk(fmd, s_info, on);
+}
+
+static int fimc_md_link_notify(struct media_pad *source,
+			       struct media_pad *sink, u32 flags)
+{
+	struct fimc_lite *fimc_lite = NULL;
+	struct fimc_dev *fimc = NULL;
+	struct fimc_pipeline *pipeline;
+	struct v4l2_subdev *sd;
+	int ret = 0;
+
+	if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+		return 0;
+
+	sd = media_entity_to_v4l2_subdev(sink->entity);
+
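+	/* Only the FIMC and FIMC-LITE capture subdevs are of interest here. */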
+	switch (sd->grp_id) {
+	case FLITE_GROUP_ID:
+		fimc_lite = v4l2_get_subdevdata(sd);
+		pipeline = &fimc_lite->pipeline;
+		break;
+	case FIMC_GROUP_ID:
+		fimc = v4l2_get_subdevdata(sd);
+		pipeline = &fimc->pipeline;
+		break;
+	default:
+		return 0;
+	}
+
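+	/* Link deactivation: shut the pipeline down and drop its subdevs. */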
+	if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+		ret = __fimc_pipeline_shutdown(pipeline);
+		pipeline->subdevs[IDX_SENSOR] = NULL;
+		pipeline->subdevs[IDX_CSIS] = NULL;
+
+		if (fimc) {
+			mutex_lock(&fimc->lock);
+			fimc_ctrls_delete(fimc->vid_cap.ctx);
+			mutex_unlock(&fimc->lock);
+		}
+		return ret;
+	}
+	/*
+	 * Link activation: power on the pipeline elements only if the
+	 * pipeline is already in use, i.e. its video node is open.
+	 * Recreate the controls destroyed during link deactivation.
+	 */
+	if (fimc) {
+		mutex_lock(&fimc->lock);
+		if (fimc->vid_cap.refcnt > 0) {
+			ret = __fimc_pipeline_initialize(pipeline,
+							 source->entity, true);
+			if (!ret)
+				ret = fimc_capture_ctrls_create(fimc);
+		}
+		mutex_unlock(&fimc->lock);
+	} else {
+		mutex_lock(&fimc_lite->lock);
+		if (fimc_lite->ref_count > 0) {
+			ret = __fimc_pipeline_initialize(pipeline,
+							 source->entity, true);
+		}
+		mutex_unlock(&fimc_lite->lock);
+	}
+	return ret ? -EPIPE : ret;
+}
+
+static ssize_t fimc_md_sysfs_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+	if (fmd->user_subdev_api)
+		return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
+
+	return strlcpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE);
+}
+
+static ssize_t fimc_md_sysfs_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fimc_md *fmd = platform_get_drvdata(pdev);
+	bool subdev_api;
+	int i;
+
+	if (!strcmp(buf, "vid-dev\n"))
+		subdev_api = false;
+	else if (!strcmp(buf, "sub-dev\n"))
+		subdev_api = true;
+	else
+		return count;
+
+	fmd->user_subdev_api = subdev_api;
+	for (i = 0; i < FIMC_MAX_DEVS; i++)
+		if (fmd->fimc[i])
+			fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api;
+	return count;
+}
+/*
+ * This device attribute selects the video pipeline configuration method.
+ * The following values are valid:
+ *  vid-dev - use the V4L2 video node API only; the subdevs are configured
+ *  by the host driver.
+ *  sub-dev - use the media controller API; the subdevs must be configured
+ *  in user space before streaming is started.
+ */
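+/*
+ * Example usage (the exact sysfs path depends on how the platform device
+ * is instantiated; the path below is only an illustration):
+ *
+ *   # echo "sub-dev" > /sys/devices/platform/s5p-fimc-md/subdev_conf_mode
+ *   # cat /sys/devices/platform/s5p-fimc-md/subdev_conf_mode
+ *   Sub-device API (sub-dev)
+ */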
+static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
+		   fimc_md_sysfs_show, fimc_md_sysfs_store);
+
+static int fimc_md_probe(struct platform_device *pdev)
+{
+	struct v4l2_device *v4l2_dev;
+	struct fimc_md *fmd;
+	int ret;
+
+	fmd = devm_kzalloc(&pdev->dev, sizeof(*fmd), GFP_KERNEL);
+	if (!fmd)
+		return -ENOMEM;
+
+	spin_lock_init(&fmd->slock);
+	fmd->pdev = pdev;
+
+	strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
+		sizeof(fmd->media_dev.model));
+	fmd->media_dev.link_notify = fimc_md_link_notify;
+	fmd->media_dev.dev = &pdev->dev;
+
+	v4l2_dev = &fmd->v4l2_dev;
+	v4l2_dev->mdev = &fmd->media_dev;
+	v4l2_dev->notify = fimc_sensor_notify;
+	snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s",
+		 dev_name(&pdev->dev));
+
+	ret = v4l2_device_register(&pdev->dev, &fmd->v4l2_dev);
+	if (ret < 0) {
+		v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
+		return ret;
+	}
+	ret = media_device_register(&fmd->media_dev);
+	if (ret < 0) {
+		v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
+		goto err_md;
+	}
+	ret = fimc_md_get_clocks(fmd);
+	if (ret)
+		goto err_clk;
+
+	fmd->user_subdev_api = false;
+
+	/* Protect the media graph while we're registering entities */
+	mutex_lock(&fmd->media_dev.graph_mutex);
+
+	ret = fimc_md_register_platform_entities(fmd);
+	if (ret)
+		goto err_unlock;
+
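+	/* Sensors are registered only when platform data describes them. */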
+	if (pdev->dev.platform_data) {
+		ret = fimc_md_register_sensor_entities(fmd);
+		if (ret)
+			goto err_unlock;
+	}
+	ret = fimc_md_create_links(fmd);
+	if (ret)
+		goto err_unlock;
+	ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
+	if (ret)
+		goto err_unlock;
+
+	ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+	if (ret)
+		goto err_unlock;
+
+	platform_set_drvdata(pdev, fmd);
+	mutex_unlock(&fmd->media_dev.graph_mutex);
+	return 0;
+
+err_unlock:
+	mutex_unlock(&fmd->media_dev.graph_mutex);
+err_clk:
+	media_device_unregister(&fmd->media_dev);
+	fimc_md_put_clocks(fmd);
+	fimc_md_unregister_entities(fmd);
+err_md:
+	v4l2_device_unregister(&fmd->v4l2_dev);
+	return ret;
+}
+
+static int __devexit fimc_md_remove(struct platform_device *pdev)
+{
+	struct fimc_md *fmd = platform_get_drvdata(pdev);
+
+	if (!fmd)
+		return 0;
+	device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
+	fimc_md_unregister_entities(fmd);
+	media_device_unregister(&fmd->media_dev);
+	fimc_md_put_clocks(fmd);
+	return 0;
+}
+
+static struct platform_driver fimc_md_driver = {
+	.probe		= fimc_md_probe,
+	.remove		= __devexit_p(fimc_md_remove),
+	.driver = {
+		.name	= "s5p-fimc-md",
+		.owner	= THIS_MODULE,
+	}
+};
+
+static int __init fimc_md_init(void)
+{
+	int ret;
+
+	request_module("s5p-csis");
+	ret = fimc_register_driver();
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&fimc_md_driver);
+}
+
+static void __exit fimc_md_exit(void)
+{
+	platform_driver_unregister(&fimc_md_driver);
+	fimc_unregister_driver();
+}
+
+module_init(fimc_md_init);
+module_exit(fimc_md_exit);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0.1");
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.h b/drivers/media/platform/s5p-fimc/fimc-mdevice.h
new file mode 100644
index 0000000..1f5dbaf
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_MDEVICE_H_
+#define FIMC_MDEVICE_H_
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "fimc-core.h"
+#include "fimc-lite.h"
+#include "mipi-csis.h"
+
+/* Group IDs of sensor, MIPI-CSIS, FIMC-LITE and the writeback subdevs. */
+#define SENSOR_GROUP_ID		(1 << 8)
+#define CSIS_GROUP_ID		(1 << 9)
+#define WRITEBACK_GROUP_ID	(1 << 10)
+#define FIMC_GROUP_ID		(1 << 11)
+#define FLITE_GROUP_ID		(1 << 12)
+
+#define FIMC_MAX_SENSORS	8
+#define FIMC_MAX_CAMCLKS	2
+
+struct fimc_csis_info {
+	struct v4l2_subdev *sd;
+	int id;
+};
+
+struct fimc_camclk_info {
+	struct clk *clock;
+	int use_count;
+	unsigned long frequency;
+};
+
+/**
+ * struct fimc_sensor_info - image data source subdev information
+ * @pdata: sensor's attributes passed as the media device's platform data
+ * @subdev: image sensor v4l2 subdev
+ * @host: fimc device the sensor is currently linked to
+ *
+ * This data structure applies to both the image sensor and writeback subdevs.
+ */
+struct fimc_sensor_info {
+	struct s5p_fimc_isp_info *pdata;
+	struct v4l2_subdev *subdev;
+	struct fimc_dev *host;
+};
+
+/**
+ * struct fimc_md - fimc media device information
+ * @csis: MIPI CSIS subdevs data
+ * @sensor: array of registered sensor subdevs
+ * @num_sensors: actual number of registered sensors
+ * @camclk: external sensor clock information
+ * @fimc: array of registered fimc devices
+ * @media_dev: top level media device
+ * @v4l2_dev: top level v4l2_device holding up the subdevs
+ * @pdev: platform device this media device is hooked up into
+ * @user_subdev_api: true if subdevs are not configured by the host driver
+ * @slock: spinlock protecting @sensor array
+ */
+struct fimc_md {
+	struct fimc_csis_info csis[CSIS_MAX_ENTITIES];
+	struct fimc_sensor_info sensor[FIMC_MAX_SENSORS];
+	int num_sensors;
+	struct fimc_camclk_info camclk[FIMC_MAX_CAMCLKS];
+	struct fimc_lite *fimc_lite[FIMC_LITE_MAX_DEVS];
+	struct fimc_dev *fimc[FIMC_MAX_DEVS];
+	struct media_device media_dev;
+	struct v4l2_device v4l2_dev;
+	struct platform_device *pdev;
+	bool user_subdev_api;
+	spinlock_t slock;
+};
+
+#define is_subdev_pad(pad) (pad == NULL || \
+	media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV)
+
+#define me_subtype(me) \
+	((me->type) & (MEDIA_ENT_TYPE_MASK | MEDIA_ENT_SUBTYPE_MASK))
+
+#define subdev_has_devnode(__sd) (__sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)
+
+static inline struct fimc_md *entity_to_fimc_mdev(struct media_entity *me)
+{
+	return me->parent == NULL ? NULL :
+		container_of(me->parent, struct fimc_md, media_dev);
+}
+
+static inline void fimc_md_graph_lock(struct fimc_dev *fimc)
+{
+	BUG_ON(fimc->vid_cap.vfd == NULL);
+	mutex_lock(&fimc->vid_cap.vfd->entity.parent->graph_mutex);
+}
+
+static inline void fimc_md_graph_unlock(struct fimc_dev *fimc)
+{
+	BUG_ON(fimc->vid_cap.vfd == NULL);
+	mutex_unlock(&fimc->vid_cap.vfd->entity.parent->graph_mutex);
+}
+
+int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
+void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me);
+int fimc_pipeline_initialize(struct fimc_pipeline *p, struct media_entity *me,
+			     bool resume);
+int fimc_pipeline_shutdown(struct fimc_pipeline *p);
+int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state);
+int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool state);
+
+#endif
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.c b/drivers/media/platform/s5p-fimc/fimc-reg.c
new file mode 100644
index 0000000..0e3eb9c
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.c
@@ -0,0 +1,775 @@
+/*
+ * Register interface file for Samsung Camera Interface (FIMC) driver
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-reg.h"
+#include "fimc-core.h"
+
+void fimc_hw_reset(struct fimc_dev *dev)
+{
+	u32 cfg;
+
+	cfg = readl(dev->regs + FIMC_REG_CISRCFMT);
+	cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+	writel(cfg, dev->regs + FIMC_REG_CISRCFMT);
+
+	/* Software reset. */
+	cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+	cfg |= (FIMC_REG_CIGCTRL_SWRST | FIMC_REG_CIGCTRL_IRQ_LEVEL);
+	writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+	udelay(10);
+
+	cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+	cfg &= ~FIMC_REG_CIGCTRL_SWRST;
+	writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+
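+	/*
+	 * On variants with more than 4 output buffers, start with only the
+	 * first 4 DMA buffer address slots enabled.
+	 */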
+	if (dev->variant->out_buf_count > 4)
+		fimc_hw_set_dma_seq(dev, 0xF);
+}
+
+static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
+{
+	u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
+
+	if (ctx->hflip)
+		flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
+	if (ctx->vflip)
+		flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
+
+	if (ctx->rotation <= 90)
+		return flip;
+
+	return (flip ^ FIMC_REG_MSCTRL_FLIP_180) & FIMC_REG_MSCTRL_FLIP_180;
+}
+
+static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
+{
+	u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
+
+	if (ctx->hflip)
+		flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
+	if (ctx->vflip)
+		flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
+
+	if (ctx->rotation <= 90)
+		return flip;
+
+	return (flip ^ FIMC_REG_CITRGFMT_FLIP_180) & FIMC_REG_CITRGFMT_FLIP_180;
+}
+
+void fimc_hw_set_rotation(struct fimc_ctx *ctx)
+{
+	u32 cfg, flip;
+	struct fimc_dev *dev = ctx->fimc_dev;
+
+	cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+	cfg &= ~(FIMC_REG_CITRGFMT_INROT90 | FIMC_REG_CITRGFMT_OUTROT90 |
+		 FIMC_REG_CITRGFMT_FLIP_180);
+
+	/*
+	 * The input and output rotator cannot work simultaneously.
+	 * Use the output rotator in output DMA mode or the input rotator
+	 * in direct fifo output mode.
+	 */
+	if (ctx->rotation == 90 || ctx->rotation == 270) {
+		if (ctx->out_path == FIMC_IO_LCDFIFO)
+			cfg |= FIMC_REG_CITRGFMT_INROT90;
+		else
+			cfg |= FIMC_REG_CITRGFMT_OUTROT90;
+	}
+
+	if (ctx->out_path == FIMC_IO_DMA) {
+		cfg |= fimc_hw_get_target_flip(ctx);
+		writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
+	} else {
+		/* LCD FIFO path */
+		flip = readl(dev->regs + FIMC_REG_MSCTRL);
+		flip &= ~FIMC_REG_MSCTRL_FLIP_MASK;
+		flip |= fimc_hw_get_in_flip(ctx);
+		writel(flip, dev->regs + FIMC_REG_MSCTRL);
+	}
+}
+
+void fimc_hw_set_target_format(struct fimc_ctx *ctx)
+{
+	u32 cfg;
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_frame *frame = &ctx->d_frame;
+
+	dbg("w= %d, h= %d color: %d", frame->width,
+	    frame->height, frame->fmt->color);
+
+	cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+	cfg &= ~(FIMC_REG_CITRGFMT_FMT_MASK | FIMC_REG_CITRGFMT_HSIZE_MASK |
+		 FIMC_REG_CITRGFMT_VSIZE_MASK);
+
+	switch (frame->fmt->color) {
+	case FIMC_FMT_RGB444...FIMC_FMT_RGB888:
+		cfg |= FIMC_REG_CITRGFMT_RGB;
+		break;
+	case FIMC_FMT_YCBCR420:
+		cfg |= FIMC_REG_CITRGFMT_YCBCR420;
+		break;
+	case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
+		if (frame->fmt->colplanes == 1)
+			cfg |= FIMC_REG_CITRGFMT_YCBCR422_1P;
+		else
+			cfg |= FIMC_REG_CITRGFMT_YCBCR422;
+		break;
+	default:
+		break;
+	}
+
+	if (ctx->rotation == 90 || ctx->rotation == 270)
+		cfg |= (frame->height << 16) | frame->width;
+	else
+		cfg |= (frame->width << 16) | frame->height;
+
+	writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
+
+	cfg = readl(dev->regs + FIMC_REG_CITAREA);
+	cfg &= ~FIMC_REG_CITAREA_MASK;
+	cfg |= (frame->width * frame->height);
+	writel(cfg, dev->regs + FIMC_REG_CITAREA);
+}
+
+static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_frame *frame = &ctx->d_frame;
+	u32 cfg;
+
+	cfg = (frame->f_height << 16) | frame->f_width;
+	writel(cfg, dev->regs + FIMC_REG_ORGOSIZE);
+
+	/* Select the color space conversion equation (HD/SD size). */
+	cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+	if (frame->f_width >= 1280) /* HD */
+		cfg |= FIMC_REG_CIGCTRL_CSC_ITU601_709;
+	else	/* SD */
+		cfg &= ~FIMC_REG_CIGCTRL_CSC_ITU601_709;
+	writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_frame *frame = &ctx->d_frame;
+	struct fimc_dma_offset *offset = &frame->dma_offset;
+	struct fimc_fmt *fmt = frame->fmt;
+	u32 cfg;
+
+	/* Set the input dma offsets. */
+	cfg = (offset->y_v << 16) | offset->y_h;
+	writel(cfg, dev->regs + FIMC_REG_CIOYOFF);
+
+	cfg = (offset->cb_v << 16) | offset->cb_h;
+	writel(cfg, dev->regs + FIMC_REG_CIOCBOFF);
+
+	cfg = (offset->cr_v << 16) | offset->cr_h;
+	writel(cfg, dev->regs + FIMC_REG_CIOCROFF);
+
+	fimc_hw_set_out_dma_size(ctx);
+
+	/* Configure chroma components order. */
+	cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+
+	cfg &= ~(FIMC_REG_CIOCTRL_ORDER2P_MASK |
+		 FIMC_REG_CIOCTRL_ORDER422_MASK |
+		 FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK |
+		 FIMC_REG_CIOCTRL_RGB16FMT_MASK);
+
+	if (fmt->colplanes == 1)
+		cfg |= ctx->out_order_1p;
+	else if (fmt->colplanes == 2)
+		cfg |= ctx->out_order_2p | FIMC_REG_CIOCTRL_YCBCR_2PLANE;
+	else if (fmt->colplanes == 3)
+		cfg |= FIMC_REG_CIOCTRL_YCBCR_3PLANE;
+
+	if (fmt->color == FIMC_FMT_RGB565)
+		cfg |= FIMC_REG_CIOCTRL_RGB565;
+	else if (fmt->color == FIMC_FMT_RGB555)
+		cfg |= FIMC_REG_CIOCTRL_ARGB1555;
+	else if (fmt->color == FIMC_FMT_RGB444)
+		cfg |= FIMC_REG_CIOCTRL_ARGB4444;
+
+	writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_ORGISIZE);
+	if (enable)
+		cfg |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+	else
+		cfg &= ~FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+	writel(cfg, dev->regs + FIMC_REG_ORGISIZE);
+}
+
+void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+	if (enable)
+		cfg |= FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+	else
+		cfg &= ~FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+	writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev =  ctx->fimc_dev;
+	struct fimc_scaler *sc = &ctx->scaler;
+	u32 cfg, shfactor;
+
+	shfactor = 10 - (sc->hfactor + sc->vfactor);
+	cfg = shfactor << 28;
+
+	cfg |= (sc->pre_hratio << 16) | sc->pre_vratio;
+	writel(cfg, dev->regs + FIMC_REG_CISCPRERATIO);
+
+	cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+	writel(cfg, dev->regs + FIMC_REG_CISCPREDST);
+}
+
+static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_scaler *sc = &ctx->scaler;
+	struct fimc_frame *src_frame = &ctx->s_frame;
+	struct fimc_frame *dst_frame = &ctx->d_frame;
+
+	u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+
+	cfg &= ~(FIMC_REG_CISCCTRL_CSCR2Y_WIDE | FIMC_REG_CISCCTRL_CSCY2R_WIDE |
+		 FIMC_REG_CISCCTRL_SCALEUP_H | FIMC_REG_CISCCTRL_SCALEUP_V |
+		 FIMC_REG_CISCCTRL_SCALERBYPASS | FIMC_REG_CISCCTRL_ONE2ONE |
+		 FIMC_REG_CISCCTRL_INRGB_FMT_MASK | FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK |
+		 FIMC_REG_CISCCTRL_INTERLACE | FIMC_REG_CISCCTRL_RGB_EXT);
+
+	if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
+		cfg |= (FIMC_REG_CISCCTRL_CSCR2Y_WIDE |
+			FIMC_REG_CISCCTRL_CSCY2R_WIDE);
+
+	if (!sc->enabled)
+		cfg |= FIMC_REG_CISCCTRL_SCALERBYPASS;
+
+	if (sc->scaleup_h)
+		cfg |= FIMC_REG_CISCCTRL_SCALEUP_H;
+
+	if (sc->scaleup_v)
+		cfg |= FIMC_REG_CISCCTRL_SCALEUP_V;
+
+	if (sc->copy_mode)
+		cfg |= FIMC_REG_CISCCTRL_ONE2ONE;
+
+	if (ctx->in_path == FIMC_IO_DMA) {
+		switch (src_frame->fmt->color) {
+		case FIMC_FMT_RGB565:
+			cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB565;
+			break;
+		case FIMC_FMT_RGB666:
+			cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB666;
+			break;
+		case FIMC_FMT_RGB888:
+			cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB888;
+			break;
+		}
+	}
+
+	if (ctx->out_path == FIMC_IO_DMA) {
+		u32 color = dst_frame->fmt->color;
+
+		if (color >= FIMC_FMT_RGB444 && color <= FIMC_FMT_RGB565)
+			cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565;
+		else if (color == FIMC_FMT_RGB666)
+			cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666;
+		else if (color == FIMC_FMT_RGB888)
+			cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
+	} else {
+		cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
+
+		if (ctx->flags & FIMC_SCAN_MODE_INTERLACED)
+			cfg |= FIMC_REG_CISCCTRL_INTERLACE;
+	}
+
+	writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_variant *variant = dev->variant;
+	struct fimc_scaler *sc = &ctx->scaler;
+	u32 cfg;
+
+	dbg("main_hratio= 0x%X  main_vratio= 0x%X",
+	    sc->main_hratio, sc->main_vratio);
+
+	fimc_hw_set_scaler(ctx);
+
+	cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+	cfg &= ~(FIMC_REG_CISCCTRL_MHRATIO_MASK |
+		 FIMC_REG_CISCCTRL_MVRATIO_MASK);
+
+	if (variant->has_mainscaler_ext) {
+		cfg |= FIMC_REG_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
+		cfg |= FIMC_REG_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
+		writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+
+		cfg = readl(dev->regs + FIMC_REG_CIEXTEN);
+
+		cfg &= ~(FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK |
+			 FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK);
+		cfg |= FIMC_REG_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
+		cfg |= FIMC_REG_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
+		writel(cfg, dev->regs + FIMC_REG_CIEXTEN);
+	} else {
+		cfg |= FIMC_REG_CISCCTRL_MHRATIO(sc->main_hratio);
+		cfg |= FIMC_REG_CISCCTRL_MVRATIO(sc->main_vratio);
+		writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+	}
+}
+
+void fimc_hw_en_capture(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+
+	u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+
+	if (ctx->out_path == FIMC_IO_DMA) {
+		/* One-shot mode */
+		cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
+			FIMC_REG_CIIMGCPT_IMGCPTEN;
+	} else {
+		/* Continuous frame capture mode (freerun). */
+		cfg &= ~(FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
+			 FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT);
+		cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
+	}
+
+	if (ctx->scaler.enabled)
+		cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
+
+	cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
+	writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+void fimc_hw_set_effect(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_effect *effect = &ctx->effect;
+	u32 cfg = 0;
+
+	if (effect->type != FIMC_REG_CIIMGEFF_FIN_BYPASS) {
+		cfg |= FIMC_REG_CIIMGEFF_IE_SC_AFTER |
+			FIMC_REG_CIIMGEFF_IE_ENABLE;
+		cfg |= effect->type;
+		if (effect->type == FIMC_REG_CIIMGEFF_FIN_ARBITRARY)
+			cfg |= (effect->pat_cb << 13) | effect->pat_cr;
+	}
+
+	writel(cfg, dev->regs + FIMC_REG_CIIMGEFF);
+}
+
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_frame *frame = &ctx->d_frame;
+	u32 cfg;
+
+	if (!(frame->fmt->flags & FMT_HAS_ALPHA))
+		return;
+
+	cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+	cfg &= ~FIMC_REG_CIOCTRL_ALPHA_OUT_MASK;
+	cfg |= (frame->alpha << 4);
+	writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
+}
+
+static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_frame *frame = &ctx->s_frame;
+	u32 cfg_o = 0;
+	u32 cfg_r = 0;
+
+	if (FIMC_IO_LCDFIFO == ctx->out_path)
+		cfg_r |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+
+	cfg_o |= (frame->f_height << 16) | frame->f_width;
+	cfg_r |= (frame->height << 16) | frame->width;
+
+	writel(cfg_o, dev->regs + FIMC_REG_ORGISIZE);
+	writel(cfg_r, dev->regs + FIMC_REG_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+	struct fimc_frame *frame = &ctx->s_frame;
+	struct fimc_dma_offset *offset = &frame->dma_offset;
+	u32 cfg;
+
+	/* Set the pixel offsets. */
+	cfg = (offset->y_v << 16) | offset->y_h;
+	writel(cfg, dev->regs + FIMC_REG_CIIYOFF);
+
+	cfg = (offset->cb_v << 16) | offset->cb_h;
+	writel(cfg, dev->regs + FIMC_REG_CIICBOFF);
+
+	cfg = (offset->cr_v << 16) | offset->cr_h;
+	writel(cfg, dev->regs + FIMC_REG_CIICROFF);
+
+	/* Input original and real size. */
+	fimc_hw_set_in_dma_size(ctx);
+
+	/* Use DMA autoload only in FIFO mode. */
+	fimc_hw_en_autoload(dev, ctx->out_path == FIMC_IO_LCDFIFO);
+
+	/* Set the input DMA to process single frame only. */
+	cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+	cfg &= ~(FIMC_REG_MSCTRL_INFORMAT_MASK
+		 | FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK
+		 | FIMC_REG_MSCTRL_INPUT_MASK
+		 | FIMC_REG_MSCTRL_C_INT_IN_MASK
+		 | FIMC_REG_MSCTRL_2P_IN_ORDER_MASK);
+
+	cfg |= (FIMC_REG_MSCTRL_IN_BURST_COUNT(4)
+		| FIMC_REG_MSCTRL_INPUT_MEMORY
+		| FIMC_REG_MSCTRL_FIFO_CTRL_FULL);
+
+	switch (frame->fmt->color) {
+	case FIMC_FMT_RGB565...FIMC_FMT_RGB888:
+		cfg |= FIMC_REG_MSCTRL_INFORMAT_RGB;
+		break;
+	case FIMC_FMT_YCBCR420:
+		cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR420;
+
+		if (frame->fmt->colplanes == 2)
+			cfg |= ctx->in_order_2p | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
+		else
+			cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
+
+		break;
+	case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
+		if (frame->fmt->colplanes == 1) {
+			cfg |= ctx->in_order_1p
+				| FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P;
+		} else {
+			cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR422;
+
+			if (frame->fmt->colplanes == 2)
+				cfg |= ctx->in_order_2p
+					| FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
+			else
+				cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
+		}
+		break;
+	default:
+		break;
+	}
+
+	writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+
+	/* Input/output DMA linear/tiled mode. */
+	cfg = readl(dev->regs + FIMC_REG_CIDMAPARAM);
+	cfg &= ~FIMC_REG_CIDMAPARAM_TILE_MASK;
+
+	if (tiled_fmt(ctx->s_frame.fmt))
+		cfg |= FIMC_REG_CIDMAPARAM_R_64X32;
+
+	if (tiled_fmt(ctx->d_frame.fmt))
+		cfg |= FIMC_REG_CIDMAPARAM_W_64X32;
+
+	writel(cfg, dev->regs + FIMC_REG_CIDMAPARAM);
+}
+
+void fimc_hw_set_input_path(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+
+	u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+	cfg &= ~FIMC_REG_MSCTRL_INPUT_MASK;
+
+	if (ctx->in_path == FIMC_IO_DMA)
+		cfg |= FIMC_REG_MSCTRL_INPUT_MEMORY;
+	else
+		cfg |= FIMC_REG_MSCTRL_INPUT_EXTCAM;
+
+	writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+void fimc_hw_set_output_path(struct fimc_ctx *ctx)
+{
+	struct fimc_dev *dev = ctx->fimc_dev;
+
+	u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+	cfg &= ~FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+	if (ctx->out_path == FIMC_IO_LCDFIFO)
+		cfg |= FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+	writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_CIREAL_ISIZE);
+	cfg |= FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+	writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
+
+	writel(paddr->y, dev->regs + FIMC_REG_CIIYSA(0));
+	writel(paddr->cb, dev->regs + FIMC_REG_CIICBSA(0));
+	writel(paddr->cr, dev->regs + FIMC_REG_CIICRSA(0));
+
+	cfg &= ~FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+	writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
+}
+
+void fimc_hw_set_output_addr(struct fimc_dev *dev,
+			     struct fimc_addr *paddr, int index)
+{
+	int i = (index == -1) ? 0 : index;
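+	/* index == -1: program all buffer address slots with this buffer. */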
+	do {
+		writel(paddr->y, dev->regs + FIMC_REG_CIOYSA(i));
+		writel(paddr->cb, dev->regs + FIMC_REG_CIOCBSA(i));
+		writel(paddr->cr, dev->regs + FIMC_REG_CIOCRSA(i));
+		dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
+		    i, paddr->y, paddr->cb, paddr->cr);
+	} while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
+}
+
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+				struct s5p_fimc_isp_info *cam)
+{
+	u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
+
+	cfg &= ~(FIMC_REG_CIGCTRL_INVPOLPCLK | FIMC_REG_CIGCTRL_INVPOLVSYNC |
+		 FIMC_REG_CIGCTRL_INVPOLHREF | FIMC_REG_CIGCTRL_INVPOLHSYNC |
+		 FIMC_REG_CIGCTRL_INVPOLFIELD);
+
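+	/* Note: HSYNC_ACTIVE_LOW inverts both the HREF and HSYNC polarity. */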
+	if (cam->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+		cfg |= FIMC_REG_CIGCTRL_INVPOLPCLK;
+
+	if (cam->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+		cfg |= FIMC_REG_CIGCTRL_INVPOLVSYNC;
+
+	if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+		cfg |= FIMC_REG_CIGCTRL_INVPOLHREF;
+
+	if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+		cfg |= FIMC_REG_CIGCTRL_INVPOLHSYNC;
+
+	if (cam->flags & V4L2_MBUS_FIELD_EVEN_LOW)
+		cfg |= FIMC_REG_CIGCTRL_INVPOLFIELD;
+
+	writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
+
+	return 0;
+}
+
+struct mbus_pixfmt_desc {
+	u32 pixelcode;
+	u32 cisrcfmt;
+	u16 bus_width;
+};
+
+static const struct mbus_pixfmt_desc pix_desc[] = {
+	{ V4L2_MBUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
+	{ V4L2_MBUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
+	{ V4L2_MBUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
+	{ V4L2_MBUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
+};
+
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+			      struct s5p_fimc_isp_info *cam)
+{
+	struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
+	u32 cfg = 0;
+	u32 bus_width;
+	int i;
+
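+	/* For the parallel bus, derive CISRCFMT from the current mbus code. */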
+	if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
+		for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
+			if (fimc->vid_cap.mf.code == pix_desc[i].pixelcode) {
+				cfg = pix_desc[i].cisrcfmt;
+				bus_width = pix_desc[i].bus_width;
+				break;
+			}
+		}
+
+		if (i == ARRAY_SIZE(pix_desc)) {
+			v4l2_err(fimc->vid_cap.vfd,
+				 "Camera color format not supported: %d\n",
+				 fimc->vid_cap.mf.code);
+			return -EINVAL;
+		}
+
+		if (cam->bus_type == FIMC_ITU_601) {
+			if (bus_width == 8)
+				cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+			else if (bus_width == 16)
+				cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
+		} /* else defaults to ITU-R BT.656 8-bit */
+	} else if (cam->bus_type == FIMC_MIPI_CSI2) {
+		if (fimc_fmt_is_jpeg(f->fmt->color))
+			cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+	}
+
+	cfg |= (f->o_width << 16) | f->o_height;
+	writel(cfg, fimc->regs + FIMC_REG_CISRCFMT);
+	return 0;
+}
+
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+{
+	u32 hoff2, voff2;
+
+	u32 cfg = readl(fimc->regs + FIMC_REG_CIWDOFST);
+
+	cfg &= ~(FIMC_REG_CIWDOFST_HOROFF_MASK | FIMC_REG_CIWDOFST_VEROFF_MASK);
+	cfg |=  FIMC_REG_CIWDOFST_OFF_EN |
+		(f->offs_h << 16) | f->offs_v;
+
+	writel(cfg, fimc->regs + FIMC_REG_CIWDOFST);
+
+	/* See CIWDOFSTn register description in the datasheet for details. */
+	hoff2 = f->o_width - f->width - f->offs_h;
+	voff2 = f->o_height - f->height - f->offs_v;
+	cfg = (hoff2 << 16) | voff2;
+	writel(cfg, fimc->regs + FIMC_REG_CIWDOFST2);
+}
+
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+			    struct s5p_fimc_isp_info *cam)
+{
+	u32 cfg, tmp;
+	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+	u32 csis_data_alignment = 32;
+
+	cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
+
+	/* Select ITU B interface, disable Writeback path and test pattern. */
+	cfg &= ~(FIMC_REG_CIGCTRL_TESTPAT_MASK | FIMC_REG_CIGCTRL_SELCAM_ITU_A |
+		FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
+		FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG);
+
+	switch (cam->bus_type) {
+	case FIMC_MIPI_CSI2:
+		cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
+
+		if (cam->mux_id == 0)
+			cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
+
+		/* TODO: add remaining supported formats. */
+		switch (vid_cap->mf.code) {
+		case V4L2_MBUS_FMT_VYUY8_2X8:
+			tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
+			break;
+		case V4L2_MBUS_FMT_JPEG_1X8:
+			tmp = FIMC_REG_CSIIMGFMT_USER(1);
+			cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
+			break;
+		default:
+			v4l2_err(vid_cap->vfd,
+				 "Not supported camera pixel format: %#x\n",
+				 vid_cap->mf.code);
+			return -EINVAL;
+		}
+		tmp |= (csis_data_alignment == 32) << 8;
+
+		writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
+		break;
+	case FIMC_ITU_601...FIMC_ITU_656:
+		if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+			cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
+		break;
+	case FIMC_LCD_WB:
+		cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
+		break;
+	default:
+		v4l2_err(vid_cap->vfd, "Invalid camera bus type selected\n");
+		return -EINVAL;
+	}
+	writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
+
+	return 0;
+}
+
+void fimc_hw_clear_irq(struct fimc_dev *dev)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+	cfg |= FIMC_REG_CIGCTRL_IRQ_CLR;
+	writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+	if (on)
+		cfg |= FIMC_REG_CISCCTRL_SCALERSTART;
+	else
+		cfg &= ~FIMC_REG_CISCCTRL_SCALERSTART;
+	writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+	if (on)
+		cfg |= FIMC_REG_MSCTRL_ENVID;
+	else
+		cfg &= ~FIMC_REG_MSCTRL_ENVID;
+	writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+void fimc_hw_dis_capture(struct fimc_dev *dev)
+{
+	u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+	cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN | FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+	writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+/* Return an index to the buffer actually being written. */
+u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+	u32 reg;
+
+	if (dev->variant->has_cistatus2) {
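+		/* The index appears to be 1-based here; make it 0-based. */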
+		reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3F;
+		return reg > 0 ? --reg : reg;
+	}
+
+	reg = readl(dev->regs + FIMC_REG_CISTATUS);
+
+	return (reg & FIMC_REG_CISTATUS_FRAMECNT_MASK) >>
+		FIMC_REG_CISTATUS_FRAMECNT_SHIFT;
+}
+
+/* Locking: the caller holds fimc->slock */
+void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+	fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+	fimc_hw_en_capture(ctx);
+}
+
+void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+	fimc_hw_en_lastirq(fimc, true);
+	fimc_hw_dis_capture(fimc);
+	fimc_hw_enable_scaler(fimc, false);
+	fimc_hw_en_lastirq(fimc, false);
+}
diff --git a/drivers/media/platform/s5p-fimc/fimc-reg.h b/drivers/media/platform/s5p-fimc/fimc-reg.h
new file mode 100644
index 0000000..579ac8a
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/fimc-reg.h
@@ -0,0 +1,326 @@
+/*
+ * Samsung camera host interface (FIMC) registers definition
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_REG_H_
+#define FIMC_REG_H_
+
+#include "fimc-core.h"
+
+/* Input source format */
+#define FIMC_REG_CISRCFMT			0x00
+#define FIMC_REG_CISRCFMT_ITU601_8BIT		(1 << 31)
+#define FIMC_REG_CISRCFMT_ITU601_16BIT		(1 << 29)
+#define FIMC_REG_CISRCFMT_ORDER422_YCBYCR	(0 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_YCRYCB	(1 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CBYCRY	(2 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CRYCBY	(3 << 14)
+
+/* Window offset */
+#define FIMC_REG_CIWDOFST			0x04
+#define FIMC_REG_CIWDOFST_OFF_EN		(1 << 31)
+#define FIMC_REG_CIWDOFST_CLROVFIY		(1 << 30)
+#define FIMC_REG_CIWDOFST_CLROVRLB		(1 << 29)
+#define FIMC_REG_CIWDOFST_HOROFF_MASK		(0x7ff << 16)
+#define FIMC_REG_CIWDOFST_CLROVFICB		(1 << 15)
+#define FIMC_REG_CIWDOFST_CLROVFICR		(1 << 14)
+#define FIMC_REG_CIWDOFST_VEROFF_MASK		(0xfff << 0)
+
+/* Global control */
+#define FIMC_REG_CIGCTRL			0x08
+#define FIMC_REG_CIGCTRL_SWRST			(1 << 31)
+#define FIMC_REG_CIGCTRL_CAMRST_A		(1 << 30)
+#define FIMC_REG_CIGCTRL_SELCAM_ITU_A		(1 << 29)
+#define FIMC_REG_CIGCTRL_TESTPAT_NORMAL		(0 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_COLOR_BAR	(1 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_HOR_INC	(2 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_VER_INC	(3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_MASK		(3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_SHIFT		27
+#define FIMC_REG_CIGCTRL_INVPOLPCLK		(1 << 26)
+#define FIMC_REG_CIGCTRL_INVPOLVSYNC		(1 << 25)
+#define FIMC_REG_CIGCTRL_INVPOLHREF		(1 << 24)
+#define FIMC_REG_CIGCTRL_IRQ_OVFEN		(1 << 22)
+#define FIMC_REG_CIGCTRL_HREF_MASK		(1 << 21)
+#define FIMC_REG_CIGCTRL_IRQ_LEVEL		(1 << 20)
+#define FIMC_REG_CIGCTRL_IRQ_CLR		(1 << 19)
+#define FIMC_REG_CIGCTRL_IRQ_ENABLE		(1 << 16)
+#define FIMC_REG_CIGCTRL_SHDW_DISABLE		(1 << 12)
+#define FIMC_REG_CIGCTRL_CAM_JPEG		(1 << 8)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A		(1 << 7)
+#define FIMC_REG_CIGCTRL_CAMIF_SELWB		(1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define FIMC_REG_CIGCTRL_CSC_ITU601_709		(1 << 5)
+#define FIMC_REG_CIGCTRL_INVPOLHSYNC		(1 << 4)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI		(1 << 3)
+#define FIMC_REG_CIGCTRL_INVPOLFIELD		(1 << 1)
+#define FIMC_REG_CIGCTRL_INTERLACE		(1 << 0)
+
+/* Window offset 2 */
+#define FIMC_REG_CIWDOFST2			0x14
+#define FIMC_REG_CIWDOFST2_HOROFF_MASK		(0xfff << 16)
+#define FIMC_REG_CIWDOFST2_VEROFF_MASK		(0xfff << 0)
+
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define FIMC_REG_CIOYSA(n)			(0x18 + (n) * 4)
+#define FIMC_REG_CIOCBSA(n)			(0x28 + (n) * 4)
+#define FIMC_REG_CIOCRSA(n)			(0x38 + (n) * 4)
+
+/* Target image format */
+#define FIMC_REG_CITRGFMT			0x48
+#define FIMC_REG_CITRGFMT_INROT90		(1 << 31)
+#define FIMC_REG_CITRGFMT_YCBCR420		(0 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422		(1 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422_1P		(2 << 29)
+#define FIMC_REG_CITRGFMT_RGB			(3 << 29)
+#define FIMC_REG_CITRGFMT_FMT_MASK		(3 << 29)
+#define FIMC_REG_CITRGFMT_HSIZE_MASK		(0xfff << 16)
+#define FIMC_REG_CITRGFMT_FLIP_SHIFT		14
+#define FIMC_REG_CITRGFMT_FLIP_NORMAL		(0 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_X_MIRROR		(1 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_Y_MIRROR		(2 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_180		(3 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_MASK		(3 << 14)
+#define FIMC_REG_CITRGFMT_OUTROT90		(1 << 13)
+#define FIMC_REG_CITRGFMT_VSIZE_MASK		(0xfff << 0)
+
+/* Output DMA control */
+#define FIMC_REG_CIOCTRL			0x4c
+#define FIMC_REG_CIOCTRL_ORDER422_MASK		(3 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CRYCBY	(0 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CBYCRY	(1 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCRYCB	(2 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCBYCR	(3 << 0)
+#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE		(1 << 2)
+#define FIMC_REG_CIOCTRL_YCBCR_3PLANE		(0 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_2PLANE		(1 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK	(1 << 3)
+#define FIMC_REG_CIOCTRL_ALPHA_OUT_MASK		(0xff << 4)
+#define FIMC_REG_CIOCTRL_RGB16FMT_MASK		(3 << 16)
+#define FIMC_REG_CIOCTRL_RGB565			(0 << 16)
+#define FIMC_REG_CIOCTRL_ARGB1555		(1 << 16)
+#define FIMC_REG_CIOCTRL_ARGB4444		(2 << 16)
+#define FIMC_REG_CIOCTRL_ORDER2P_SHIFT		24
+#define FIMC_REG_CIOCTRL_ORDER2P_MASK		(3 << 24)
+#define FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB	(0 << 24)
+
+/* Pre-scaler control 1 */
+#define FIMC_REG_CISCPRERATIO			0x50
+
+#define FIMC_REG_CISCPREDST			0x54
+
+/* Main scaler control */
+#define FIMC_REG_CISCCTRL			0x58
+#define FIMC_REG_CISCCTRL_SCALERBYPASS		(1 << 31)
+#define FIMC_REG_CISCCTRL_SCALEUP_H		(1 << 30)
+#define FIMC_REG_CISCCTRL_SCALEUP_V		(1 << 29)
+#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE		(1 << 28)
+#define FIMC_REG_CISCCTRL_CSCY2R_WIDE		(1 << 27)
+#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO	(1 << 26)
+#define FIMC_REG_CISCCTRL_INTERLACE		(1 << 25)
+#define FIMC_REG_CISCCTRL_SCALERSTART		(1 << 15)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB565	(0 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB666	(1 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB888	(2 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_MASK	(3 << 13)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565	(0 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666	(1 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888	(2 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK	(3 << 11)
+#define FIMC_REG_CISCCTRL_RGB_EXT		(1 << 10)
+#define FIMC_REG_CISCCTRL_ONE2ONE		(1 << 9)
+#define FIMC_REG_CISCCTRL_MHRATIO(x)		((x) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO(x)		((x) << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_MASK		(0x1ff << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_MASK		(0x1ff << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_EXT(x)	(((x) >> 6) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_EXT(x)	(((x) >> 6) << 0)
+
+/* Target area */
+#define FIMC_REG_CITAREA			0x5c
+#define FIMC_REG_CITAREA_MASK			0x0fffffff
+
+/* General status */
+#define FIMC_REG_CISTATUS			0x64
+#define FIMC_REG_CISTATUS_OVFIY			(1 << 31)
+#define FIMC_REG_CISTATUS_OVFICB		(1 << 30)
+#define FIMC_REG_CISTATUS_OVFICR		(1 << 29)
+#define FIMC_REG_CISTATUS_VSYNC			(1 << 28)
+#define FIMC_REG_CISTATUS_FRAMECNT_MASK		(3 << 26)
+#define FIMC_REG_CISTATUS_FRAMECNT_SHIFT	26
+#define FIMC_REG_CISTATUS_WINOFF_EN		(1 << 25)
+#define FIMC_REG_CISTATUS_IMGCPT_EN		(1 << 22)
+#define FIMC_REG_CISTATUS_IMGCPT_SCEN		(1 << 21)
+#define FIMC_REG_CISTATUS_VSYNC_A		(1 << 20)
+#define FIMC_REG_CISTATUS_VSYNC_B		(1 << 19)
+#define FIMC_REG_CISTATUS_OVRLB			(1 << 18)
+#define FIMC_REG_CISTATUS_FRAME_END		(1 << 17)
+#define FIMC_REG_CISTATUS_LASTCAPT_END		(1 << 16)
+#define FIMC_REG_CISTATUS_VVALID_A		(1 << 15)
+#define FIMC_REG_CISTATUS_VVALID_B		(1 << 14)
+
+/* Indexes to the last and the currently processed buffer. */
+#define FIMC_REG_CISTATUS2			0x68
+
+/* Image capture control */
+#define FIMC_REG_CIIMGCPT			0xc0
+#define FIMC_REG_CIIMGCPT_IMGCPTEN		(1 << 31)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC		(1 << 30)
+#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE	(1 << 25)
+#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT		(1 << 18)
+
+/* Frame capture sequence */
+#define FIMC_REG_CICPTSEQ			0xc4
+
+/* Image effect */
+#define FIMC_REG_CIIMGEFF			0xd0
+#define FIMC_REG_CIIMGEFF_IE_ENABLE		(1 << 30)
+#define FIMC_REG_CIIMGEFF_IE_SC_BEFORE		(0 << 29)
+#define FIMC_REG_CIIMGEFF_IE_SC_AFTER		(1 << 29)
+#define FIMC_REG_CIIMGEFF_FIN_BYPASS		(0 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARBITRARY		(1 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_NEGATIVE		(2 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARTFREEZE		(3 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_EMBOSSING		(4 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_SILHOUETTE	(5 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_MASK		(7 << 26)
+#define FIMC_REG_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | 0xff)
+
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define FIMC_REG_CIIYSA(n)			(0xd4 + (n) * 0x70)
+#define FIMC_REG_CIICBSA(n)			(0xd8 + (n) * 0x70)
+#define FIMC_REG_CIICRSA(n)			(0xdc + (n) * 0x70)
+
+/* Real input DMA image size */
+#define FIMC_REG_CIREAL_ISIZE			0xf8
+#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN	(1 << 31)
+#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS	(1 << 30)
+
+/* Input DMA control */
+#define FIMC_REG_MSCTRL				0xfc
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK	(0xf << 24)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_MASK	(3 << 16)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_SHIFT	16
+#define FIMC_REG_MSCTRL_C_INT_IN_3PLANE		(0 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_2PLANE		(1 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_MASK		(1 << 15)
+#define FIMC_REG_MSCTRL_FLIP_SHIFT		13
+#define FIMC_REG_MSCTRL_FLIP_MASK		(3 << 13)
+#define FIMC_REG_MSCTRL_FLIP_NORMAL		(0 << 13)
+#define FIMC_REG_MSCTRL_FLIP_X_MIRROR		(1 << 13)
+#define FIMC_REG_MSCTRL_FLIP_Y_MIRROR		(2 << 13)
+#define FIMC_REG_MSCTRL_FLIP_180		(3 << 13)
+#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL		(1 << 12)
+#define FIMC_REG_MSCTRL_ORDER422_SHIFT		4
+#define FIMC_REG_MSCTRL_ORDER422_YCBYCR		(0 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CBYCRY		(1 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCRYCB		(2 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CRYCBY		(3 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_MASK		(3 << 4)
+#define FIMC_REG_MSCTRL_INPUT_EXTCAM		(0 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MEMORY		(1 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MASK		(1 << 3)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR420	(0 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422	(1 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P	(2 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_RGB		(3 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_MASK		(3 << 1)
+#define FIMC_REG_MSCTRL_ENVID			(1 << 0)
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT(x)	((x) << 24)
+
+/* Output DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIOYOFF			0x168
+#define FIMC_REG_CIOCBOFF			0x16c
+#define FIMC_REG_CIOCROFF			0x170
+
+/* Input DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIIYOFF			0x174
+#define FIMC_REG_CIICBOFF			0x178
+#define FIMC_REG_CIICROFF			0x17c
+
+/* Input DMA original image size */
+#define FIMC_REG_ORGISIZE			0x180
+
+/* Output DMA original image size */
+#define FIMC_REG_ORGOSIZE			0x184
+
+/* Real output DMA image size (extension register) */
+#define FIMC_REG_CIEXTEN			0x188
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT(x)		(((x) & 0x3f) << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT(x)		((x) & 0x3f)
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK	(0x3f << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK	0x3f
+
+#define FIMC_REG_CIDMAPARAM			0x18c
+#define FIMC_REG_CIDMAPARAM_R_LINEAR		(0 << 29)
+#define FIMC_REG_CIDMAPARAM_R_64X32		(3 << 29)
+#define FIMC_REG_CIDMAPARAM_W_LINEAR		(0 << 13)
+#define FIMC_REG_CIDMAPARAM_W_64X32		(3 << 13)
+#define FIMC_REG_CIDMAPARAM_TILE_MASK		((3 << 29) | (3 << 13))
+
+/* MIPI CSI image format */
+#define FIMC_REG_CSIIMGFMT			0x194
+#define FIMC_REG_CSIIMGFMT_YCBCR422_8BIT	0x1e
+#define FIMC_REG_CSIIMGFMT_RAW8			0x2a
+#define FIMC_REG_CSIIMGFMT_RAW10		0x2b
+#define FIMC_REG_CSIIMGFMT_RAW12		0x2c
+/* User defined formats, x = 1...16 */
+#define FIMC_REG_CSIIMGFMT_USER(x)		(0x30 + x - 1)
+
+/* Output frame buffer sequence mask */
+#define FIMC_REG_CIFCNTSEQ			0x1fc
+
+/*
+ * Function declarations
+ */
+void fimc_hw_reset(struct fimc_dev *fimc);
+void fimc_hw_set_rotation(struct fimc_ctx *ctx);
+void fimc_hw_set_target_format(struct fimc_ctx *ctx);
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
+void fimc_hw_en_capture(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
+void fimc_hw_set_input_path(struct fimc_ctx *ctx);
+void fimc_hw_set_output_path(struct fimc_ctx *ctx);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+			     int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+			      struct s5p_fimc_isp_info *cam);
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+				struct s5p_fimc_isp_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+			    struct s5p_fimc_isp_info *cam);
+void fimc_hw_clear_irq(struct fimc_dev *dev);
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
+void fimc_hw_dis_capture(struct fimc_dev *dev);
+u32 fimc_hw_get_frame_index(struct fimc_dev *dev);
+void fimc_activate_capture(struct fimc_ctx *ctx);
+void fimc_deactivate_capture(struct fimc_dev *fimc);
+
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @dev: FIMC device to configure
+ * @mask: bitmask of output DMA buffer registers; clear a bit to skip a buffer
+ *
+ * This function masks the output DMA ring buffers: it selects which of the
+ * 32 available output buffer address registers the DMA engine will use.
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
+{
+	writel(mask, dev->regs + FIMC_REG_CIFCNTSEQ);
+}
+
+#endif /* FIMC_REG_H_ */
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.c b/drivers/media/platform/s5p-fimc/mipi-csis.c
new file mode 100644
index 0000000..2f73d9e
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.c
@@ -0,0 +1,722 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <plat/mipi_csis.h>
+#include "mipi-csis.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* Register map definition */
+
+/* CSIS global control */
+#define S5PCSIS_CTRL			0x00
+#define S5PCSIS_CTRL_DPDN_DEFAULT	(0 << 31)
+#define S5PCSIS_CTRL_DPDN_SWAP		(1 << 31)
+#define S5PCSIS_CTRL_ALIGN_32BIT	(1 << 20)
+#define S5PCSIS_CTRL_UPDATE_SHADOW	(1 << 16)
+#define S5PCSIS_CTRL_WCLK_EXTCLK	(1 << 8)
+#define S5PCSIS_CTRL_RESET		(1 << 4)
+#define S5PCSIS_CTRL_ENABLE		(1 << 0)
+
+/* D-PHY control */
+#define S5PCSIS_DPHYCTRL		0x04
+#define S5PCSIS_DPHYCTRL_HSS_MASK	(0x1f << 27)
+#define S5PCSIS_DPHYCTRL_ENABLE		(0x1f << 0)
+
+#define S5PCSIS_CONFIG			0x08
+#define S5PCSIS_CFG_FMT_YCBCR422_8BIT	(0x1e << 2)
+#define S5PCSIS_CFG_FMT_RAW8		(0x2a << 2)
+#define S5PCSIS_CFG_FMT_RAW10		(0x2b << 2)
+#define S5PCSIS_CFG_FMT_RAW12		(0x2c << 2)
+/* User defined formats, x = 1...4 */
+#define S5PCSIS_CFG_FMT_USER(x)		((0x30 + x - 1) << 2)
+#define S5PCSIS_CFG_FMT_MASK		(0x3f << 2)
+#define S5PCSIS_CFG_NR_LANE_MASK	3
+
+/* Interrupt mask. */
+#define S5PCSIS_INTMSK			0x10
+#define S5PCSIS_INTMSK_EN_ALL		0xf000003f
+#define S5PCSIS_INTSRC			0x14
+
+/* Pixel resolution */
+#define S5PCSIS_RESOL			0x2c
+#define CSIS_MAX_PIX_WIDTH		0xffff
+#define CSIS_MAX_PIX_HEIGHT		0xffff
+
+enum {
+	CSIS_CLK_MUX,
+	CSIS_CLK_GATE,
+};
+
+static char *csi_clock_name[] = {
+	[CSIS_CLK_MUX]  = "sclk_csis",
+	[CSIS_CLK_GATE] = "csis",
+};
+#define NUM_CSIS_CLOCKS	ARRAY_SIZE(csi_clock_name)
+
+static const char * const csis_supply_name[] = {
+	"vdd11", /* 1.1V or 1.2V (s5pc100) MIPI CSI suppply */
+	"vdd18", /* VDD 1.8V and MIPI CSI PLL supply */
+};
+#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
+
+enum {
+	ST_POWERED	= 1,
+	ST_STREAMING	= 2,
+	ST_SUSPENDED	= 4,
+};
+
+/**
+ * struct csis_state - the driver's internal state data structure
+ * @lock: mutex serializing the subdev and power management operations,
+ *        protecting @format and @flags members
+ * @pads: CSIS pads array
+ * @sd: v4l2_subdev associated with CSIS device instance
+ * @pdev: CSIS platform device
+ * @regs: memory mapped I/O registers
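+ * @supplies: CSIS regulator supplies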
+ * @clock: CSIS clocks
+ * @irq: requested s5p-mipi-csis irq number
+ * @flags: the state variable for power and streaming control
+ * @csis_fmt: current CSIS pixel format
+ * @format: common media bus format for the source and sink pad
+ */
+struct csis_state {
+	struct mutex lock;
+	struct media_pad pads[CSIS_PADS_NUM];
+	struct v4l2_subdev sd;
+	struct platform_device *pdev;
+	void __iomem *regs;
+	struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
+	struct clk *clock[NUM_CSIS_CLOCKS];
+	int irq;
+	u32 flags;
+	const struct csis_pix_format *csis_fmt;
+	struct v4l2_mbus_framefmt format;
+};
+
+/**
+ * struct csis_pix_format - CSIS pixel format description
+ * @pix_width_alignment: horizontal pixel alignment, width will be
+ *                       multiple of 2^pix_width_alignment
+ * @code: corresponding media bus code
+ * @fmt_reg: S5PCSIS_CONFIG register value
+ * @data_alignment: MIPI-CSI data alignment in bits
+ */
+struct csis_pix_format {
+	unsigned int pix_width_alignment;
+	enum v4l2_mbus_pixelcode code;
+	u32 fmt_reg;
+	u8 data_alignment;
+};
+
+static const struct csis_pix_format s5pcsis_formats[] = {
+	{
+		.code = V4L2_MBUS_FMT_VYUY8_2X8,
+		.fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
+		.data_alignment = 32,
+	}, {
+		.code = V4L2_MBUS_FMT_JPEG_1X8,
+		.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+		.data_alignment = 32,
+	},
+};
+
+#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
+#define s5pcsis_read(__csis, __r) readl(__csis->regs + __r)
+
+static struct csis_state *sd_to_csis_state(struct v4l2_subdev *sdev)
+{
+	return container_of(sdev, struct csis_state, sd);
+}
+
+static const struct csis_pix_format *find_csis_format(
+	struct v4l2_mbus_framefmt *mf)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(s5pcsis_formats); i++)
+		if (mf->code == s5pcsis_formats[i].code)
+			return &s5pcsis_formats[i];
+	return NULL;
+}
+
+static void s5pcsis_enable_interrupts(struct csis_state *state, bool on)
+{
+	u32 val = s5pcsis_read(state, S5PCSIS_INTMSK);
+
+	val = on ? val | S5PCSIS_INTMSK_EN_ALL :
+		   val & ~S5PCSIS_INTMSK_EN_ALL;
+	s5pcsis_write(state, S5PCSIS_INTMSK, val);
+}
+
+static void s5pcsis_reset(struct csis_state *state)
+{
+	u32 val = s5pcsis_read(state, S5PCSIS_CTRL);
+
+	s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_RESET);
+	udelay(10);
+}
+
+static void s5pcsis_system_enable(struct csis_state *state, int on)
+{
+	u32 val;
+
+	val = s5pcsis_read(state, S5PCSIS_CTRL);
+	if (on)
+		val |= S5PCSIS_CTRL_ENABLE;
+	else
+		val &= ~S5PCSIS_CTRL_ENABLE;
+	s5pcsis_write(state, S5PCSIS_CTRL, val);
+
+	val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
+	if (on)
+		val |= S5PCSIS_DPHYCTRL_ENABLE;
+	else
+		val &= ~S5PCSIS_DPHYCTRL_ENABLE;
+	s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
+}
+
+/* Called with the state.lock mutex held */
+static void __s5pcsis_set_format(struct csis_state *state)
+{
+	struct v4l2_mbus_framefmt *mf = &state->format;
+	u32 val;
+
+	v4l2_dbg(1, debug, &state->sd, "fmt: %d, %d x %d\n",
+		 mf->code, mf->width, mf->height);
+
+	/* Color format */
+	val = s5pcsis_read(state, S5PCSIS_CONFIG);
+	val = (val & ~S5PCSIS_CFG_FMT_MASK) | state->csis_fmt->fmt_reg;
+	s5pcsis_write(state, S5PCSIS_CONFIG, val);
+
+	/* Pixel resolution */
+	val = (mf->width << 16) | mf->height;
+	s5pcsis_write(state, S5PCSIS_RESOL, val);
+}
+
+static void s5pcsis_set_hsync_settle(struct csis_state *state, int settle)
+{
+	u32 val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
+
+	val = (val & ~S5PCSIS_DPHYCTRL_HSS_MASK) | (settle << 27);
+	s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
+}
+
+static void s5pcsis_set_params(struct csis_state *state)
+{
+	struct s5p_platform_mipi_csis *pdata = state->pdev->dev.platform_data;
+	u32 val;
+
+	val = s5pcsis_read(state, S5PCSIS_CONFIG);
+	val = (val & ~S5PCSIS_CFG_NR_LANE_MASK) | (pdata->lanes - 1);
+	s5pcsis_write(state, S5PCSIS_CONFIG, val);
+
+	__s5pcsis_set_format(state);
+	s5pcsis_set_hsync_settle(state, pdata->hs_settle);
+
+	val = s5pcsis_read(state, S5PCSIS_CTRL);
+	if (state->csis_fmt->data_alignment == 32)
+		val |= S5PCSIS_CTRL_ALIGN_32BIT;
+	else /* 24-bits */
+		val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
+	/* Not using external clock. */
+	val &= ~S5PCSIS_CTRL_WCLK_EXTCLK;
+	s5pcsis_write(state, S5PCSIS_CTRL, val);
+
+	/* Update the shadow register. */
+	val = s5pcsis_read(state, S5PCSIS_CTRL);
+	s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_UPDATE_SHADOW);
+}
+
+static void s5pcsis_clk_put(struct csis_state *state)
+{
+	int i;
+
+	for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
+		if (IS_ERR_OR_NULL(state->clock[i]))
+			continue;
+		clk_unprepare(state->clock[i]);
+		clk_put(state->clock[i]);
+		state->clock[i] = NULL;
+	}
+}
+
+static int s5pcsis_clk_get(struct csis_state *state)
+{
+	struct device *dev = &state->pdev->dev;
+	int i, ret;
+
+	for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
+		state->clock[i] = clk_get(dev, csi_clock_name[i]);
+		if (IS_ERR(state->clock[i]))
+			goto err;
+		ret = clk_prepare(state->clock[i]);
+		if (ret < 0) {
+			clk_put(state->clock[i]);
+			state->clock[i] = NULL;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	s5pcsis_clk_put(state);
+	dev_err(dev, "failed to get clock: %s\n", csi_clock_name[i]);
+	return -ENXIO;
+}
+
+static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
+{
+	struct csis_state *state = sd_to_csis_state(sd);
+	struct device *dev = &state->pdev->dev;
+
+	if (on)
+		return pm_runtime_get_sync(dev);
+
+	return pm_runtime_put_sync(dev);
+}
+
+static void s5pcsis_start_stream(struct csis_state *state)
+{
+	s5pcsis_reset(state);
+	s5pcsis_set_params(state);
+	s5pcsis_system_enable(state, true);
+	s5pcsis_enable_interrupts(state, true);
+}
+
+static void s5pcsis_stop_stream(struct csis_state *state)
+{
+	s5pcsis_enable_interrupts(state, false);
+	s5pcsis_system_enable(state, false);
+}
+
+/* v4l2_subdev operations */
+static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct csis_state *state = sd_to_csis_state(sd);
+	int ret = 0;
+
+	v4l2_dbg(1, debug, sd, "%s: %d, state: 0x%x\n",
+		 __func__, enable, state->flags);
+
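+	/*
+	 * pm_runtime_get_sync() returns 1 when the device was already active;
+	 * this is not an error and is mapped back to 0 on return.
+	 */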
+	if (enable) {
+		ret = pm_runtime_get_sync(&state->pdev->dev);
+		if (ret && ret != 1)
+			return ret;
+	}
+	mutex_lock(&state->lock);
+	if (enable) {
+		if (state->flags & ST_SUSPENDED) {
+			ret = -EBUSY;
+			goto unlock;
+		}
+		s5pcsis_start_stream(state);
+		state->flags |= ST_STREAMING;
+	} else {
+		s5pcsis_stop_stream(state);
+		state->flags &= ~ST_STREAMING;
+	}
+unlock:
+	mutex_unlock(&state->lock);
+	if (!enable)
+		pm_runtime_put(&state->pdev->dev);
+
+	return ret == 1 ? 0 : ret;
+}
+
+static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	if (code->index >= ARRAY_SIZE(s5pcsis_formats))
+		return -EINVAL;
+
+	code->code = s5pcsis_formats[code->index].code;
+	return 0;
+}
+
+static struct csis_pix_format const *s5pcsis_try_format(
+	struct v4l2_mbus_framefmt *mf)
+{
+	struct csis_pix_format const *csis_fmt;
+
+	csis_fmt = find_csis_format(mf);
+	if (csis_fmt == NULL)
+		csis_fmt = &s5pcsis_formats[0];
+
+	mf->code = csis_fmt->code;
+	v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
+			      csis_fmt->pix_width_alignment,
+			      &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
+			      0);
+	return csis_fmt;
+}
+
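+/*
+ * Return the TRY format stored in the file handle for TRY requests,
+ * or the subdev's active format otherwise.
+ */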
+static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
+		struct csis_state *state, struct v4l2_subdev_fh *fh,
+		u32 pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return fh ? v4l2_subdev_get_try_format(fh, pad) : NULL;
+
+	return &state->format;
+}
+
+static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct csis_state *state = sd_to_csis_state(sd);
+	struct csis_pix_format const *csis_fmt;
+	struct v4l2_mbus_framefmt *mf;
+
+	if (fmt->pad != CSIS_PAD_SOURCE && fmt->pad != CSIS_PAD_SINK)
+		return -EINVAL;
+
+	mf = __s5pcsis_get_format(state, fh, fmt->pad, fmt->which);
+
+	if (fmt->pad == CSIS_PAD_SOURCE) {
+		if (mf) {
+			mutex_lock(&state->lock);
+			fmt->format = *mf;
+			mutex_unlock(&state->lock);
+		}
+		return 0;
+	}
+	csis_fmt = s5pcsis_try_format(&fmt->format);
+	if (mf) {
+		mutex_lock(&state->lock);
+		*mf = fmt->format;
+		if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+			state->csis_fmt = csis_fmt;
+		mutex_unlock(&state->lock);
+	}
+	return 0;
+}
+
+static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct csis_state *state = sd_to_csis_state(sd);
+	struct v4l2_mbus_framefmt *mf;
+
+	if (fmt->pad != CSIS_PAD_SOURCE && fmt->pad != CSIS_PAD_SINK)
+		return -EINVAL;
+
+	mf = __s5pcsis_get_format(state, fh, fmt->pad, fmt->which);
+	if (!mf)
+		return -EINVAL;
+
+	mutex_lock(&state->lock);
+	fmt->format = *mf;
+	mutex_unlock(&state->lock);
+	return 0;
+}
+
+static int s5pcsis_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+
+	format->colorspace = V4L2_COLORSPACE_JPEG;
+	format->code = s5pcsis_formats[0].code;
+	format->width = S5PCSIS_DEF_PIX_WIDTH;
+	format->height = S5PCSIS_DEF_PIX_HEIGHT;
+	format->field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops s5pcsis_sd_internal_ops = {
+	.open = s5pcsis_open,
+};
+
+static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
+	.s_power = s5pcsis_s_power,
+};
+
+static struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
+	.enum_mbus_code = s5pcsis_enum_mbus_code,
+	.get_fmt = s5pcsis_get_fmt,
+	.set_fmt = s5pcsis_set_fmt,
+};
+
+static struct v4l2_subdev_video_ops s5pcsis_video_ops = {
+	.s_stream = s5pcsis_s_stream,
+};
+
+static struct v4l2_subdev_ops s5pcsis_subdev_ops = {
+	.core = &s5pcsis_core_ops,
+	.pad = &s5pcsis_pad_ops,
+	.video = &s5pcsis_video_ops,
+};
+
+static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
+{
+	struct csis_state *state = dev_id;
+	u32 val;
+
+	/* Just clear the interrupt pending bits. */
+	val = s5pcsis_read(state, S5PCSIS_INTSRC);
+	s5pcsis_write(state, S5PCSIS_INTSRC, val);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit s5pcsis_probe(struct platform_device *pdev)
+{
+	struct s5p_platform_mipi_csis *pdata;
+	struct resource *mem_res;
+	struct csis_state *state;
+	int ret = -ENOMEM;
+	int i;
+
+	state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	mutex_init(&state->lock);
+	state->pdev = pdev;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL || pdata->phy_enable == NULL) {
+		dev_err(&pdev->dev, "Platform data not fully specified\n");
+		return -EINVAL;
+	}
+
+	if ((pdev->id == 1 && pdata->lanes > CSIS1_MAX_LANES) ||
+	    pdata->lanes > CSIS0_MAX_LANES) {
+		dev_err(&pdev->dev, "Unsupported number of data lanes: %d\n",
+			pdata->lanes);
+		return -EINVAL;
+	}
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	state->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
+	if (state->regs == NULL) {
+		dev_err(&pdev->dev, "Failed to request and remap io memory\n");
+		return -ENXIO;
+	}
+
+	state->irq = platform_get_irq(pdev, 0);
+	if (state->irq < 0) {
+		dev_err(&pdev->dev, "Failed to get irq\n");
+		return state->irq;
+	}
+
+	for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
+		state->supplies[i].supply = csis_supply_name[i];
+
+	ret = regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
+				 state->supplies);
+	if (ret)
+		return ret;
+
+	ret = s5pcsis_clk_get(state);
+	if (ret)
+		goto e_clkput;
+
+	clk_enable(state->clock[CSIS_CLK_MUX]);
+	if (pdata->clk_rate)
+		clk_set_rate(state->clock[CSIS_CLK_MUX], pdata->clk_rate);
+	else
+		dev_WARN(&pdev->dev, "No clock frequency specified!\n");
+
+	ret = devm_request_irq(&pdev->dev, state->irq, s5pcsis_irq_handler,
+			       0, dev_name(&pdev->dev), state);
+	if (ret) {
+		dev_err(&pdev->dev, "Interrupt request failed\n");
+		goto e_regput;
+	}
+
+	v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
+	state->sd.owner = THIS_MODULE;
+	strlcpy(state->sd.name, dev_name(&pdev->dev), sizeof(state->sd.name));
+	state->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	state->csis_fmt = &s5pcsis_formats[0];
+
+	state->format.code = s5pcsis_formats[0].code;
+	state->format.width = S5PCSIS_DEF_PIX_WIDTH;
+	state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
+
+	state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_init(&state->sd.entity,
+				CSIS_PADS_NUM, state->pads, 0);
+	if (ret < 0)
+		goto e_clkput;
+
+	/* This allows the host driver to retrieve the platform device id. */
+	v4l2_set_subdevdata(&state->sd, pdev);
+
+	/* .. and a pointer to the subdev. */
+	platform_set_drvdata(pdev, &state->sd);
+
+	pm_runtime_enable(&pdev->dev);
+	return 0;
+
+e_regput:
+	regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
+e_clkput:
+	clk_disable(state->clock[CSIS_CLK_MUX]);
+	s5pcsis_clk_put(state);
+	return ret;
+}
+
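+/*
+ * Power the receiver down: stop streaming, disable the D-PHY, the
+ * supplies and the gate clock. For system sleep (runtime == false) the
+ * ST_SUSPENDED flag is also set, so s_stream() refuses to start
+ * streaming until the device has been resumed.
+ */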
+static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
+{
+	struct s5p_platform_mipi_csis *pdata = dev->platform_data;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+	struct csis_state *state = sd_to_csis_state(sd);
+	int ret = 0;
+
+	v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
+		 __func__, state->flags);
+
+	mutex_lock(&state->lock);
+	if (state->flags & ST_POWERED) {
+		s5pcsis_stop_stream(state);
+		ret = pdata->phy_enable(state->pdev, false);
+		if (ret)
+			goto unlock;
+		ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+					     state->supplies);
+		if (ret)
+			goto unlock;
+		clk_disable(state->clock[CSIS_CLK_GATE]);
+		state->flags &= ~ST_POWERED;
+		if (!runtime)
+			state->flags |= ST_SUSPENDED;
+	}
+ unlock:
+	mutex_unlock(&state->lock);
+	return ret ? -EAGAIN : 0;
+}
+
+static int s5pcsis_pm_resume(struct device *dev, bool runtime)
+{
+	struct s5p_platform_mipi_csis *pdata = dev->platform_data;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+	struct csis_state *state = sd_to_csis_state(sd);
+	int ret = 0;
+
+	v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
+		 __func__, state->flags);
+
+	mutex_lock(&state->lock);
+	if (!runtime && !(state->flags & ST_SUSPENDED))
+		goto unlock;
+
+	if (!(state->flags & ST_POWERED)) {
+		ret = regulator_bulk_enable(CSIS_NUM_SUPPLIES,
+					    state->supplies);
+		if (ret)
+			goto unlock;
+		ret = pdata->phy_enable(state->pdev, true);
+		if (!ret) {
+			state->flags |= ST_POWERED;
+		} else {
+			regulator_bulk_disable(CSIS_NUM_SUPPLIES,
+					       state->supplies);
+			goto unlock;
+		}
+		clk_enable(state->clock[CSIS_CLK_GATE]);
+	}
+	if (state->flags & ST_STREAMING)
+		s5pcsis_start_stream(state);
+
+	state->flags &= ~ST_SUSPENDED;
+ unlock:
+	mutex_unlock(&state->lock);
+	return ret ? -EAGAIN : 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int s5pcsis_suspend(struct device *dev)
+{
+	return s5pcsis_pm_suspend(dev, false);
+}
+
+static int s5pcsis_resume(struct device *dev)
+{
+	return s5pcsis_pm_resume(dev, false);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int s5pcsis_runtime_suspend(struct device *dev)
+{
+	return s5pcsis_pm_suspend(dev, true);
+}
+
+static int s5pcsis_runtime_resume(struct device *dev)
+{
+	return s5pcsis_pm_resume(dev, true);
+}
+#endif
+
+static int __devexit s5pcsis_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+	struct csis_state *state = sd_to_csis_state(sd);
+
+	pm_runtime_disable(&pdev->dev);
+	s5pcsis_pm_suspend(&pdev->dev, false);
+	clk_disable(state->clock[CSIS_CLK_MUX]);
+	pm_runtime_set_suspended(&pdev->dev);
+	s5pcsis_clk_put(state);
+	regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
+
+	media_entity_cleanup(&state->sd.entity);
+
+	return 0;
+}
+
+static const struct dev_pm_ops s5pcsis_pm_ops = {
+	SET_RUNTIME_PM_OPS(s5pcsis_runtime_suspend, s5pcsis_runtime_resume,
+			   NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_suspend, s5pcsis_resume)
+};
+
+static struct platform_driver s5pcsis_driver = {
+	.probe		= s5pcsis_probe,
+	.remove		= __devexit_p(s5pcsis_remove),
+	.driver		= {
+		.name	= CSIS_DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &s5pcsis_pm_ops,
+	},
+};
+
+module_platform_driver(s5pcsis_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI-CSI2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.h b/drivers/media/platform/s5p-fimc/mipi-csis.h
new file mode 100644
index 0000000..2709286
--- /dev/null
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.h
@@ -0,0 +1,25 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef S5P_MIPI_CSIS_H_
+#define S5P_MIPI_CSIS_H_
+
+#define CSIS_DRIVER_NAME	"s5p-mipi-csis"
+#define CSIS_MAX_ENTITIES	2
+#define CSIS0_MAX_LANES		4
+#define CSIS1_MAX_LANES		2
+
+#define CSIS_PAD_SINK		0
+#define CSIS_PAD_SOURCE		1
+#define CSIS_PADS_NUM		2
+
+#define S5PCSIS_DEF_PIX_WIDTH	640
+#define S5PCSIS_DEF_PIX_HEIGHT	480
+
+#endif
diff --git a/drivers/media/platform/s5p-g2d/Makefile b/drivers/media/platform/s5p-g2d/Makefile
new file mode 100644
index 0000000..2c48c41
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/Makefile
@@ -0,0 +1,3 @@
+s5p-g2d-objs := g2d.o g2d-hw.o
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D)	+= s5p-g2d.o
diff --git a/drivers/media/platform/s5p-g2d/g2d-hw.c b/drivers/media/platform/s5p-g2d/g2d-hw.c
new file mode 100644
index 0000000..5b86cbe
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d-hw.c
@@ -0,0 +1,109 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/io.h>
+
+#include "g2d.h"
+#include "g2d-regs.h"
+
+#define w(x, a)	writel((x), d->regs + (a))
+#define r(a)	readl(d->regs + (a))
+
+/* g2d_reset clears all g2d registers */
+void g2d_reset(struct g2d_dev *d)
+{
+	w(1, SOFT_RESET_REG);
+}
+
+void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
+{
+	u32 n;
+
+	w(f->stride & 0xFFFF, SRC_STRIDE_REG);
+
+	n = f->o_height & 0xFFF;
+	n <<= 16;
+	n |= f->o_width & 0xFFF;
+	w(n, SRC_LEFT_TOP_REG);
+
+	n = f->bottom & 0xFFF;
+	n <<= 16;
+	n |= f->right & 0xFFF;
+	w(n, SRC_RIGHT_BOTTOM_REG);
+
+	w(f->fmt->hw, SRC_COLOR_MODE_REG);
+}
+
+void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a)
+{
+	w(a, SRC_BASE_ADDR_REG);
+}
+
+void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
+{
+	u32 n;
+
+	w(f->stride & 0xFFFF, DST_STRIDE_REG);
+
+	n = f->o_height & 0xFFF;
+	n <<= 16;
+	n |= f->o_width & 0xFFF;
+	w(n, DST_LEFT_TOP_REG);
+
+	n = f->bottom & 0xFFF;
+	n <<= 16;
+	n |= f->right & 0xFFF;
+	w(n, DST_RIGHT_BOTTOM_REG);
+
+	w(f->fmt->hw, DST_COLOR_MODE_REG);
+}
+
+void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a)
+{
+	w(a, DST_BASE_ADDR_REG);
+}
+
+void g2d_set_rop4(struct g2d_dev *d, u32 r)
+{
+	w(r, ROP4_REG);
+}
+
+void g2d_set_flip(struct g2d_dev *d, u32 r)
+{
+	w(r, SRC_MSK_DIRECT_REG);
+}
+
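+/* Bit 4 of BITBLT_COMMAND_REG is used as the stretch (scaling) enable;
+ * device_run() sets it whenever the source and destination crop sizes
+ * differ. */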
+u32 g2d_cmd_stretch(u32 e)
+{
+	e &= 1;
+	return e << 4;
+}
+
+void g2d_set_cmd(struct g2d_dev *d, u32 c)
+{
+	w(c, BITBLT_COMMAND_REG);
+}
+
+void g2d_start(struct g2d_dev *d)
+{
+	/* Clear cache */
+	w(0x7, CACHECTL_REG);
+	/* Enable interrupt */
+	w(1, INTEN_REG);
+	/* Start G2D engine */
+	w(1, BITBLT_START_REG);
+}
+
+void g2d_clear_int(struct g2d_dev *d)
+{
+	w(1, INTC_PEND_REG);
+}
diff --git a/drivers/media/platform/s5p-g2d/g2d-regs.h b/drivers/media/platform/s5p-g2d/g2d-regs.h
new file mode 100644
index 0000000..02e1cf5
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d-regs.h
@@ -0,0 +1,115 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+/* General Registers */
+#define SOFT_RESET_REG		0x0000	/* Software reset reg */
+#define INTEN_REG		0x0004	/* Interrupt Enable reg */
+#define INTC_PEND_REG		0x000C	/* Interrupt Control Pending reg */
+#define FIFO_STAT_REG		0x0010	/* Command FIFO Status reg */
+#define AXI_ID_MODE_REG		0x0014	/* AXI Read ID Mode reg */
+#define CACHECTL_REG		0x0018	/* Cache & Buffer clear reg */
+#define AXI_MODE_REG		0x001C	/* AXI Mode reg */
+
+/* Command Registers */
+#define BITBLT_START_REG	0x0100	/* BitBLT Start reg */
+#define BITBLT_COMMAND_REG	0x0104	/* Command reg for BitBLT */
+
+/* Parameter Setting Registers (Rotate & Direction) */
+#define ROTATE_REG		0x0200	/* Rotation reg */
+#define SRC_MSK_DIRECT_REG	0x0204	/* Src and Mask Direction reg */
+#define DST_PAT_DIRECT_REG	0x0208	/* Dest and Pattern Direction reg */
+
+/* Parameter Setting Registers (Src) */
+#define SRC_SELECT_REG		0x0300	/* Src Image Selection reg */
+#define SRC_BASE_ADDR_REG	0x0304	/* Src Image Base Address reg */
+#define SRC_STRIDE_REG		0x0308	/* Src Stride reg */
+#define SRC_COLOR_MODE_REG	0x030C	/* Src Image Color Mode reg */
+#define SRC_LEFT_TOP_REG	0x0310	/* Src Left Top Coordinate reg */
+#define SRC_RIGHT_BOTTOM_REG	0x0314	/* Src Right Bottom Coordinate reg */
+
+/* Parameter Setting Registers (Dest) */
+#define DST_SELECT_REG		0x0400	/* Dest Image Selection reg */
+#define DST_BASE_ADDR_REG	0x0404	/* Dest Image Base Address reg */
+#define DST_STRIDE_REG		0x0408	/* Dest Stride reg */
+#define DST_COLOR_MODE_REG	0x040C	/* Dest Image Color Mode reg */
+#define DST_LEFT_TOP_REG	0x0410	/* Dest Left Top Coordinate reg */
+#define DST_RIGHT_BOTTOM_REG	0x0414	/* Dest Right Bottom Coordinate reg */
+
+/* Parameter Setting Registers (Pattern) */
+#define PAT_BASE_ADDR_REG	0x0500	/* Pattern Image Base Address reg */
+#define PAT_SIZE_REG		0x0504	/* Pattern Image Size reg */
+#define PAT_COLOR_MODE_REG	0x0508	/* Pattern Image Color Mode reg */
+#define PAT_OFFSET_REG		0x050C	/* Pattern Left Top Coordinate reg */
+#define PAT_STRIDE_REG		0x0510	/* Pattern Stride reg */
+
+/* Parameter Setting Registers (Mask) */
+#define MASK_BASE_ADDR_REG	0x0520	/* Mask Base Address reg */
+#define MASK_STRIDE_REG		0x0524	/* Mask Stride reg */
+
+/* Parameter Setting Registers (Clipping Window) */
+#define CW_LT_REG		0x0600	/* LeftTop coordinates of Clip Window */
+#define CW_RB_REG		0x0604	/* RightBottom coordinates of Clip
+								Window */
+
+/* Parameter Setting Registers (ROP & Alpha Setting) */
+#define THIRD_OPERAND_REG	0x0610	/* Third Operand Selection reg */
+#define ROP4_REG		0x0614	/* Raster Operation reg */
+#define ALPHA_REG		0x0618	/* Alpha value, Fading offset value */
+
+/* Parameter Setting Registers (Color) */
+#define FG_COLOR_REG		0x0700	/* Foreground Color reg */
+#define BG_COLOR_REG		0x0704	/* Background Color reg */
+#define BS_COLOR_REG		0x0708	/* Blue Screen Color reg */
+
+/* Parameter Setting Registers (Color Key) */
+#define SRC_COLORKEY_CTRL_REG	0x0710	/* Src Colorkey control reg */
+#define SRC_COLORKEY_DR_MIN_REG	0x0714	/* Src Colorkey Decision Reference
+								Min reg */
+#define SRC_COLORKEY_DR_MAX_REG	0x0718	/* Src Colorkey Decision Reference
+								Max reg */
+#define DST_COLORKEY_CTRL_REG	0x071C	/* Dest Colorkey control reg */
+#define DST_COLORKEY_DR_MIN_REG	0x0720	/* Dest Colorkey Decision Reference
+								Min reg */
+#define DST_COLORKEY_DR_MAX_REG	0x0724	/* Dest Colorkey Decision Reference
+								Max reg */
+
+/* Color mode values */
+
+#define ORDER_XRGB		0
+#define ORDER_RGBX		1
+#define ORDER_XBGR		2
+#define ORDER_BGRX		3
+
+#define MODE_XRGB_8888		0
+#define MODE_ARGB_8888		1
+#define MODE_RGB_565		2
+#define MODE_XRGB_1555		3
+#define MODE_ARGB_1555		4
+#define MODE_XRGB_4444		5
+#define MODE_ARGB_4444		6
+#define MODE_PACKED_RGB_888	7
+
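+/* A color mode value combines the channel order (upper nibble) with the
+ * pixel layout (lower nibble), e.g. COLOR_MODE(ORDER_XRGB, MODE_RGB_565)
+ * for 16-bit RGB565 data. */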
+#define COLOR_MODE(o, m)	(((o) << 4) | (m))
+
+/* ROP4 operation values */
+#define ROP4_COPY		0xCCCC
+#define ROP4_INVERT		0x3333
+
+/* Hardware limits */
+#define MAX_WIDTH		8000
+#define MAX_HEIGHT		8000
+
+#define G2D_TIMEOUT		500
+
+#define DEFAULT_WIDTH		100
+#define DEFAULT_HEIGHT		100
+
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
new file mode 100644
index 0000000..0edc2df
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -0,0 +1,847 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "g2d.h"
+#include "g2d-regs.h"
+
+#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)
+
+static struct g2d_fmt formats[] = {
+	{
+		.name	= "XRGB_8888",
+		.fourcc	= V4L2_PIX_FMT_RGB32,
+		.depth	= 32,
+		.hw	= COLOR_MODE(ORDER_XRGB, MODE_XRGB_8888),
+	},
+	{
+		.name	= "RGB_565",
+		.fourcc	= V4L2_PIX_FMT_RGB565X,
+		.depth	= 16,
+		.hw	= COLOR_MODE(ORDER_XRGB, MODE_RGB_565),
+	},
+	{
+		.name	= "XRGB_1555",
+		.fourcc	= V4L2_PIX_FMT_RGB555X,
+		.depth	= 16,
+		.hw	= COLOR_MODE(ORDER_XRGB, MODE_XRGB_1555),
+	},
+	{
+		.name	= "XRGB_4444",
+		.fourcc	= V4L2_PIX_FMT_RGB444,
+		.depth	= 16,
+		.hw	= COLOR_MODE(ORDER_XRGB, MODE_XRGB_4444),
+	},
+	{
+		.name	= "PACKED_RGB_888",
+		.fourcc	= V4L2_PIX_FMT_RGB24,
+		.depth	= 24,
+		.hw	= COLOR_MODE(ORDER_XRGB, MODE_PACKED_RGB_888),
+	},
+};
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct g2d_frame def_frame = {
+	.width		= DEFAULT_WIDTH,
+	.height		= DEFAULT_HEIGHT,
+	.c_width	= DEFAULT_WIDTH,
+	.c_height	= DEFAULT_HEIGHT,
+	.o_width	= 0,
+	.o_height	= 0,
+	.fmt		= &formats[0],
+	.right		= DEFAULT_WIDTH,
+	.bottom		= DEFAULT_HEIGHT,
+};
+
+static struct g2d_fmt *find_fmt(struct v4l2_format *f)
+{
+	unsigned int i;
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (formats[i].fourcc == f->fmt.pix.pixelformat)
+			return &formats[i];
+	}
+	return NULL;
+}
+
+
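+/* Map a buffer type onto the frame it operates on: the OUTPUT queue is
+ * the blit source, the CAPTURE queue is the blit destination. */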
+static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
+							enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		return &ctx->in;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		return &ctx->out;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+}
+
+static int g2d_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+			   unsigned int *nbuffers, unsigned int *nplanes,
+			   unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct g2d_ctx *ctx = vb2_get_drv_priv(vq);
+	struct g2d_frame *f = get_frame(ctx, vq->type);
+
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+
+	sizes[0] = f->size;
+	*nplanes = 1;
+	alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+	if (*nbuffers == 0)
+		*nbuffers = 1;
+
+	return 0;
+}
+
+static int g2d_buf_prepare(struct vb2_buffer *vb)
+{
+	struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct g2d_frame *f = get_frame(ctx, vb->vb2_queue->type);
+
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+	vb2_set_plane_payload(vb, 0, f->size);
+	return 0;
+}
+
+static void g2d_buf_queue(struct vb2_buffer *vb)
+{
+	struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+
+static struct vb2_ops g2d_qops = {
+	.queue_setup	= g2d_queue_setup,
+	.buf_prepare	= g2d_buf_prepare,
+	.buf_queue	= g2d_buf_queue,
+};
+
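+/* Set up the source (OUTPUT) and destination (CAPTURE) vb2 queues; both
+ * use the dma-contig allocator and support MMAP and USERPTR I/O. */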
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+						struct vb2_queue *dst_vq)
+{
+	struct g2d_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->ops = &g2d_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->ops = &g2d_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+	return vb2_queue_init(dst_vq);
+}
+
+static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct g2d_ctx *ctx = container_of(ctrl->handler, struct g2d_ctx,
+								ctrl_handler);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->dev->ctrl_lock, flags);
+	switch (ctrl->id) {
+	case V4L2_CID_COLORFX:
+		if (ctrl->val == V4L2_COLORFX_NEGATIVE)
+			ctx->rop = ROP4_INVERT;
+		else
+			ctx->rop = ROP4_COPY;
+		break;
+
+	case V4L2_CID_HFLIP:
+		ctx->flip = ctx->ctrl_hflip->val | (ctx->ctrl_vflip->val << 1);
+		break;
+
+	}
+	spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
+	.s_ctrl		= g2d_s_ctrl,
+};
+
+static int g2d_setup_ctrls(struct g2d_ctx *ctx)
+{
+	struct g2d_dev *dev = ctx->dev;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+
+	ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
+						V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+	ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
+						V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+	v4l2_ctrl_new_std_menu(
+		&ctx->ctrl_handler,
+		&g2d_ctrl_ops,
+		V4L2_CID_COLORFX,
+		V4L2_COLORFX_NEGATIVE,
+		~((1 << V4L2_COLORFX_NONE) | (1 << V4L2_COLORFX_NEGATIVE)),
+		V4L2_COLORFX_NONE);
+
+	if (ctx->ctrl_handler.error) {
+		int err = ctx->ctrl_handler.error;
+		v4l2_err(&dev->v4l2_dev, "g2d_setup_ctrls failed\n");
+		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+		return err;
+	}
+
+	v4l2_ctrl_cluster(2, &ctx->ctrl_hflip);
+
+	return 0;
+}
+
+static int g2d_open(struct file *file)
+{
+	struct g2d_dev *dev = video_drvdata(file);
+	struct g2d_ctx *ctx = NULL;
+	int ret = 0;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	ctx->dev = dev;
+	/* Set default formats */
+	ctx->in		= def_frame;
+	ctx->out	= def_frame;
+
+	if (mutex_lock_interruptible(&dev->mutex)) {
+		kfree(ctx);
+		return -ERESTARTSYS;
+	}
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+	if (IS_ERR(ctx->m2m_ctx)) {
+		ret = PTR_ERR(ctx->m2m_ctx);
+		mutex_unlock(&dev->mutex);
+		kfree(ctx);
+		return ret;
+	}
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+
+	g2d_setup_ctrls(ctx);
+
+	/* Write the default values to the ctx struct */
+	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+	mutex_unlock(&dev->mutex);
+
+	v4l2_info(&dev->v4l2_dev, "instance opened\n");
+	return 0;
+}
+
+static int g2d_release(struct file *file)
+{
+	struct g2d_dev *dev = video_drvdata(file);
+	struct g2d_ctx *ctx = fh2ctx(file->private_data);
+
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+	v4l2_info(&dev->v4l2_dev, "instance closed\n");
+	return 0;
+}
+
+
+static int vidioc_querycap(struct file *file, void *priv,
+				struct v4l2_capability *cap)
+{
+	strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1);
+	strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
+	cap->bus_info[0] = 0;
+	cap->version = KERNEL_VERSION(1, 0, 0);
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left only for backward compatibility
+	 * and are scheduled for removal.
+	 */
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+			    V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+	struct g2d_fmt *fmt;
+	if (f->index >= NUM_FORMATS)
+		return -EINVAL;
+	fmt = &formats[f->index];
+	f->pixelformat = fmt->fourcc;
+	strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+	return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+	struct g2d_ctx *ctx = prv;
+	struct vb2_queue *vq;
+	struct g2d_frame *frm;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+	frm = get_frame(ctx, f->type);
+	if (IS_ERR(frm))
+		return PTR_ERR(frm);
+
+	f->fmt.pix.width		= frm->width;
+	f->fmt.pix.height		= frm->height;
+	f->fmt.pix.field		= V4L2_FIELD_NONE;
+	f->fmt.pix.pixelformat		= frm->fmt->fourcc;
+	f->fmt.pix.bytesperline		= (frm->width * frm->fmt->depth) >> 3;
+	f->fmt.pix.sizeimage		= frm->size;
+	return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+	struct g2d_fmt *fmt;
+	enum v4l2_field *field;
+
+	fmt = find_fmt(f);
+	if (!fmt)
+		return -EINVAL;
+
+	field = &f->fmt.pix.field;
+	if (*field == V4L2_FIELD_ANY)
+		*field = V4L2_FIELD_NONE;
+	else if (*field != V4L2_FIELD_NONE)
+		return -EINVAL;
+
+	if (f->fmt.pix.width > MAX_WIDTH)
+		f->fmt.pix.width = MAX_WIDTH;
+	if (f->fmt.pix.height > MAX_HEIGHT)
+		f->fmt.pix.height = MAX_HEIGHT;
+
+	if (f->fmt.pix.width < 1)
+		f->fmt.pix.width = 1;
+	if (f->fmt.pix.height < 1)
+		f->fmt.pix.height = 1;
+
+	f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_dev *dev = ctx->dev;
+	struct vb2_queue *vq;
+	struct g2d_frame *frm;
+	struct g2d_fmt *fmt;
+	int ret = 0;
+
+	/* Adjust all values according to the hardware capabilities
+	 * and the chosen format. */
+	ret = vidioc_try_fmt(file, prv, f);
+	if (ret)
+		return ret;
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&dev->v4l2_dev, "queue (%d) busy\n", f->type);
+		return -EBUSY;
+	}
+	frm = get_frame(ctx, f->type);
+	if (IS_ERR(frm))
+		return PTR_ERR(frm);
+	fmt = find_fmt(f);
+	if (!fmt)
+		return -EINVAL;
+	frm->width	= f->fmt.pix.width;
+	frm->height	= f->fmt.pix.height;
+	frm->size	= f->fmt.pix.sizeimage;
+	/* Reset crop settings */
+	frm->o_width	= 0;
+	frm->o_height	= 0;
+	frm->c_width	= frm->width;
+	frm->c_height	= frm->height;
+	frm->right	= frm->width;
+	frm->bottom	= frm->height;
+	frm->fmt	= fmt;
+	frm->stride	= f->fmt.pix.bytesperline;
+	return 0;
+}
+
+static unsigned int g2d_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct g2d_ctx *ctx = fh2ctx(file->private_data);
+	struct g2d_dev *dev = ctx->dev;
+	unsigned int res;
+
+	mutex_lock(&dev->mutex);
+	res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	mutex_unlock(&dev->mutex);
+	return res;
+}
+
+static int g2d_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct g2d_ctx *ctx = fh2ctx(file->private_data);
+	struct g2d_dev *dev = ctx->dev;
+	int ret;
+
+	if (mutex_lock_interruptible(&dev->mutex))
+		return -ERESTARTSYS;
+	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+			struct v4l2_requestbuffers *reqbufs)
+{
+	struct g2d_ctx *ctx = priv;
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+			struct v4l2_buffer *buf)
+{
+	struct g2d_ctx *ctx = priv;
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct g2d_ctx *ctx = priv;
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct g2d_ctx *ctx = priv;
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+
+static int vidioc_streamon(struct file *file, void *priv,
+					enum v4l2_buf_type type)
+{
+	struct g2d_ctx *ctx = priv;
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+					enum v4l2_buf_type type)
+{
+	struct g2d_ctx *ctx = priv;
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_cropcap(struct file *file, void *priv,
+					struct v4l2_cropcap *cr)
+{
+	struct g2d_ctx *ctx = priv;
+	struct g2d_frame *f;
+
+	f = get_frame(ctx, cr->type);
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+
+	cr->bounds.left		= 0;
+	cr->bounds.top		= 0;
+	cr->bounds.width	= f->width;
+	cr->bounds.height	= f->height;
+	cr->defrect		= cr->bounds;
+	return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_frame *f;
+
+	f = get_frame(ctx, cr->type);
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+
+	cr->c.left	= f->o_width;
+	cr->c.top	= f->o_height;
+	cr->c.width	= f->c_width;
+	cr->c.height	= f->c_height;
+	return 0;
+}
+
+static int vidioc_try_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_dev *dev = ctx->dev;
+	struct g2d_frame *f;
+
+	f = get_frame(ctx, cr->type);
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+
+	if (cr->c.top < 0 || cr->c.left < 0) {
+		v4l2_err(&dev->v4l2_dev,
+			"doesn't support negative values for top & left\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_frame *f;
+	int ret;
+
+	ret = vidioc_try_crop(file, prv, cr);
+	if (ret)
+		return ret;
+	f = get_frame(ctx, cr->type);
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+
+	f->c_width	= cr->c.width;
+	f->c_height	= cr->c.height;
+	f->o_width	= cr->c.left;
+	f->o_height	= cr->c.top;
+	f->bottom	= f->o_height + f->c_height;
+	f->right	= f->o_width + f->c_width;
+	return 0;
+}
+
+static void g2d_lock(void *prv)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_dev *dev = ctx->dev;
+	mutex_lock(&dev->mutex);
+}
+
+static void g2d_unlock(void *prv)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_dev *dev = ctx->dev;
+	mutex_unlock(&dev->mutex);
+}
+
+static void job_abort(void *prv)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_dev *dev = ctx->dev;
+	int ret;
+
+	if (dev->curr == NULL) /* No job currently running */
+		return;
+
+	ret = wait_event_timeout(dev->irq_queue,
+		dev->curr == NULL,
+		msecs_to_jiffies(G2D_TIMEOUT));
+}
+
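+/* Run a single BitBLT job: program the source and destination frames and
+ * buffer addresses, the ROP and flip settings, enable stretching when the
+ * crop sizes differ, and start the engine. */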
+static void device_run(void *prv)
+{
+	struct g2d_ctx *ctx = prv;
+	struct g2d_dev *dev = ctx->dev;
+	struct vb2_buffer *src, *dst;
+	unsigned long flags;
+	u32 cmd = 0;
+
+	dev->curr = ctx;
+
+	src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+	clk_enable(dev->gate);
+	g2d_reset(dev);
+
+	spin_lock_irqsave(&dev->ctrl_lock, flags);
+
+	g2d_set_src_size(dev, &ctx->in);
+	g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
+
+	g2d_set_dst_size(dev, &ctx->out);
+	g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
+
+	g2d_set_rop4(dev, ctx->rop);
+	g2d_set_flip(dev, ctx->flip);
+
+	if (ctx->in.c_width != ctx->out.c_width ||
+		ctx->in.c_height != ctx->out.c_height)
+		cmd |= g2d_cmd_stretch(1);
+	g2d_set_cmd(dev, cmd);
+	g2d_start(dev);
+
+	spin_unlock_irqrestore(&dev->ctrl_lock, flags);
+}
+
+static irqreturn_t g2d_isr(int irq, void *prv)
+{
+	struct g2d_dev *dev = prv;
+	struct g2d_ctx *ctx = dev->curr;
+	struct vb2_buffer *src, *dst;
+
+	g2d_clear_int(dev);
+	clk_disable(dev->gate);
+
+	BUG_ON(ctx == NULL);
+
+	src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+	dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+	BUG_ON(src == NULL);
+	BUG_ON(dst == NULL);
+
+	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+
+	dev->curr = NULL;
+	wake_up(&dev->irq_queue);
+	return IRQ_HANDLED;
+}
+
+static const struct v4l2_file_operations g2d_fops = {
+	.owner		= THIS_MODULE,
+	.open		= g2d_open,
+	.release	= g2d_release,
+	.poll		= g2d_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= g2d_mmap,
+};
+
+static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
+	.vidioc_querycap	= vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap	= vidioc_enum_fmt,
+	.vidioc_g_fmt_vid_cap		= vidioc_g_fmt,
+	.vidioc_try_fmt_vid_cap		= vidioc_try_fmt,
+	.vidioc_s_fmt_vid_cap		= vidioc_s_fmt,
+
+	.vidioc_enum_fmt_vid_out	= vidioc_enum_fmt,
+	.vidioc_g_fmt_vid_out		= vidioc_g_fmt,
+	.vidioc_try_fmt_vid_out		= vidioc_try_fmt,
+	.vidioc_s_fmt_vid_out		= vidioc_s_fmt,
+
+	.vidioc_reqbufs			= vidioc_reqbufs,
+	.vidioc_querybuf		= vidioc_querybuf,
+
+	.vidioc_qbuf			= vidioc_qbuf,
+	.vidioc_dqbuf			= vidioc_dqbuf,
+
+	.vidioc_streamon		= vidioc_streamon,
+	.vidioc_streamoff		= vidioc_streamoff,
+
+	.vidioc_g_crop			= vidioc_g_crop,
+	.vidioc_s_crop			= vidioc_s_crop,
+	.vidioc_cropcap			= vidioc_cropcap,
+};
+
+static struct video_device g2d_videodev = {
+	.name		= G2D_NAME,
+	.fops		= &g2d_fops,
+	.ioctl_ops	= &g2d_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release,
+};
+
+static struct v4l2_m2m_ops g2d_m2m_ops = {
+	.device_run	= device_run,
+	.job_abort	= job_abort,
+	.lock		= g2d_lock,
+	.unlock		= g2d_unlock,
+};
+
+static int g2d_probe(struct platform_device *pdev)
+{
+	struct g2d_dev *dev;
+	struct video_device *vfd;
+	struct resource *res;
+	int ret = 0;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->ctrl_lock);
+	mutex_init(&dev->mutex);
+	atomic_set(&dev->num_inst, 0);
+	init_waitqueue_head(&dev->irq_queue);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	dev->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (dev->regs == NULL) {
+		dev_err(&pdev->dev, "Failed to obtain io memory\n");
+		return -ENOENT;
+	}
+
+	dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
+	if (IS_ERR_OR_NULL(dev->clk)) {
+		dev_err(&pdev->dev, "failed to get g2d clock\n");
+		return -ENXIO;
+	}
+
+	ret = clk_prepare(dev->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare g2d clock\n");
+		goto put_clk;
+	}
+
+	dev->gate = clk_get(&pdev->dev, "fimg2d");
+	if (IS_ERR_OR_NULL(dev->gate)) {
+		dev_err(&pdev->dev, "failed to get g2d clock gate\n");
+		ret = -ENXIO;
+		goto unprep_clk;
+	}
+
+	ret = clk_prepare(dev->gate);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
+		goto put_clk_gate;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to find IRQ\n");
+		ret = -ENXIO;
+		goto unprep_clk_gate;
+	}
+
+	dev->irq = res->start;
+
+	ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
+						0, pdev->name, dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to install IRQ\n");
+		goto put_clk_gate;
+	}
+
+	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(dev->alloc_ctx)) {
+		ret = PTR_ERR(dev->alloc_ctx);
+		goto unprep_clk_gate;
+	}
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret)
+		goto alloc_ctx_cleanup;
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto unreg_v4l2_dev;
+	}
+	*vfd = g2d_videodev;
+	vfd->lock = &dev->mutex;
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+		goto rel_vdev;
+	}
+	video_set_drvdata(vfd, dev);
+	snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
+	dev->vfd = vfd;
+	v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
+								vfd->num);
+	platform_set_drvdata(pdev, dev);
+	dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
+	if (IS_ERR(dev->m2m_dev)) {
+		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(dev->m2m_dev);
+		goto unreg_video_dev;
+	}
+
+	def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+
+	return 0;
+
+unreg_video_dev:
+	video_unregister_device(dev->vfd);
+rel_vdev:
+	video_device_release(vfd);
+unreg_v4l2_dev:
+	v4l2_device_unregister(&dev->v4l2_dev);
+alloc_ctx_cleanup:
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+unprep_clk_gate:
+	clk_unprepare(dev->gate);
+put_clk_gate:
+	clk_put(dev->gate);
+unprep_clk:
+	clk_unprepare(dev->clk);
+put_clk:
+	clk_put(dev->clk);
+
+	return ret;
+}
+
+static int g2d_remove(struct platform_device *pdev)
+{
+	struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev);
+
+	v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME);
+	v4l2_m2m_release(dev->m2m_dev);
+	video_unregister_device(dev->vfd);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+	clk_unprepare(dev->gate);
+	clk_put(dev->gate);
+	clk_unprepare(dev->clk);
+	clk_put(dev->clk);
+	return 0;
+}
+
+static struct platform_driver g2d_pdrv = {
+	.probe		= g2d_probe,
+	.remove		= g2d_remove,
+	.driver		= {
+		.name = G2D_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(g2d_pdrv);
+
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("S5P G2D 2d graphics accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
new file mode 100644
index 0000000..6b765b0
--- /dev/null
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -0,0 +1,86 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#define G2D_NAME "s5p-g2d"
+
+struct g2d_dev {
+	struct v4l2_device	v4l2_dev;
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct video_device	*vfd;
+	struct mutex		mutex;
+	spinlock_t		ctrl_lock;
+	atomic_t		num_inst;
+	struct vb2_alloc_ctx	*alloc_ctx;
+	void __iomem		*regs;
+	struct clk		*clk;
+	struct clk		*gate;
+	struct g2d_ctx		*curr;
+	int irq;
+	wait_queue_head_t	irq_queue;
+};
+
+struct g2d_frame {
+	/* Original dimensions */
+	u32	width;
+	u32	height;
+	/* Crop size */
+	u32	c_width;
+	u32	c_height;
+	/* Offset */
+	u32	o_width;
+	u32	o_height;
+	/* Image format */
+	struct g2d_fmt *fmt;
+	/* Variables that can be calculated once and reused */
+	u32	stride;
+	u32	bottom;
+	u32	right;
+	u32	size;
+};
+
+struct g2d_ctx {
+	struct v4l2_fh fh;
+	struct g2d_dev		*dev;
+	struct v4l2_m2m_ctx     *m2m_ctx;
+	struct g2d_frame	in;
+	struct g2d_frame	out;
+	struct v4l2_ctrl	*ctrl_hflip;
+	struct v4l2_ctrl	*ctrl_vflip;
+	struct v4l2_ctrl_handler ctrl_handler;
+	u32 rop;
+	u32 flip;
+};
+
+struct g2d_fmt {
+	char	*name;
+	u32	fourcc;
+	int	depth;
+	u32	hw;
+};
+
+
+void g2d_reset(struct g2d_dev *d);
+void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f);
+void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a);
+void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f);
+void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a);
+void g2d_start(struct g2d_dev *d);
+void g2d_clear_int(struct g2d_dev *d);
+void g2d_set_rop4(struct g2d_dev *d, u32 r);
+void g2d_set_flip(struct g2d_dev *d, u32 r);
+u32 g2d_cmd_stretch(u32 e);
+void g2d_set_cmd(struct g2d_dev *d, u32 c);
+
+
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
new file mode 100644
index 0000000..ddc2900
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -0,0 +1,2 @@
+s5p-jpeg-objs := jpeg-core.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
new file mode 100644
index 0000000..72c3e52
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -0,0 +1,1529 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw.h"
+
+static struct s5p_jpeg_fmt formats_enc[] = {
+	{
+		.name		= "JPEG JFIF",
+		.fourcc		= V4L2_PIX_FMT_JPEG,
+		.colplanes	= 1,
+		.types		= MEM2MEM_CAPTURE,
+	},
+	{
+		.name		= "YUV 4:2:2 packed, YCbYCr",
+		.fourcc		= V4L2_PIX_FMT_YUYV,
+		.depth		= 16,
+		.colplanes	= 1,
+		.types		= MEM2MEM_OUTPUT,
+	},
+	{
+		.name		= "RGB565",
+		.fourcc		= V4L2_PIX_FMT_RGB565,
+		.depth		= 16,
+		.colplanes	= 1,
+		.types		= MEM2MEM_OUTPUT,
+	},
+};
+#define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc)
+
+static struct s5p_jpeg_fmt formats_dec[] = {
+	{
+		.name		= "YUV 4:2:0 planar, YCbCr",
+		.fourcc		= V4L2_PIX_FMT_YUV420,
+		.depth		= 12,
+		.colplanes	= 3,
+		.h_align	= 4,
+		.v_align	= 4,
+		.types		= MEM2MEM_CAPTURE,
+	},
+	{
+		.name		= "YUV 4:2:2 packed, YCbYCr",
+		.fourcc		= V4L2_PIX_FMT_YUYV,
+		.depth		= 16,
+		.colplanes	= 1,
+		.h_align	= 4,
+		.v_align	= 3,
+		.types		= MEM2MEM_CAPTURE,
+	},
+	{
+		.name		= "JPEG JFIF",
+		.fourcc		= V4L2_PIX_FMT_JPEG,
+		.colplanes	= 1,
+		.types		= MEM2MEM_OUTPUT,
+	},
+};
+#define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec)
+
+static const unsigned char qtbl_luminance[4][64] = {
+	{/* level 1 - high quality */
+		 8,  6,  6,  8, 12, 14, 16, 17,
+		 6,  6,  6,  8, 10, 13, 12, 15,
+		 6,  6,  7,  8, 13, 14, 18, 24,
+		 8,  8,  8, 14, 13, 19, 24, 35,
+		12, 10, 13, 13, 20, 26, 34, 39,
+		14, 13, 14, 19, 26, 34, 39, 39,
+		16, 12, 18, 24, 34, 39, 39, 39,
+		17, 15, 24, 35, 39, 39, 39, 39
+	},
+	{/* level 2 */
+		12,  8,  8, 12, 17, 21, 24, 23,
+		 8,  9,  9, 11, 15, 19, 18, 23,
+		 8,  9, 10, 12, 19, 20, 27, 36,
+		12, 11, 12, 21, 20, 28, 36, 53,
+		17, 15, 19, 20, 30, 39, 51, 59,
+		21, 19, 20, 28, 39, 51, 59, 59,
+		24, 18, 27, 36, 51, 59, 59, 59,
+		23, 23, 36, 53, 59, 59, 59, 59
+	},
+	{/* level 3 */
+		16, 11, 11, 16, 23, 27, 31, 30,
+		11, 12, 12, 15, 20, 23, 23, 30,
+		11, 12, 13, 16, 23, 26, 35, 47,
+		16, 15, 16, 23, 26, 37, 47, 64,
+		23, 20, 23, 26, 39, 51, 64, 64,
+		27, 23, 26, 37, 51, 64, 64, 64,
+		31, 23, 35, 47, 64, 64, 64, 64,
+		30, 30, 47, 64, 64, 64, 64, 64
+	},
+	{/*level 4 - low quality */
+		20, 16, 25, 39, 50, 46, 62, 68,
+		16, 18, 23, 38, 38, 53, 65, 68,
+		25, 23, 31, 38, 53, 65, 68, 68,
+		39, 38, 38, 53, 65, 68, 68, 68,
+		50, 38, 53, 65, 68, 68, 68, 68,
+		46, 53, 65, 68, 68, 68, 68, 68,
+		62, 65, 68, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68
+	}
+};
+
+static const unsigned char qtbl_chrominance[4][64] = {
+	{/* level 1 - high quality */
+		 9,  8,  9, 11, 14, 17, 19, 24,
+		 8, 10,  9, 11, 14, 13, 17, 22,
+		 9,  9, 13, 14, 13, 15, 23, 26,
+		11, 11, 14, 14, 15, 20, 26, 33,
+		14, 14, 13, 15, 20, 24, 33, 39,
+		17, 13, 15, 20, 24, 32, 39, 39,
+		19, 17, 23, 26, 33, 39, 39, 39,
+		24, 22, 26, 33, 39, 39, 39, 39
+	},
+	{/* level 2 */
+		13, 11, 13, 16, 20, 20, 29, 37,
+		11, 14, 14, 14, 16, 20, 26, 32,
+		13, 14, 15, 17, 20, 23, 35, 40,
+		16, 14, 17, 21, 23, 30, 40, 50,
+		20, 16, 20, 23, 30, 37, 50, 59,
+		20, 20, 23, 30, 37, 48, 59, 59,
+		29, 26, 35, 40, 50, 59, 59, 59,
+		37, 32, 40, 50, 59, 59, 59, 59
+	},
+	{/* level 3 */
+		17, 15, 17, 21, 20, 26, 38, 48,
+		15, 19, 18, 17, 20, 26, 35, 43,
+		17, 18, 20, 22, 26, 30, 46, 53,
+		21, 17, 22, 28, 30, 39, 53, 64,
+		20, 20, 26, 30, 39, 48, 64, 64,
+		26, 26, 30, 39, 48, 63, 64, 64,
+		38, 35, 46, 53, 64, 64, 64, 64,
+		48, 43, 53, 64, 64, 64, 64, 64
+	},
+	{/*level 4 - low quality */
+		21, 25, 32, 38, 54, 68, 68, 68,
+		25, 28, 24, 38, 54, 68, 68, 68,
+		32, 24, 32, 43, 66, 68, 68, 68,
+		38, 38, 43, 53, 68, 68, 68, 68,
+		54, 54, 66, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68
+	}
+};
+
+static const unsigned char hdctbl0[16] = {
+	0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const unsigned char hdctblg0[12] = {
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb
+};
+static const unsigned char hactbl0[16] = {
+	0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d
+};
+static const unsigned char hactblg0[162] = {
+	0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+	0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+	0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+	0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+	0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+	0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+	0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+	0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+	0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+	0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+	0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+	0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+	0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+	0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+	0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+	0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+	0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+	0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+	0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+	0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+	0xf9, 0xfa
+};
+
+static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
+{
+	return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
+}
+
+static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+	return container_of(fh, struct s5p_jpeg_ctx, fh);
+}
+
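+/* Copy a quantisation table into the peripheral, one byte per 32-bit
+ * register slot starting at offset 'tab'. */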
+static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
+		   unsigned long tab, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
+}
+
+static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+	/* this driver fills quantisation table 0 with data for luma */
+	jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0),
+		      ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+	/* this driver fills quantisation table 1 with data for chroma */
+	jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1),
+		      ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
+static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
+		   unsigned long tab, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
+}
+
+static inline void jpeg_set_hdctbl(void __iomem *regs)
+{
+	/* this driver fills table 0 for this component */
+	jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0));
+}
+
+static inline void jpeg_set_hdctblg(void __iomem *regs)
+{
+	/* this driver fills table 0 for this component */
+	jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0));
+}
+
+static inline void jpeg_set_hactbl(void __iomem *regs)
+{
+	/* this driver fills table 0 for this component */
+	jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0));
+}
+
+static inline void jpeg_set_hactblg(void __iomem *regs)
+{
+	/* this driver fills table 0 for this component */
+	jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0));
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq);
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
+						 __u32 pixelformat);
+static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
+
+static int s5p_jpeg_open(struct file *file)
+{
+	struct s5p_jpeg *jpeg = video_drvdata(file);
+	struct video_device *vfd = video_devdata(file);
+	struct s5p_jpeg_ctx *ctx;
+	struct s5p_jpeg_fmt *out_fmt;
+	int ret = 0;
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	if (mutex_lock_interruptible(&jpeg->lock)) {
+		ret = -ERESTARTSYS;
+		goto free;
+	}
+
+	v4l2_fh_init(&ctx->fh, vfd);
+	/* Use separate control handler per file handle */
+	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+
+	ctx->jpeg = jpeg;
+	if (vfd == jpeg->vfd_encoder) {
+		ctx->mode = S5P_JPEG_ENCODE;
+		out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565);
+	} else {
+		ctx->mode = S5P_JPEG_DECODE;
+		out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG);
+	}
+
+	ret = s5p_jpeg_controls_create(ctx);
+	if (ret < 0)
+		goto error;
+
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
+	if (IS_ERR(ctx->m2m_ctx)) {
+		ret = PTR_ERR(ctx->m2m_ctx);
+		goto error;
+	}
+
+	ctx->out_q.fmt = out_fmt;
+	ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV);
+	mutex_unlock(&jpeg->lock);
+	return 0;
+
+error:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	mutex_unlock(&jpeg->lock);
+free:
+	kfree(ctx);
+	return ret;
+}
+
+static int s5p_jpeg_release(struct file *file)
+{
+	struct s5p_jpeg *jpeg = video_drvdata(file);
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+
+	mutex_lock(&jpeg->lock);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	mutex_unlock(&jpeg->lock);
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+
+	return 0;
+}
+
+static unsigned int s5p_jpeg_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct s5p_jpeg *jpeg = video_drvdata(file);
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+	unsigned int res;
+
+	mutex_lock(&jpeg->lock);
+	res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	mutex_unlock(&jpeg->lock);
+	return res;
+}
+
+static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct s5p_jpeg *jpeg = video_drvdata(file);
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+	int ret;
+
+	if (mutex_lock_interruptible(&jpeg->lock))
+		return -ERESTARTSYS;
+	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+	mutex_unlock(&jpeg->lock);
+	return ret;
+}
+
+static const struct v4l2_file_operations s5p_jpeg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= s5p_jpeg_open,
+	.release	= s5p_jpeg_release,
+	.poll		= s5p_jpeg_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= s5p_jpeg_mmap,
+};
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+
+static int get_byte(struct s5p_jpeg_buffer *buf)
+{
+	if (buf->curr >= buf->size)
+		return -1;
+
+	return ((unsigned char *)buf->data)[buf->curr++];
+}
+
+static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word)
+{
+	unsigned int temp;
+	int byte;
+
+	byte = get_byte(buf);
+	if (byte == -1)
+		return -1;
+	temp = byte << 8;
+	byte = get_byte(buf);
+	if (byte == -1)
+		return -1;
+	*word = (unsigned int)byte | temp;
+	return 0;
+}
+
+static void skip(struct s5p_jpeg_buffer *buf, long len)
+{
+	if (len <= 0)
+		return;
+
+	while (len--)
+		get_byte(buf);
+}
+
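+/*
+ * Scan the JPEG stream for an SOF0 (baseline) marker and read the frame
+ * height, width and number of colour components from its payload.
+ * Markers without a payload (SOI, EOI, TEM, RSTn) are skipped directly,
+ * any other marker is skipped using its 16-bit length field.
+ */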
+static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
+			       unsigned long buffer, unsigned long size)
+{
+	int c, components, notfound;
+	unsigned int height, width, word;
+	long length;
+	struct s5p_jpeg_buffer jpeg_buffer;
+
+	jpeg_buffer.size = size;
+	jpeg_buffer.data = buffer;
+	jpeg_buffer.curr = 0;
+
+	notfound = 1;
+	while (notfound) {
+		c = get_byte(&jpeg_buffer);
+		if (c == -1)
+			break;
+		if (c != 0xff)
+			continue;
+		do
+			c = get_byte(&jpeg_buffer);
+		while (c == 0xff);
+		if (c == -1)
+			break;
+		if (c == 0)
+			continue;
+		length = 0;
+		switch (c) {
+		/* SOF0: baseline JPEG */
+		case SOF0:
+			if (get_word_be(&jpeg_buffer, &word))
+				break;
+			if (get_byte(&jpeg_buffer) == -1)
+				break;
+			if (get_word_be(&jpeg_buffer, &height))
+				break;
+			if (get_word_be(&jpeg_buffer, &width))
+				break;
+			components = get_byte(&jpeg_buffer);
+			if (components == -1)
+				break;
+			notfound = 0;
+
+			skip(&jpeg_buffer, components * 3);
+			break;
+
+		/* skip payload-less markers */
+		case RST ... RST + 7:
+		case SOI:
+		case EOI:
+		case TEM:
+			break;
+
+		/* skip uninteresting payload markers */
+		default:
+			if (get_word_be(&jpeg_buffer, &word))
+				break;
+			length = (long)word - 2;
+			skip(&jpeg_buffer, length);
+			break;
+		}
+	}
+	result->w = width;
+	result->h = height;
+	result->size = components;
+	return !notfound;
+}
+
+static int s5p_jpeg_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->mode == S5P_JPEG_ENCODE) {
+		strlcpy(cap->driver, S5P_JPEG_M2M_NAME " encoder",
+			sizeof(cap->driver));
+		strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
+			sizeof(cap->card));
+	} else {
+		strlcpy(cap->driver, S5P_JPEG_M2M_NAME " decoder",
+			sizeof(cap->driver));
+		strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
+			sizeof(cap->card));
+	}
+	cap->bus_info[0] = 0;
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left only for backward compatibility
+	 * and are scheduled for removal.
+	 */
+	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M |
+			    V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+	return 0;
+}
+
+static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
+		    struct v4l2_fmtdesc *f, u32 type)
+{
+	int i, num = 0;
+
+	for (i = 0; i < n; ++i) {
+		if (formats[i].types & type) {
+			/* index-th format of the requested type found? */
+			if (num == f->index)
+				break;
+			/* Correct type but haven't reached our index yet,
+			 * just increment per-type index */
+			++num;
+		}
+	}
+
+	/* Format not found */
+	if (i >= n)
+		return -EINVAL;
+
+	strlcpy(f->description, formats[i].name, sizeof(f->description));
+	f->pixelformat = formats[i].fourcc;
+
+	return 0;
+}
+
+static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->mode == S5P_JPEG_ENCODE)
+		return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
+				MEM2MEM_CAPTURE);
+
+	return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE);
+}
+
+static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->mode == S5P_JPEG_ENCODE)
+		return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
+				MEM2MEM_OUTPUT);
+
+	return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT);
+}
+
+static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
+					  enum v4l2_buf_type type)
+{
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return &ctx->out_q;
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return &ctx->cap_q;
+
+	return NULL;
+}
+
+static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct s5p_jpeg_q_data *q_data = NULL;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
+
+	vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed)
+		return -EINVAL;
+	q_data = get_q_data(ct, f->type);
+	BUG_ON(q_data == NULL);
+
+	pix->width = q_data->w;
+	pix->height = q_data->h;
+	pix->field = V4L2_FIELD_NONE;
+	pix->pixelformat = q_data->fmt->fourcc;
+	pix->bytesperline = 0;
+	if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
+		u32 bpl = q_data->w;
+		if (q_data->fmt->colplanes == 1)
+			bpl = (bpl * q_data->fmt->depth) >> 3;
+		pix->bytesperline = bpl;
+	}
+	pix->sizeimage = q_data->size;
+
+	return 0;
+}
+
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
+						 u32 pixelformat)
+{
+	unsigned int k;
+	struct s5p_jpeg_fmt *formats;
+	int n;
+
+	if (mode == S5P_JPEG_ENCODE) {
+		formats = formats_enc;
+		n = NUM_FORMATS_ENC;
+	} else {
+		formats = formats_dec;
+		n = NUM_FORMATS_DEC;
+	}
+
+	for (k = 0; k < n; k++) {
+		struct s5p_jpeg_fmt *fmt = &formats[k];
+		if (fmt->fourcc == pixelformat)
+			return fmt;
+	}
+
+	return NULL;
+
+}
+
+static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+				   unsigned int walign,
+				   u32 *h, unsigned int hmin, unsigned int hmax,
+				   unsigned int halign)
+{
+	int width, height, w_step, h_step;
+
+	width = *w;
+	height = *h;
+
+	w_step = 1 << walign;
+	h_step = 1 << halign;
+	v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
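+	/*
+	 * If alignment rounded a dimension down below the requested value,
+	 * step back up by one alignment unit as long as that still fits
+	 * below the allowed maximum.
+	 */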
+	if (*w < width && (*w + w_step) < wmax)
+		*w += w_step;
+	if (*h < height && (*h + h_step) < hmax)
+		*h += h_step;
+
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
+			  struct s5p_jpeg_ctx *ctx, int q_type)
+{
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+
+	if (pix->field == V4L2_FIELD_ANY)
+		pix->field = V4L2_FIELD_NONE;
+	else if (pix->field != V4L2_FIELD_NONE)
+		return -EINVAL;
+
+	/* V4L2 specification suggests the driver corrects the format struct
+	 * if any of the dimensions is unsupported */
+	if (q_type == MEM2MEM_OUTPUT)
+		jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
+				       S5P_JPEG_MAX_WIDTH, 0,
+				       &pix->height, S5P_JPEG_MIN_HEIGHT,
+				       S5P_JPEG_MAX_HEIGHT, 0);
+	else
+		jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
+				       S5P_JPEG_MAX_WIDTH, fmt->h_align,
+				       &pix->height, S5P_JPEG_MIN_HEIGHT,
+				       S5P_JPEG_MAX_HEIGHT, fmt->v_align);
+
+	if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+		if (pix->sizeimage <= 0)
+			pix->sizeimage = PAGE_SIZE;
+		pix->bytesperline = 0;
+	} else {
+		u32 bpl = pix->bytesperline;
+
+		if (fmt->colplanes > 1 && bpl < pix->width)
+			bpl = pix->width; /* planar */
+
+		if (fmt->colplanes == 1 && /* packed */
+		    (bpl << 3) / fmt->depth < pix->width)
+			bpl = (pix->width * fmt->depth) >> 3;
+
+		pix->bytesperline = bpl;
+		pix->sizeimage = (pix->width * pix->height * fmt->depth) >> 3;
+	}
+
+	return 0;
+}
+
+static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+	struct s5p_jpeg_fmt *fmt;
+
+	fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
+	if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+		v4l2_err(&ctx->jpeg->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE);
+}
+
+static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+	struct s5p_jpeg_fmt *fmt;
+
+	fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
+	if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+		v4l2_err(&ctx->jpeg->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT);
+}
+
+static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
+{
+	struct vb2_queue *vq;
+	struct s5p_jpeg_q_data *q_data = NULL;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+
+	vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	q_data = get_q_data(ct, f->type);
+	BUG_ON(q_data == NULL);
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat);
+	q_data->w = pix->width;
+	q_data->h = pix->height;
+	if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)
+		q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
+	else
+		q_data->size = pix->sizeimage;
+
+	return 0;
+}
+
+static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = s5p_jpeg_try_fmt_vid_cap(file, priv, f);
+	if (ret)
+		return ret;
+
+	return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+}
+
+static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	int ret;
+
+	ret = s5p_jpeg_try_fmt_vid_out(file, priv, f);
+	if (ret)
+		return ret;
+
+	return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+}
+
+static int s5p_jpeg_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int s5p_jpeg_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int s5p_jpeg_dqbuf(struct file *file, void *priv,
+			  struct v4l2_buffer *buf)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int s5p_jpeg_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int s5p_jpeg_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int s5p_jpeg_g_selection(struct file *file, void *priv,
+			 struct v4l2_selection *s)
+{
+	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+	    s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	/* For JPEG blob active == default == bounds */
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+		s->r.width = ctx->out_q.w;
+		s->r.height = ctx->out_q.h;
+		break;
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+	case V4L2_SEL_TGT_COMPOSE_PADDED:
+		s->r.width = ctx->cap_q.w;
+		s->r.height = ctx->cap_q.h;
+		break;
+	default:
+		return -EINVAL;
+	}
+	s->r.left = 0;
+	s->r.top = 0;
+	return 0;
+}
+
+/*
+ * V4L2 controls
+ */
+
+static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+	struct s5p_jpeg *jpeg = ctx->jpeg;
+	unsigned long flags;
+
+	switch (ctrl->id) {
+	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+		spin_lock_irqsave(&jpeg->slock, flags);
+
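+		/*
+		 * The hardware reports 0 (4:4:4), 1 (4:2:2), 2 (4:2:0) or
+		 * 3 (GRAY); the first three match the V4L2 menu values
+		 * directly, only GRAY has to be remapped to
+		 * V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY.
+		 */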
+		WARN_ON(ctx->subsampling > S5P_SUBSAMPLING_MODE_GRAY);
+		if (ctx->subsampling > 2)
+			ctrl->val = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+		else
+			ctrl->val = ctx->subsampling;
+		spin_unlock_irqrestore(&jpeg->slock, flags);
+		break;
+	}
+
+	return 0;
+}
+
+static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+	switch (ctrl->id) {
+	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+		ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - ctrl->val;
+		break;
+	case V4L2_CID_JPEG_RESTART_INTERVAL:
+		ctx->restart_interval = ctrl->val;
+		break;
+	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+		ctx->subsampling = ctrl->val;
+		break;
+	}
+
+	spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
+	.g_volatile_ctrl	= s5p_jpeg_g_volatile_ctrl,
+	.s_ctrl			= s5p_jpeg_s_ctrl,
+};
+
+static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
+{
+	unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
+	struct v4l2_ctrl *ctrl;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+
+	if (ctx->mode == S5P_JPEG_ENCODE) {
+		v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+				  V4L2_CID_JPEG_COMPRESSION_QUALITY,
+				  0, 3, 1, 3);
+
+		v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+				  V4L2_CID_JPEG_RESTART_INTERVAL,
+				  0, 0xffff, 1, 0);
+		mask = ~0x06; /* 422, 420 */
+	}
+
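+	/*
+	 * The menu skip mask has one bit per V4L2_JPEG_CHROMA_SUBSAMPLING_*
+	 * item and set bits hide that item, so decoding exposes 4:4:4, 4:2:2,
+	 * 4:2:0 and GRAY while encoding is restricted to 4:2:2 and 4:2:0.
+	 */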
+	ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+				      V4L2_CID_JPEG_CHROMA_SUBSAMPLING,
+				      V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
+				      V4L2_JPEG_CHROMA_SUBSAMPLING_422);
+
+	if (ctx->ctrl_handler.error)
+		return ctx->ctrl_handler.error;
+
+	if (ctx->mode == S5P_JPEG_DECODE)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
+			V4L2_CTRL_FLAG_READ_ONLY;
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
+	.vidioc_querycap		= s5p_jpeg_querycap,
+
+	.vidioc_enum_fmt_vid_cap	= s5p_jpeg_enum_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_out	= s5p_jpeg_enum_fmt_vid_out,
+
+	.vidioc_g_fmt_vid_cap		= s5p_jpeg_g_fmt,
+	.vidioc_g_fmt_vid_out		= s5p_jpeg_g_fmt,
+
+	.vidioc_try_fmt_vid_cap		= s5p_jpeg_try_fmt_vid_cap,
+	.vidioc_try_fmt_vid_out		= s5p_jpeg_try_fmt_vid_out,
+
+	.vidioc_s_fmt_vid_cap		= s5p_jpeg_s_fmt_vid_cap,
+	.vidioc_s_fmt_vid_out		= s5p_jpeg_s_fmt_vid_out,
+
+	.vidioc_reqbufs			= s5p_jpeg_reqbufs,
+	.vidioc_querybuf		= s5p_jpeg_querybuf,
+
+	.vidioc_qbuf			= s5p_jpeg_qbuf,
+	.vidioc_dqbuf			= s5p_jpeg_dqbuf,
+
+	.vidioc_streamon		= s5p_jpeg_streamon,
+	.vidioc_streamoff		= s5p_jpeg_streamoff,
+
+	.vidioc_g_selection		= s5p_jpeg_g_selection,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+
+static void s5p_jpeg_device_run(void *priv)
+{
+	struct s5p_jpeg_ctx *ctx = priv;
+	struct s5p_jpeg *jpeg = ctx->jpeg;
+	struct vb2_buffer *src_buf, *dst_buf;
+	unsigned long src_addr, dst_addr;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+
+	jpeg_reset(jpeg->regs);
+	jpeg_poweron(jpeg->regs);
+	jpeg_proc_mode(jpeg->regs, ctx->mode);
+	if (ctx->mode == S5P_JPEG_ENCODE) {
+		if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
+			jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565);
+		else
+			jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422);
+		jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
+		jpeg_dri(jpeg->regs, ctx->restart_interval);
+		jpeg_x(jpeg->regs, ctx->out_q.w);
+		jpeg_y(jpeg->regs, ctx->out_q.h);
+		jpeg_imgadr(jpeg->regs, src_addr);
+		jpeg_jpgadr(jpeg->regs, dst_addr);
+
+		/* ultimately comes from sizeimage from userspace */
+		jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
+
+		/* JPEG RGB to YCbCr conversion matrix */
+		jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
+		jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
+		jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
+		jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
+		jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
+		jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
+		jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
+		jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
+		jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
+
+		/*
+		 * JPEG IP allows storing 4 quantization tables
+		 * We fill table 0 for luma and table 1 for chroma
+		 */
+		jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+		jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+		/* use table 0 for Y */
+		jpeg_qtbl(jpeg->regs, 1, 0);
+		/* use table 1 for Cb and Cr */
+		jpeg_qtbl(jpeg->regs, 2, 1);
+		jpeg_qtbl(jpeg->regs, 3, 1);
+
+		/* Y, Cb, Cr use Huffman table 0 */
+		jpeg_htbl_ac(jpeg->regs, 1);
+		jpeg_htbl_dc(jpeg->regs, 1);
+		jpeg_htbl_ac(jpeg->regs, 2);
+		jpeg_htbl_dc(jpeg->regs, 2);
+		jpeg_htbl_ac(jpeg->regs, 3);
+		jpeg_htbl_dc(jpeg->regs, 3);
+	} else { /* S5P_JPEG_DECODE */
+		jpeg_rst_int_enable(jpeg->regs, true);
+		jpeg_data_num_int_enable(jpeg->regs, true);
+		jpeg_final_mcu_num_int_enable(jpeg->regs, true);
+		if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
+			jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
+		else
+			jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
+		jpeg_jpgadr(jpeg->regs, src_addr);
+		jpeg_imgadr(jpeg->regs, dst_addr);
+	}
+
+	jpeg_start(jpeg->regs);
+}
+
+static int s5p_jpeg_job_ready(void *priv)
+{
+	struct s5p_jpeg_ctx *ctx = priv;
+
+	if (ctx->mode == S5P_JPEG_DECODE)
+		return ctx->hdr_parsed;
+	return 1;
+}
+
+static void s5p_jpeg_job_abort(void *priv)
+{
+}
+
+static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
+	.device_run	= s5p_jpeg_device_run,
+	.job_ready	= s5p_jpeg_job_ready,
+	.job_abort	= s5p_jpeg_job_abort,
+};
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+			   const struct v4l2_format *fmt,
+			   unsigned int *nbuffers, unsigned int *nplanes,
+			   unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+	struct s5p_jpeg_q_data *q_data = NULL;
+	unsigned int size, count = *nbuffers;
+
+	q_data = get_q_data(ctx, vq->type);
+	BUG_ON(q_data == NULL);
+
+	size = q_data->size;
+
+	/*
+	 * the header is parsed during decoding and the parsed information is
+	 * stored in the context, so we do not allow another buffer to
+	 * overwrite it
+	 */
+	if (ctx->mode == S5P_JPEG_DECODE)
+		count = 1;
+
+	*nbuffers = count;
+	*nplanes = 1;
+	sizes[0] = size;
+	alloc_ctxs[0] = ctx->jpeg->alloc_ctx;
+
+	return 0;
+}
+
+static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct s5p_jpeg_q_data *q_data = NULL;
+
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
+	BUG_ON(q_data == NULL);
+
+	if (vb2_plane_size(vb, 0) < q_data->size) {
+		pr_err("%s data will not fit into plane (%lu < %lu)\n",
+				__func__, vb2_plane_size(vb, 0),
+				(long)q_data->size);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, q_data->size);
+
+	return 0;
+}
+
+static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	if (ctx->mode == S5P_JPEG_DECODE &&
+	    vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
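+		/*
+		 * The JPEG header of the queued bitstream is parsed here so
+		 * that the capture queue dimensions and buffer size are known
+		 * before the job is scheduled (job_ready checks hdr_parsed).
+		 */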
+		struct s5p_jpeg_q_data tmp, *q_data;
+		ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
+		     (unsigned long)vb2_plane_vaddr(vb, 0),
+		     min((unsigned long)ctx->out_q.size,
+			 vb2_get_plane_payload(vb, 0)));
+		if (!ctx->hdr_parsed) {
+			vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+			return;
+		}
+
+		q_data = &ctx->out_q;
+		q_data->w = tmp.w;
+		q_data->h = tmp.h;
+
+		q_data = &ctx->cap_q;
+		q_data->w = tmp.w;
+		q_data->h = tmp.h;
+
+		jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH,
+				       S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
+				       &q_data->h, S5P_JPEG_MIN_HEIGHT,
+				       S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align
+				      );
+		q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
+	}
+	if (ctx->m2m_ctx)
+		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void s5p_jpeg_wait_prepare(struct vb2_queue *vq)
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+
+	mutex_unlock(&ctx->jpeg->lock);
+}
+
+static void s5p_jpeg_wait_finish(struct vb2_queue *vq)
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+
+	mutex_lock(&ctx->jpeg->lock);
+}
+
+static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+	int ret;
+
+	ret = pm_runtime_get_sync(ctx->jpeg->dev);
+
+	return ret > 0 ? 0 : ret;
+}
+
+static int s5p_jpeg_stop_streaming(struct vb2_queue *q)
+{
+	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+
+	pm_runtime_put(ctx->jpeg->dev);
+
+	return 0;
+}
+
+static struct vb2_ops s5p_jpeg_qops = {
+	.queue_setup		= s5p_jpeg_queue_setup,
+	.buf_prepare		= s5p_jpeg_buf_prepare,
+	.buf_queue		= s5p_jpeg_buf_queue,
+	.wait_prepare		= s5p_jpeg_wait_prepare,
+	.wait_finish		= s5p_jpeg_wait_finish,
+	.start_streaming	= s5p_jpeg_start_streaming,
+	.stop_streaming		= s5p_jpeg_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct s5p_jpeg_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &s5p_jpeg_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &s5p_jpeg_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * ISR
+ * ============================================================================
+ */
+
+static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
+{
+	struct s5p_jpeg *jpeg = dev_id;
+	struct s5p_jpeg_ctx *curr_ctx;
+	struct vb2_buffer *src_buf, *dst_buf;
+	unsigned long payload_size = 0;
+	enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+	bool enc_jpeg_too_large = false;
+	bool timer_elapsed = false;
+	bool op_completed = false;
+
+	spin_lock(&jpeg->slock);
+
+	curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+	src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+	if (curr_ctx->mode == S5P_JPEG_ENCODE)
+		enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs);
+	timer_elapsed = jpeg_timer_stat(jpeg->regs);
+	op_completed = jpeg_result_stat_ok(jpeg->regs);
+	if (curr_ctx->mode == S5P_JPEG_DECODE)
+		op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs);
+
+	if (enc_jpeg_too_large) {
+		state = VB2_BUF_STATE_ERROR;
+		jpeg_clear_enc_stream_stat(jpeg->regs);
+	} else if (timer_elapsed) {
+		state = VB2_BUF_STATE_ERROR;
+		jpeg_clear_timer_stat(jpeg->regs);
+	} else if (!op_completed) {
+		state = VB2_BUF_STATE_ERROR;
+	} else {
+		payload_size = jpeg_compressed_size(jpeg->regs);
+	}
+
+	v4l2_m2m_buf_done(src_buf, state);
+	if (curr_ctx->mode == S5P_JPEG_ENCODE)
+		vb2_set_plane_payload(dst_buf, 0, payload_size);
+	v4l2_m2m_buf_done(dst_buf, state);
+	v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx);
+
+	curr_ctx->subsampling = jpeg_get_subsampling_mode(jpeg->regs);
+	spin_unlock(&jpeg->slock);
+
+	jpeg_clear_int(jpeg->regs);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+
+static int s5p_jpeg_probe(struct platform_device *pdev)
+{
+	struct s5p_jpeg *jpeg;
+	struct resource *res;
+	int ret;
+
+	/* JPEG IP abstraction struct */
+	jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
+	if (!jpeg)
+		return -ENOMEM;
+
+	mutex_init(&jpeg->lock);
+	spin_lock_init(&jpeg->slock);
+	jpeg->dev = &pdev->dev;
+
+	/* memory-mapped registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	jpeg->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (jpeg->regs == NULL) {
+		dev_err(&pdev->dev, "Failed to obtain io memory\n");
+		return -ENOENT;
+	}
+
+	/* interrupt service routine registration */
+	jpeg->irq = ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, jpeg->irq, s5p_jpeg_irq, 0,
+			dev_name(&pdev->dev), jpeg);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
+		return ret;
+	}
+
+	/* clocks */
+	jpeg->clk = clk_get(&pdev->dev, "jpeg");
+	if (IS_ERR(jpeg->clk)) {
+		dev_err(&pdev->dev, "cannot get clock\n");
+		ret = PTR_ERR(jpeg->clk);
+		return ret;
+	}
+	dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
+	clk_enable(jpeg->clk);
+
+	/* v4l2 device */
+	ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+		goto clk_get_rollback;
+	}
+
+	/* mem2mem device */
+	jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops);
+	if (IS_ERR(jpeg->m2m_dev)) {
+		v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(jpeg->m2m_dev);
+		goto device_register_rollback;
+	}
+
+	jpeg->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(jpeg->alloc_ctx)) {
+		v4l2_err(&jpeg->v4l2_dev, "Failed to init memory allocator\n");
+		ret = PTR_ERR(jpeg->alloc_ctx);
+		goto m2m_init_rollback;
+	}
+
+	/* JPEG encoder /dev/videoX node */
+	jpeg->vfd_encoder = video_device_alloc();
+	if (!jpeg->vfd_encoder) {
+		v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto vb2_allocator_rollback;
+	}
+	strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME,
+		sizeof(jpeg->vfd_encoder->name));
+	jpeg->vfd_encoder->fops		= &s5p_jpeg_fops;
+	jpeg->vfd_encoder->ioctl_ops	= &s5p_jpeg_ioctl_ops;
+	jpeg->vfd_encoder->minor	= -1;
+	jpeg->vfd_encoder->release	= video_device_release;
+	jpeg->vfd_encoder->lock		= &jpeg->lock;
+	jpeg->vfd_encoder->v4l2_dev	= &jpeg->v4l2_dev;
+
+	ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+		goto enc_vdev_alloc_rollback;
+	}
+
+	video_set_drvdata(jpeg->vfd_encoder, jpeg);
+	v4l2_info(&jpeg->v4l2_dev,
+		  "encoder device registered as /dev/video%d\n",
+		  jpeg->vfd_encoder->num);
+
+	/* JPEG decoder /dev/videoX node */
+	jpeg->vfd_decoder = video_device_alloc();
+	if (!jpeg->vfd_decoder) {
+		v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto enc_vdev_register_rollback;
+	}
+	strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME,
+		sizeof(jpeg->vfd_decoder->name));
+	jpeg->vfd_decoder->fops		= &s5p_jpeg_fops;
+	jpeg->vfd_decoder->ioctl_ops	= &s5p_jpeg_ioctl_ops;
+	jpeg->vfd_decoder->minor	= -1;
+	jpeg->vfd_decoder->release	= video_device_release;
+	jpeg->vfd_decoder->lock		= &jpeg->lock;
+	jpeg->vfd_decoder->v4l2_dev	= &jpeg->v4l2_dev;
+
+	ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+		goto dec_vdev_alloc_rollback;
+	}
+
+	video_set_drvdata(jpeg->vfd_decoder, jpeg);
+	v4l2_info(&jpeg->v4l2_dev,
+		  "decoder device registered as /dev/video%d\n",
+		  jpeg->vfd_decoder->num);
+
+	/* final statements & power management */
+	platform_set_drvdata(pdev, jpeg);
+
+	pm_runtime_enable(&pdev->dev);
+
+	v4l2_info(&jpeg->v4l2_dev, "Samsung S5P JPEG codec\n");
+
+	return 0;
+
+dec_vdev_alloc_rollback:
+	video_device_release(jpeg->vfd_decoder);
+
+enc_vdev_register_rollback:
+	video_unregister_device(jpeg->vfd_encoder);
+
+enc_vdev_alloc_rollback:
+	video_device_release(jpeg->vfd_encoder);
+
+vb2_allocator_rollback:
+	vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
+
+m2m_init_rollback:
+	v4l2_m2m_release(jpeg->m2m_dev);
+
+device_register_rollback:
+	v4l2_device_unregister(&jpeg->v4l2_dev);
+
+clk_get_rollback:
+	clk_disable(jpeg->clk);
+	clk_put(jpeg->clk);
+
+	return ret;
+}
+
+static int s5p_jpeg_remove(struct platform_device *pdev)
+{
+	struct s5p_jpeg *jpeg = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(jpeg->dev);
+
+	video_unregister_device(jpeg->vfd_decoder);
+	video_device_release(jpeg->vfd_decoder);
+	video_unregister_device(jpeg->vfd_encoder);
+	video_device_release(jpeg->vfd_encoder);
+	vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
+	v4l2_m2m_release(jpeg->m2m_dev);
+	v4l2_device_unregister(&jpeg->v4l2_dev);
+
+	clk_disable(jpeg->clk);
+	clk_put(jpeg->clk);
+
+	return 0;
+}
+
+static int s5p_jpeg_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int s5p_jpeg_runtime_resume(struct device *dev)
+{
+	struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+	/*
+	 * JPEG IP allows storing two Huffman tables for each component
+	 * We fill table 0 for each component
+	 */
+	jpeg_set_hdctbl(jpeg->regs);
+	jpeg_set_hdctblg(jpeg->regs);
+	jpeg_set_hactbl(jpeg->regs);
+	jpeg_set_hactblg(jpeg->regs);
+	return 0;
+}
+
+static const struct dev_pm_ops s5p_jpeg_pm_ops = {
+	.runtime_suspend = s5p_jpeg_runtime_suspend,
+	.runtime_resume	 = s5p_jpeg_runtime_resume,
+};
+
+static struct platform_driver s5p_jpeg_driver = {
+	.probe = s5p_jpeg_probe,
+	.remove = s5p_jpeg_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = S5P_JPEG_M2M_NAME,
+		.pm = &s5p_jpeg_pm_ops,
+	},
+};
+
+module_platform_driver(s5p_jpeg_driver);
+
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_DESCRIPTION("Samsung JPEG codec driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
new file mode 100644
index 0000000..022b9b9
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -0,0 +1,150 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef JPEG_CORE_H_
+#define JPEG_CORE_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+
+#define S5P_JPEG_M2M_NAME		"s5p-jpeg"
+
+/* JPEG compression quality setting */
+#define S5P_JPEG_COMPR_QUAL_BEST	0
+#define S5P_JPEG_COMPR_QUAL_WORST	3
+
+/* JPEG RGB to YCbCr conversion matrix coefficients */
+#define S5P_JPEG_COEF11			0x4d
+#define S5P_JPEG_COEF12			0x97
+#define S5P_JPEG_COEF13			0x1e
+#define S5P_JPEG_COEF21			0x2c
+#define S5P_JPEG_COEF22			0x57
+#define S5P_JPEG_COEF23			0x83
+#define S5P_JPEG_COEF31			0x83
+#define S5P_JPEG_COEF32			0x6e
+#define S5P_JPEG_COEF33			0x13
+
+/* a selection of JPEG markers */
+#define TEM				0x01
+#define SOF0				0xc0
+#define RST				0xd0
+#define SOI				0xd8
+#define EOI				0xd9
+#define DHP				0xde
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE			(1 << 0)
+#define MEM2MEM_OUTPUT			(1 << 1)
+
+/**
+ * struct s5p_jpeg - JPEG IP abstraction
+ * @lock:		the mutex protecting this structure
+ * @slock:		spinlock protecting the device contexts
+ * @v4l2_dev:		v4l2 device for mem2mem mode
+ * @vfd_encoder:	video device node for encoder mem2mem mode
+ * @vfd_decoder:	video device node for decoder mem2mem mode
+ * @m2m_dev:		v4l2 mem2mem device data
+ * @regs:		JPEG IP registers mapping
+ * @irq:		JPEG IP irq
+ * @clk:		JPEG IP clock
+ * @dev:		JPEG IP struct device
+ * @alloc_ctx:		videobuf2 memory allocator's context
+ */
+struct s5p_jpeg {
+	struct mutex		lock;
+	struct spinlock		slock;
+
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vfd_encoder;
+	struct video_device	*vfd_decoder;
+	struct v4l2_m2m_dev	*m2m_dev;
+
+	void __iomem		*regs;
+	unsigned int		irq;
+	struct clk		*clk;
+	struct device		*dev;
+	void			*alloc_ctx;
+};
+
+/**
+ * struct s5p_jpeg_fmt - driver's internal color format data
+ * @name:	format description
+ * @fourcc:	the fourcc code, 0 if not applicable
+ * @depth:	number of bits per pixel
+ * @colplanes:	number of color planes (1 for packed formats)
+ * @h_align:	horizontal alignment order (align to 2^h_align)
+ * @v_align:	vertical alignment order (align to 2^v_align)
+ * @types:	types of queue this format is applicable to
+ */
+struct s5p_jpeg_fmt {
+	char	*name;
+	u32	fourcc;
+	int	depth;
+	int	colplanes;
+	int	h_align;
+	int	v_align;
+	u32	types;
+};
+
+/**
+ * struct s5p_jpeg_q_data - parameters of one queue
+ * @fmt:	driver-specific format of this queue
+ * @w:		image width
+ * @h:		image height
+ * @size:	image buffer size in bytes
+ */
+struct s5p_jpeg_q_data {
+	struct s5p_jpeg_fmt	*fmt;
+	u32			w;
+	u32			h;
+	u32			size;
+};
+
+/**
+ * struct s5p_jpeg_ctx - the device context data
+ * @jpeg:		JPEG IP device for this context
+ * @mode:		compression (encode) operation or decompression (decode)
+ * @compr_quality:	destination image quality in compression (encode) mode
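+ * @restart_interval:	interval between restart markers, set via
+ *			V4L2_CID_JPEG_RESTART_INTERVAL
+ * @subsampling:	chroma subsampling mode, reflecting
+ *			V4L2_CID_JPEG_CHROMA_SUBSAMPLING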
+ * @m2m_ctx:		mem2mem device context
+ * @out_q:		source (output) queue information
+ * @cap_q:		destination (capture) queue information
+ * @hdr_parsed:		set if header has been parsed during decompression
+ * @ctrl_handler:	controls handler
+ */
+struct s5p_jpeg_ctx {
+	struct s5p_jpeg		*jpeg;
+	unsigned int		mode;
+	unsigned short		compr_quality;
+	unsigned short		restart_interval;
+	unsigned short		subsampling;
+	struct v4l2_m2m_ctx	*m2m_ctx;
+	struct s5p_jpeg_q_data	out_q;
+	struct s5p_jpeg_q_data	cap_q;
+	struct v4l2_fh		fh;
+	bool			hdr_parsed;
+	struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/**
+ * struct s5p_jpeg_buffer - description of memory containing input JPEG data
+ * @size:	buffer size
+ * @curr:	current position in the buffer
+ * @data:	pointer to the data
+ */
+struct s5p_jpeg_buffer {
+	unsigned long size;
+	unsigned long curr;
+	unsigned long data;
+};
+
+#endif /* JPEG_CORE_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw.h b/drivers/media/platform/s5p-jpeg/jpeg-hw.h
new file mode 100644
index 0000000..b47e887
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw.h
@@ -0,0 +1,357 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_H_
+#define JPEG_HW_H_
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-regs.h"
+
+#define S5P_JPEG_MIN_WIDTH		32
+#define S5P_JPEG_MIN_HEIGHT		32
+#define S5P_JPEG_MAX_WIDTH		8192
+#define S5P_JPEG_MAX_HEIGHT		8192
+#define S5P_JPEG_ENCODE			0
+#define S5P_JPEG_DECODE			1
+#define S5P_JPEG_RAW_IN_565		0
+#define S5P_JPEG_RAW_IN_422		1
+#define S5P_JPEG_RAW_OUT_422		0
+#define S5P_JPEG_RAW_OUT_420		1
+
+static inline void jpeg_reset(void __iomem *regs)
+{
+	unsigned long reg;
+
+	writel(1, regs + S5P_JPG_SW_RESET);
+	reg = readl(regs + S5P_JPG_SW_RESET);
+	/* no other way but polling for when JPEG IP becomes operational */
+	while (reg != 0) {
+		cpu_relax();
+		reg = readl(regs + S5P_JPG_SW_RESET);
+	}
+}
+
+static inline void jpeg_poweron(void __iomem *regs)
+{
+	writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
+}
+
+static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
+{
+	unsigned long reg, m;
+
+	m = S5P_MOD_SEL_565;
+	if (mode == S5P_JPEG_RAW_IN_565)
+		m = S5P_MOD_SEL_565;
+	else if (mode == S5P_JPEG_RAW_IN_422)
+		m = S5P_MOD_SEL_422;
+
+	reg = readl(regs + S5P_JPGCMOD);
+	reg &= ~S5P_MOD_SEL_MASK;
+	reg |= m;
+	writel(reg, regs + S5P_JPGCMOD);
+}
+
+static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGCMOD);
+	if (y16)
+		reg |= S5P_MODE_Y16;
+	else
+		reg &= ~S5P_MODE_Y16_MASK;
+	writel(reg, regs + S5P_JPGCMOD);
+}
+
+static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
+{
+	unsigned long reg, m;
+
+	m = S5P_PROC_MODE_DECOMPR;
+	if (mode == S5P_JPEG_ENCODE)
+		m = S5P_PROC_MODE_COMPR;
+	else
+		m = S5P_PROC_MODE_DECOMPR;
+	reg = readl(regs + S5P_JPGMOD);
+	reg &= ~S5P_PROC_MODE_MASK;
+	reg |= m;
+	writel(reg, regs + S5P_JPGMOD);
+}
+
+static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
+{
+	unsigned long reg, m;
+
+	if (mode == V4L2_JPEG_CHROMA_SUBSAMPLING_420)
+		m = S5P_SUBSAMPLING_MODE_420;
+	else
+		m = S5P_SUBSAMPLING_MODE_422;
+
+	reg = readl(regs + S5P_JPGMOD);
+	reg &= ~S5P_SUBSAMPLING_MODE_MASK;
+	reg |= m;
+	writel(reg, regs + S5P_JPGMOD);
+}
+
+static inline unsigned int jpeg_get_subsampling_mode(void __iomem *regs)
+{
+	return readl(regs + S5P_JPGMOD) & S5P_SUBSAMPLING_MODE_MASK;
+}
+
+static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGDRI_U);
+	reg &= ~0xff;
+	reg |= (dri >> 8) & 0xff;
+	writel(reg, regs + S5P_JPGDRI_U);
+
+	reg = readl(regs + S5P_JPGDRI_L);
+	reg &= ~0xff;
+	reg |= dri & 0xff;
+	writel(reg, regs + S5P_JPGDRI_L);
+}
+
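+/*
+ * Select quantization table n (0..3) for colour component t (1 = Y, 2 = Cb,
+ * 3 = Cr), e.g. jpeg_qtbl(regs, 2, 1) makes Cb use table 1.
+ */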
+static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_QTBL);
+	reg &= ~S5P_QT_NUMt_MASK(t);
+	reg |= (n << S5P_QT_NUMt_SHIFT(t)) & S5P_QT_NUMt_MASK(t);
+	writel(reg, regs + S5P_JPG_QTBL);
+}
+
+static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_HTBL);
+	reg &= ~S5P_HT_NUMt_AC_MASK(t);
+	/* this driver uses table 0 for all color components */
+	reg |= (0 << S5P_HT_NUMt_AC_SHIFT(t)) & S5P_HT_NUMt_AC_MASK(t);
+	writel(reg, regs + S5P_JPG_HTBL);
+}
+
+static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_HTBL);
+	reg &= ~S5P_HT_NUMt_DC_MASK(t);
+	/* this driver uses table 0 for all color components */
+	reg |= (0 << S5P_HT_NUMt_DC_SHIFT(t)) & S5P_HT_NUMt_DC_MASK(t);
+	writel(reg, regs + S5P_JPG_HTBL);
+}
+
+static inline void jpeg_y(void __iomem *regs, unsigned int y)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGY_U);
+	reg &= ~0xff;
+	reg |= (y >> 8) & 0xff;
+	writel(reg, regs + S5P_JPGY_U);
+
+	reg = readl(regs + S5P_JPGY_L);
+	reg &= ~0xff;
+	reg |= y & 0xff;
+	writel(reg, regs + S5P_JPGY_L);
+}
+
+static inline void jpeg_x(void __iomem *regs, unsigned int x)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGX_U);
+	reg &= ~0xff;
+	reg |= (x >> 8) & 0xff;
+	writel(reg, regs + S5P_JPGX_U);
+
+	reg = readl(regs + S5P_JPGX_L);
+	reg &= ~0xff;
+	reg |= x & 0xff;
+	writel(reg, regs + S5P_JPGX_L);
+}
+
+static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGINTSE);
+	reg &= ~S5P_RSTm_INT_EN_MASK;
+	if (enable)
+		reg |= S5P_RSTm_INT_EN;
+	writel(reg, regs + S5P_JPGINTSE);
+}
+
+static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGINTSE);
+	reg &= ~S5P_DATA_NUM_INT_EN_MASK;
+	if (enable)
+		reg |= S5P_DATA_NUM_INT_EN;
+	writel(reg, regs + S5P_JPGINTSE);
+}
+
+static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPGINTSE);
+	reg &= ~S5P_FINAL_MCU_NUM_INT_EN_MASK;
+	if (enbl)
+		reg |= S5P_FINAL_MCU_NUM_INT_EN;
+	writel(reg, regs + S5P_JPGINTSE);
+}
+
+static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_TIMER_SE);
+	reg |= S5P_TIMER_INT_EN;
+	reg &= ~S5P_TIMER_INIT_MASK;
+	reg |= val & S5P_TIMER_INIT_MASK;
+	writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+static inline void jpeg_timer_disable(void __iomem *regs)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_TIMER_SE);
+	reg &= ~S5P_TIMER_INT_EN_MASK;
+	writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+static inline int jpeg_timer_stat(void __iomem *regs)
+{
+	return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
+		     >> S5P_TIMER_INT_STAT_SHIFT);
+}
+
+static inline void jpeg_clear_timer_stat(void __iomem *regs)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_TIMER_SE);
+	reg &= ~S5P_TIMER_INT_STAT_MASK;
+	writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
+	reg &= ~S5P_ENC_STREAM_BOUND_MASK;
+	reg |= S5P_ENC_STREAM_INT_EN;
+	reg |= size & S5P_ENC_STREAM_BOUND_MASK;
+	writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
+}
+
+static inline int jpeg_enc_stream_stat(void __iomem *regs)
+{
+	return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
+		     S5P_ENC_STREAM_INT_STAT_MASK);
+}
+
+static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
+	reg &= ~S5P_ENC_STREAM_INT_MASK;
+	writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
+}
+
+static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
+{
+	unsigned long reg, f;
+
+	f = S5P_DEC_OUT_FORMAT_422;
+	if (format == S5P_JPEG_RAW_OUT_422)
+		f = S5P_DEC_OUT_FORMAT_422;
+	else if (format == S5P_JPEG_RAW_OUT_420)
+		f = S5P_DEC_OUT_FORMAT_420;
+	reg = readl(regs + S5P_JPG_OUTFORM);
+	reg &= ~S5P_DEC_OUT_FORMAT_MASK;
+	reg |= f;
+	writel(reg, regs + S5P_JPG_OUTFORM);
+}
+
+static inline void jpeg_jpgadr(void __iomem *regs, unsigned long addr)
+{
+	writel(addr, regs + S5P_JPG_JPGADR);
+}
+
+static inline void jpeg_imgadr(void __iomem *regs, unsigned long addr)
+{
+	writel(addr, regs + S5P_JPG_IMGADR);
+}
+
+static inline void jpeg_coef(void __iomem *regs, unsigned int i,
+			     unsigned int j, unsigned int coef)
+{
+	unsigned long reg;
+
+	reg = readl(regs + S5P_JPG_COEF(i));
+	reg &= ~S5P_COEFn_MASK(j);
+	reg |= (coef << S5P_COEFn_SHIFT(j)) & S5P_COEFn_MASK(j);
+	writel(reg, regs + S5P_JPG_COEF(i));
+}
+
+static inline void jpeg_start(void __iomem *regs)
+{
+	writel(1, regs + S5P_JSTART);
+}
+
+static inline int jpeg_result_stat_ok(void __iomem *regs)
+{
+	return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
+		     >> S5P_RESULT_STAT_SHIFT);
+}
+
+static inline int jpeg_stream_stat_ok(void __iomem *regs)
+{
+	return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
+		      >> S5P_STREAM_STAT_SHIFT);
+}
+
+static inline void jpeg_clear_int(void __iomem *regs)
+{
+	unsigned long reg;
+
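+	/*
+	 * Read the interrupt and operation status registers and write
+	 * S5P_INT_RELEASE to the command register to release the pending
+	 * interrupt; the values read back are discarded.
+	 */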
+	reg = readl(regs + S5P_JPGINTST);
+	writel(S5P_INT_RELEASE, regs + S5P_JPGCOM);
+	reg = readl(regs + S5P_JPGOPR);
+}
+
+static inline unsigned int jpeg_compressed_size(void __iomem *regs)
+{
+	unsigned long jpeg_size = 0;
+
+	jpeg_size |= (readl(regs + S5P_JPGCNT_U) & 0xff) << 16;
+	jpeg_size |= (readl(regs + S5P_JPGCNT_M) & 0xff) << 8;
+	jpeg_size |= (readl(regs + S5P_JPGCNT_L) & 0xff);
+
+	return (unsigned int)jpeg_size;
+}
+
+#endif /* JPEG_HW_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
new file mode 100644
index 0000000..38e5081
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -0,0 +1,170 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+ *
+ * Register definition file for Samsung JPEG codec driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef JPEG_REGS_H_
+#define JPEG_REGS_H_
+
+/* JPEG mode register */
+#define S5P_JPGMOD			0x00
+#define S5P_PROC_MODE_MASK		(0x1 << 3)
+#define S5P_PROC_MODE_DECOMPR		(0x1 << 3)
+#define S5P_PROC_MODE_COMPR		(0x0 << 3)
+#define S5P_SUBSAMPLING_MODE_MASK	0x7
+#define S5P_SUBSAMPLING_MODE_444	(0x0 << 0)
+#define S5P_SUBSAMPLING_MODE_422	(0x1 << 0)
+#define S5P_SUBSAMPLING_MODE_420	(0x2 << 0)
+#define S5P_SUBSAMPLING_MODE_GRAY	(0x3 << 0)
+
+/* JPEG operation status register */
+#define S5P_JPGOPR			0x04
+
+/* Quantization tables */
+#define S5P_JPG_QTBL			0x08
+#define S5P_QT_NUMt_SHIFT(t)		(((t) - 1) << 1)
+#define S5P_QT_NUMt_MASK(t)		(0x3 << S5P_QT_NUMt_SHIFT(t))
+
+/* Huffman tables */
+#define S5P_JPG_HTBL			0x0c
+#define S5P_HT_NUMt_AC_SHIFT(t)		(((t) << 1) - 1)
+#define S5P_HT_NUMt_AC_MASK(t)		(0x1 << S5P_HT_NUMt_AC_SHIFT(t))
+
+#define S5P_HT_NUMt_DC_SHIFT(t)		(((t) - 1) << 1)
+#define S5P_HT_NUMt_DC_MASK(t)		(0x1 << S5P_HT_NUMt_DC_SHIFT(t))
+
+/* JPEG restart interval register upper byte */
+#define S5P_JPGDRI_U			0x10
+
+/* JPEG restart interval register lower byte */
+#define S5P_JPGDRI_L			0x14
+
+/* JPEG vertical resolution register upper byte */
+#define S5P_JPGY_U			0x18
+
+/* JPEG vertical resolution register lower byte */
+#define S5P_JPGY_L			0x1c
+
+/* JPEG horizontal resolution register upper byte */
+#define S5P_JPGX_U			0x20
+
+/* JPEG horizontal resolution register lower byte */
+#define S5P_JPGX_L			0x24
+
+/* JPEG byte count register upper byte */
+#define S5P_JPGCNT_U			0x28
+
+/* JPEG byte count register middle byte */
+#define S5P_JPGCNT_M			0x2c
+
+/* JPEG byte count register lower byte */
+#define S5P_JPGCNT_L			0x30
+
+/* JPEG interrupt setting register */
+#define S5P_JPGINTSE			0x34
+#define S5P_RSTm_INT_EN_MASK		(0x1 << 7)
+#define S5P_RSTm_INT_EN			(0x1 << 7)
+#define S5P_DATA_NUM_INT_EN_MASK	(0x1 << 6)
+#define S5P_DATA_NUM_INT_EN		(0x1 << 6)
+#define S5P_FINAL_MCU_NUM_INT_EN_MASK	(0x1 << 5)
+#define S5P_FINAL_MCU_NUM_INT_EN	(0x1 << 5)
+
+/* JPEG interrupt status register */
+#define S5P_JPGINTST			0x38
+#define S5P_RESULT_STAT_SHIFT		6
+#define S5P_RESULT_STAT_MASK		(0x1 << S5P_RESULT_STAT_SHIFT)
+#define S5P_STREAM_STAT_SHIFT		5
+#define S5P_STREAM_STAT_MASK		(0x1 << S5P_STREAM_STAT_SHIFT)
+
+/* JPEG command register */
+#define S5P_JPGCOM			0x4c
+#define S5P_INT_RELEASE			(0x1 << 2)
+
+/* Raw image data r/w address register */
+#define S5P_JPG_IMGADR			0x50
+
+/* JPEG file r/w address register */
+#define S5P_JPG_JPGADR			0x58
+
+/* Coefficient for RGB-to-YCbCr converter register */
+#define S5P_JPG_COEF(n)			(0x5c + (((n) - 1) << 2))
+#define S5P_COEFn_SHIFT(j)		((3 - (j)) << 3)
+#define S5P_COEFn_MASK(j)		(0xff << S5P_COEFn_SHIFT(j))
+
+/* JPEG color mode register */
+#define S5P_JPGCMOD			0x68
+#define S5P_MOD_SEL_MASK		(0x7 << 5)
+#define S5P_MOD_SEL_422			(0x1 << 5)
+#define S5P_MOD_SEL_565			(0x2 << 5)
+#define S5P_MODE_Y16_MASK		(0x1 << 1)
+#define S5P_MODE_Y16			(0x1 << 1)
+
+/* JPEG clock control register */
+#define S5P_JPGCLKCON			0x6c
+#define S5P_CLK_DOWN_READY		(0x1 << 1)
+#define S5P_POWER_ON			(0x1 << 0)
+
+/* JPEG start register */
+#define S5P_JSTART			0x70
+
+/* JPEG SW reset register */
+#define S5P_JPG_SW_RESET		0x78
+
+/* JPEG timer setting register */
+#define S5P_JPG_TIMER_SE		0x7c
+#define S5P_TIMER_INT_EN_MASK		(0x1 << 31)
+#define S5P_TIMER_INT_EN		(0x1 << 31)
+#define S5P_TIMER_INIT_MASK		0x7fffffff
+
+/* JPEG timer status register */
+#define S5P_JPG_TIMER_ST		0x80
+#define S5P_TIMER_INT_STAT_SHIFT	31
+#define S5P_TIMER_INT_STAT_MASK		(0x1 << S5P_TIMER_INT_STAT_SHIFT)
+#define S5P_TIMER_CNT_SHIFT		0
+#define S5P_TIMER_CNT_MASK		0x7fffffff
+
+/* JPEG decompression output format register */
+#define S5P_JPG_OUTFORM			0x88
+#define S5P_DEC_OUT_FORMAT_MASK		(0x1 << 0)
+#define S5P_DEC_OUT_FORMAT_422		(0x0 << 0)
+#define S5P_DEC_OUT_FORMAT_420		(0x1 << 0)
+
+/* JPEG version register */
+#define S5P_JPG_VERSION			0x8c
+
+/* JPEG compressed stream size interrupt setting register */
+#define S5P_JPG_ENC_STREAM_INTSE	0x98
+#define S5P_ENC_STREAM_INT_MASK		(0x1 << 24)
+#define S5P_ENC_STREAM_INT_EN		(0x1 << 24)
+#define S5P_ENC_STREAM_BOUND_MASK	0xffffff
+
+/* JPEG compressed stream size interrupt status register */
+#define S5P_JPG_ENC_STREAM_INTST	0x9c
+#define S5P_ENC_STREAM_INT_STAT_MASK	0x1
+
+/* JPEG quantizer table register */
+#define S5P_JPG_QTBL_CONTENT(n)		(0x400 + (n) * 0x100)
+
+/* JPEG DC Huffman table register */
+#define S5P_JPG_HDCTBL(n)		(0x800 + (n) * 0x400)
+
+/* JPEG DC Huffman table register */
+#define S5P_JPG_HDCTBLG(n)		(0x840 + (n) * 0x400)
+
+/* JPEG AC Huffman table register */
+#define S5P_JPG_HACTBL(n)		(0x880 + (n) * 0x400)
+
+/* JPEG AC Huffman table register */
+#define S5P_JPG_HACTBLG(n)		(0x8c0 + (n) * 0x400)
+
+#endif /* JPEG_REGS_H_ */
+
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile
new file mode 100644
index 0000000..d066340
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
+s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p_mfc_opr.o
+s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
+s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_cmd.o
+s5p-mfc-y += s5p_mfc_pm.o s5p_mfc_shm.o
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
new file mode 100644
index 0000000..a19bece
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -0,0 +1,418 @@
+/*
+ * Register definition file for Samsung MFC V5.1 Interface (FIMV) driver
+ *
+ * Kamil Debski, Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _REGS_FIMV_H
+#define _REGS_FIMV_H
+
+#define S5P_FIMV_REG_SIZE	(S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
+#define S5P_FIMV_REG_COUNT	((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
+
+/* Number of bits that the buffer address should be shifted for particular
+ * MFC buffers.  */
+#define S5P_FIMV_START_ADDR		0x0000
+#define S5P_FIMV_END_ADDR		0xe008
+
+#define S5P_FIMV_SW_RESET		0x0000
+#define S5P_FIMV_RISC_HOST_INT		0x0008
+
+/* Command from HOST to RISC */
+#define S5P_FIMV_HOST2RISC_CMD		0x0030
+#define S5P_FIMV_HOST2RISC_ARG1		0x0034
+#define S5P_FIMV_HOST2RISC_ARG2		0x0038
+#define S5P_FIMV_HOST2RISC_ARG3		0x003c
+#define S5P_FIMV_HOST2RISC_ARG4		0x0040
+
+/* Command from RISC to HOST */
+#define S5P_FIMV_RISC2HOST_CMD		0x0044
+#define S5P_FIMV_RISC2HOST_CMD_MASK	0x1FFFF
+#define S5P_FIMV_RISC2HOST_ARG1		0x0048
+#define S5P_FIMV_RISC2HOST_ARG2		0x004c
+#define S5P_FIMV_RISC2HOST_ARG3		0x0050
+#define S5P_FIMV_RISC2HOST_ARG4		0x0054
+
+#define S5P_FIMV_FW_VERSION		0x0058
+#define S5P_FIMV_SYS_MEM_SZ		0x005c
+#define S5P_FIMV_FW_STATUS		0x0080
+
+/* Memory controller register */
+#define S5P_FIMV_MC_DRAMBASE_ADR_A	0x0508
+#define S5P_FIMV_MC_DRAMBASE_ADR_B	0x050c
+#define S5P_FIMV_MC_STATUS		0x0510
+
+/* Common register */
+#define S5P_FIMV_COMMON_BASE_A		0x0600
+#define S5P_FIMV_COMMON_BASE_B		0x0700
+
+/* Decoder */
+#define S5P_FIMV_DEC_CHROMA_ADR		(S5P_FIMV_COMMON_BASE_A)
+#define S5P_FIMV_DEC_LUMA_ADR		(S5P_FIMV_COMMON_BASE_B)
+
+/* H.264 decoding */
+#define S5P_FIMV_H264_VERT_NB_MV_ADR	(S5P_FIMV_COMMON_BASE_A + 0x8c)
+					/* vertical neighbor motion vector */
+#define S5P_FIMV_H264_NB_IP_ADR		(S5P_FIMV_COMMON_BASE_A + 0x90)
+					/* neighbor pixels for intra pred */
+#define S5P_FIMV_H264_MV_ADR		(S5P_FIMV_COMMON_BASE_B + 0x80)
+					/* H264 motion vector */
+
+/* MPEG4 decoding */
+#define S5P_FIMV_MPEG4_NB_DCAC_ADR	(S5P_FIMV_COMMON_BASE_A + 0x8c)
+					/* neighbor AC/DC coeff. */
+#define S5P_FIMV_MPEG4_UP_NB_MV_ADR	(S5P_FIMV_COMMON_BASE_A + 0x90)
+					/* upper neighbor motion vector */
+#define S5P_FIMV_MPEG4_SA_MV_ADR	(S5P_FIMV_COMMON_BASE_A + 0x94)
+					/* subseq. anchor motion vector */
+#define S5P_FIMV_MPEG4_OT_LINE_ADR	(S5P_FIMV_COMMON_BASE_A + 0x98)
+					/* overlap transform line */
+#define S5P_FIMV_MPEG4_SP_ADR		(S5P_FIMV_COMMON_BASE_A + 0xa8)
+					/* syntax parser */
+
+/* H.263 decoding */
+#define S5P_FIMV_H263_NB_DCAC_ADR	(S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_H263_UP_NB_MV_ADR	(S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_H263_SA_MV_ADR		(S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_H263_OT_LINE_ADR	(S5P_FIMV_COMMON_BASE_A + 0x98)
+
+/* VC-1 decoding */
+#define S5P_FIMV_VC1_NB_DCAC_ADR	(S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_VC1_UP_NB_MV_ADR	(S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_VC1_SA_MV_ADR		(S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_VC1_OT_LINE_ADR	(S5P_FIMV_COMMON_BASE_A + 0x98)
+#define S5P_FIMV_VC1_BITPLANE3_ADR	(S5P_FIMV_COMMON_BASE_A + 0x9c)
+					/* bitplane3 */
+#define S5P_FIMV_VC1_BITPLANE2_ADR	(S5P_FIMV_COMMON_BASE_A + 0xa0)
+					/* bitplane2 */
+#define S5P_FIMV_VC1_BITPLANE1_ADR	(S5P_FIMV_COMMON_BASE_A + 0xa4)
+					/* bitplane1 */
+
+/* Encoder */
+#define S5P_FIMV_ENC_REF0_LUMA_ADR	(S5P_FIMV_COMMON_BASE_A + 0x1c)
+#define S5P_FIMV_ENC_REF1_LUMA_ADR	(S5P_FIMV_COMMON_BASE_A + 0x20)
+					/* reconstructed luma */
+#define S5P_FIMV_ENC_REF0_CHROMA_ADR	(S5P_FIMV_COMMON_BASE_B)
+#define S5P_FIMV_ENC_REF1_CHROMA_ADR	(S5P_FIMV_COMMON_BASE_B + 0x04)
+					/* reconstructed chroma */
+#define S5P_FIMV_ENC_REF2_LUMA_ADR	(S5P_FIMV_COMMON_BASE_B + 0x10)
+#define S5P_FIMV_ENC_REF2_CHROMA_ADR	(S5P_FIMV_COMMON_BASE_B + 0x08)
+#define S5P_FIMV_ENC_REF3_LUMA_ADR	(S5P_FIMV_COMMON_BASE_B + 0x14)
+#define S5P_FIMV_ENC_REF3_CHROMA_ADR	(S5P_FIMV_COMMON_BASE_B + 0x0c)
+
+/* H.264 encoding */
+#define S5P_FIMV_H264_UP_MV_ADR		(S5P_FIMV_COMMON_BASE_A)
+					/* upper motion vector */
+#define S5P_FIMV_H264_NBOR_INFO_ADR	(S5P_FIMV_COMMON_BASE_A + 0x04)
+					/* entropy engine's neighbor info. */
+#define S5P_FIMV_H264_UP_INTRA_MD_ADR	(S5P_FIMV_COMMON_BASE_A + 0x08)
+					/* upper intra MD */
+#define S5P_FIMV_H264_COZERO_FLAG_ADR	(S5P_FIMV_COMMON_BASE_A + 0x10)
+					/* direct cozero flag */
+#define S5P_FIMV_H264_UP_INTRA_PRED_ADR	(S5P_FIMV_COMMON_BASE_B + 0x40)
+					/* upper intra PRED */
+
+/* H.263 encoding */
+#define S5P_FIMV_H263_UP_MV_ADR		(S5P_FIMV_COMMON_BASE_A)
+					/* upper motion vector */
+#define S5P_FIMV_H263_ACDC_COEF_ADR	(S5P_FIMV_COMMON_BASE_A + 0x04)
+					/* upper Q coeff. */
+
+/* MPEG4 encoding */
+#define S5P_FIMV_MPEG4_UP_MV_ADR	(S5P_FIMV_COMMON_BASE_A)
+					/* upper motion vector */
+#define S5P_FIMV_MPEG4_ACDC_COEF_ADR	(S5P_FIMV_COMMON_BASE_A + 0x04)
+					/* upper Q coeff. */
+#define S5P_FIMV_MPEG4_COZERO_FLAG_ADR	(S5P_FIMV_COMMON_BASE_A + 0x10)
+					/* direct cozero flag */
+
+#define S5P_FIMV_ENC_REF_B_LUMA_ADR     0x062c /* ref B Luma addr */
+#define S5P_FIMV_ENC_REF_B_CHROMA_ADR   0x0630 /* ref B Chroma addr */
+
+#define S5P_FIMV_ENC_CUR_LUMA_ADR	0x0718 /* current Luma addr */
+#define S5P_FIMV_ENC_CUR_CHROMA_ADR	0x071C /* current Chroma addr */
+
+/* Codec common register */
+#define S5P_FIMV_ENC_HSIZE_PX		0x0818 /* frame width at encoder */
+#define S5P_FIMV_ENC_VSIZE_PX		0x081c /* frame height at encoder */
+#define S5P_FIMV_ENC_PROFILE		0x0830 /* profile register */
+#define S5P_FIMV_ENC_PROFILE_H264_MAIN			0
+#define S5P_FIMV_ENC_PROFILE_H264_HIGH			1
+#define S5P_FIMV_ENC_PROFILE_H264_BASELINE		2
+#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE		0
+#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE	1
+#define S5P_FIMV_ENC_PIC_STRUCT		0x083c /* picture field/frame flag */
+#define S5P_FIMV_ENC_LF_CTRL		0x0848 /* loop filter control */
+#define S5P_FIMV_ENC_ALPHA_OFF		0x084c /* loop filter alpha offset */
+#define S5P_FIMV_ENC_BETA_OFF		0x0850 /* loop filter beta offset */
+#define S5P_FIMV_MR_BUSIF_CTRL		0x0854 /* hidden, bus interface ctrl */
+#define S5P_FIMV_ENC_PXL_CACHE_CTRL	0x0a00 /* pixel cache control */
+
+/* Channel & stream interface register */
+#define S5P_FIMV_SI_RTN_CHID		0x2000 /* Return CH inst ID register */
+#define S5P_FIMV_SI_CH0_INST_ID		0x2040 /* codec instance ID */
+#define S5P_FIMV_SI_CH1_INST_ID		0x2080 /* codec instance ID */
+/* Decoder */
+#define S5P_FIMV_SI_VRESOL		0x2004 /* vertical res of decoder */
+#define S5P_FIMV_SI_HRESOL		0x2008 /* horizontal res of decoder */
+#define S5P_FIMV_SI_BUF_NUMBER		0x200c /* number of frames in the
+								decoded pic */
+#define S5P_FIMV_SI_DISPLAY_Y_ADR	0x2010 /* luma addr of displayed pic */
+#define S5P_FIMV_SI_DISPLAY_C_ADR	0x2014 /* chroma addr of displayed pic */
+
+#define S5P_FIMV_SI_CONSUMED_BYTES	0x2018 /* Consumed number of bytes to
+							decode a frame */
+#define S5P_FIMV_SI_DISPLAY_STATUS	0x201c /* status of decoded picture */
+
+#define S5P_FIMV_SI_DECODE_Y_ADR	0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR	0x2028 /* chroma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS	0x202c /* status of decoded picture */
+
+#define S5P_FIMV_SI_CH0_SB_ST_ADR	0x2044 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH0_SB_FRM_SIZE	0x2048 /* size of stream buf */
+#define S5P_FIMV_SI_CH0_DESC_ADR	0x204c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH0_CPB_SIZE	0x2058 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH0_DESC_SIZE	0x205c /* max size of descriptor buf */
+
+#define S5P_FIMV_SI_CH1_SB_ST_ADR	0x2084 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH1_SB_FRM_SIZE	0x2088 /* size of stream buf */
+#define S5P_FIMV_SI_CH1_DESC_ADR	0x208c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH1_CPB_SIZE	0x2098 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH1_DESC_SIZE	0x209c /* max size of descriptor buf */
+
+#define S5P_FIMV_CRC_LUMA0		0x2030 /* luma crc data per frame
+								(top field) */
+#define S5P_FIMV_CRC_CHROMA0		0x2034 /* chroma crc data per frame
+								(top field) */
+#define S5P_FIMV_CRC_LUMA1		0x2038 /* luma crc data per bottom
+								field */
+#define S5P_FIMV_CRC_CHROMA1		0x203c /* chroma crc data per bottom
+								field */
+
+/* Display status */
+#define S5P_FIMV_DEC_STATUS_DECODING_ONLY		0
+#define S5P_FIMV_DEC_STATUS_DECODING_DISPLAY		1
+#define S5P_FIMV_DEC_STATUS_DISPLAY_ONLY		2
+#define S5P_FIMV_DEC_STATUS_DECODING_EMPTY		3
+#define S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK	7
+#define S5P_FIMV_DEC_STATUS_PROGRESSIVE			(0<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE			(1<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE_MASK		(1<<3)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_TWO		(0<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_FOUR		(1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_MASK		(1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_GENERATED		(1<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_NOT_GENERATED		(0<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_MASK			(1<<5)
+
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK		(3<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC		(1<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC		(2<<4)
+
+/* Decode frame address */
+#define S5P_FIMV_DECODE_Y_ADR			0x2024
+#define S5P_FIMV_DECODE_C_ADR			0x2028
+
+/* Decoded frame type */
+#define S5P_FIMV_DECODE_FRAME_TYPE		0x2020
+#define S5P_FIMV_DECODE_FRAME_MASK		7
+
+#define S5P_FIMV_DECODE_FRAME_SKIPPED		0
+#define S5P_FIMV_DECODE_FRAME_I_FRAME		1
+#define S5P_FIMV_DECODE_FRAME_P_FRAME		2
+#define S5P_FIMV_DECODE_FRAME_B_FRAME		3
+#define S5P_FIMV_DECODE_FRAME_OTHER_FRAME	4
+
+/* Sizes of buffers required for decoding */
+#define S5P_FIMV_DEC_NB_IP_SIZE			(32 * 1024)
+#define S5P_FIMV_DEC_VERT_NB_MV_SIZE		(16 * 1024)
+#define S5P_FIMV_DEC_NB_DCAC_SIZE		(16 * 1024)
+#define S5P_FIMV_DEC_UPNB_MV_SIZE		(68 * 1024)
+#define S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE		(136 * 1024)
+#define S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE     (32 * 1024)
+#define S5P_FIMV_DEC_VC1_BITPLANE_SIZE		(2 * 1024)
+#define S5P_FIMV_DEC_STX_PARSER_SIZE		(68 * 1024)
+
+#define S5P_FIMV_DEC_BUF_ALIGN			(8 * 1024)
+#define S5P_FIMV_ENC_BUF_ALIGN			(8 * 1024)
+#define S5P_FIMV_NV12M_HALIGN			16
+#define S5P_FIMV_NV12M_LVALIGN			16
+#define S5P_FIMV_NV12M_CVALIGN			8
+#define S5P_FIMV_NV12MT_HALIGN			128
+#define S5P_FIMV_NV12MT_VALIGN			32
+#define S5P_FIMV_NV12M_SALIGN			2048
+#define S5P_FIMV_NV12MT_SALIGN			8192
+
+/* Sizes of buffers required for encoding */
+#define S5P_FIMV_ENC_UPMV_SIZE		0x10000
+#define S5P_FIMV_ENC_COLFLG_SIZE	0x10000
+#define S5P_FIMV_ENC_INTRAMD_SIZE	0x10000
+#define S5P_FIMV_ENC_INTRAPRED_SIZE	0x4000
+#define S5P_FIMV_ENC_NBORINFO_SIZE	0x10000
+#define S5P_FIMV_ENC_ACDCCOEF_SIZE	0x10000
+
+/* Encoder */
+#define S5P_FIMV_ENC_SI_STRM_SIZE	0x2004 /* stream size */
+#define S5P_FIMV_ENC_SI_PIC_CNT		0x2008 /* picture count */
+#define S5P_FIMV_ENC_SI_WRITE_PTR	0x200c /* write pointer */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE	0x2010 /* slice type(I/P/B/IDR) */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_NON_CODED	0
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_I		1
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_P		2
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_B		3
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_SKIPPED	4
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_OTHERS	5
+#define S5P_FIMV_ENCODED_Y_ADDR         0x2014 /* the addr of the encoded
+								luma pic */
+#define S5P_FIMV_ENCODED_C_ADDR         0x2018 /* the addr of the encoded
+								chroma pic */
+
+#define S5P_FIMV_ENC_SI_CH0_SB_ADR	0x2044 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_SB_SIZE	0x204c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR	0x2050 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH0_CUR_C_ADR	0x2054 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH0_FRAME_INS	0x2058 /* frame insertion */
+
+#define S5P_FIMV_ENC_SI_CH1_SB_ADR	0x2084 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_SB_SIZE	0x208c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_CUR_Y_ADR	0x2090 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH1_CUR_C_ADR	0x2094 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH1_FRAME_INS	0x2098 /* frame insertion */
+
+#define S5P_FIMV_ENC_PIC_TYPE_CTRL	0xc504 /* pic type level control */
+#define S5P_FIMV_ENC_B_RECON_WRITE_ON	0xc508 /* B frame recon write ctrl */
+#define S5P_FIMV_ENC_MSLICE_CTRL	0xc50c /* multi slice control */
+#define S5P_FIMV_ENC_MSLICE_MB		0xc510 /* MB number in the one slice */
+#define S5P_FIMV_ENC_MSLICE_BIT		0xc514 /* bit count for one slice */
+#define S5P_FIMV_ENC_CIR_CTRL		0xc518 /* number of intra refresh MB */
+#define S5P_FIMV_ENC_MAP_FOR_CUR	0xc51c /* linear or tiled mode */
+#define S5P_FIMV_ENC_PADDING_CTRL	0xc520 /* padding control */
+
+#define S5P_FIMV_ENC_RC_CONFIG		0xc5a0 /* RC config */
+#define S5P_FIMV_ENC_RC_BIT_RATE	0xc5a8 /* bit rate */
+#define S5P_FIMV_ENC_RC_QBOUND		0xc5ac /* max/min QP */
+#define S5P_FIMV_ENC_RC_RPARA		0xc5b0 /* rate control reaction coeff */
+#define S5P_FIMV_ENC_RC_MB_CTRL		0xc5b4 /* MB adaptive scaling */
+
+/* Encoder for H264 only */
+#define S5P_FIMV_ENC_H264_ENTROPY_MODE	0xd004 /* CAVLC or CABAC */
+#define S5P_FIMV_ENC_H264_ALPHA_OFF	0xd008 /* loop filter alpha offset */
+#define S5P_FIMV_ENC_H264_BETA_OFF	0xd00c /* loop filter beta offset */
+#define S5P_FIMV_ENC_H264_NUM_OF_REF	0xd010 /* number of reference for P/B */
+#define S5P_FIMV_ENC_H264_TRANS_FLAG	0xd034 /* 8x8 transform flag in PPS &
+								high profile */
+
+#define S5P_FIMV_ENC_RC_FRAME_RATE	0xd0d0 /* frame rate */
+
+/* Encoder for MPEG4 only */
+#define S5P_FIMV_ENC_MPEG4_QUART_PXL	0xe008 /* qpel interpolation ctrl */
+
+/* Additional */
+#define S5P_FIMV_SI_CH0_DPB_CONF_CTRL   0x2068 /* DPB Config Control Register */
+#define S5P_FIMV_SLICE_INT_MASK		1
+#define S5P_FIMV_SLICE_INT_SHIFT	31
+#define S5P_FIMV_DDELAY_ENA_SHIFT	30
+#define S5P_FIMV_DDELAY_VAL_MASK	0xff
+#define S5P_FIMV_DDELAY_VAL_SHIFT	16
+#define S5P_FIMV_DPB_COUNT_MASK		0xffff
+#define S5P_FIMV_DPB_FLUSH_MASK		1
+#define S5P_FIMV_DPB_FLUSH_SHIFT	14
+
+
+#define S5P_FIMV_SI_CH0_RELEASE_BUF     0x2060 /* DPB release buffer register */
+#define S5P_FIMV_SI_CH0_HOST_WR_ADR	0x2064 /* address of shared memory */
+
+/* Codec numbers  */
+#define S5P_FIMV_CODEC_NONE		-1
+
+#define S5P_FIMV_CODEC_H264_DEC		0
+#define S5P_FIMV_CODEC_VC1_DEC		1
+#define S5P_FIMV_CODEC_MPEG4_DEC	2
+#define S5P_FIMV_CODEC_MPEG2_DEC	3
+#define S5P_FIMV_CODEC_H263_DEC		4
+#define S5P_FIMV_CODEC_VC1RCV_DEC	5
+
+#define S5P_FIMV_CODEC_H264_ENC		16
+#define S5P_FIMV_CODEC_MPEG4_ENC	17
+#define S5P_FIMV_CODEC_H263_ENC		18
+
+/* Channel Control Register */
+#define S5P_FIMV_CH_SEQ_HEADER		1
+#define S5P_FIMV_CH_FRAME_START		2
+#define S5P_FIMV_CH_LAST_FRAME		3
+#define S5P_FIMV_CH_INIT_BUFS		4
+#define S5P_FIMV_CH_FRAME_START_REALLOC	5
+#define S5P_FIMV_CH_MASK		7
+#define S5P_FIMV_CH_SHIFT		16
+
+
+/* Host to RISC command */
+#define S5P_FIMV_H2R_CMD_EMPTY		0
+#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE	1
+#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE	2
+#define S5P_FIMV_H2R_CMD_SYS_INIT	3
+#define S5P_FIMV_H2R_CMD_FLUSH		4
+#define S5P_FIMV_H2R_CMD_SLEEP		5
+#define S5P_FIMV_H2R_CMD_WAKEUP		6
+
+#define S5P_FIMV_R2H_CMD_EMPTY			0
+#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET	1
+#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET	2
+#define S5P_FIMV_R2H_CMD_RSV_RET		3
+#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET		4
+#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET		5
+#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET		6
+#define S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET	7
+#define S5P_FIMV_R2H_CMD_SYS_INIT_RET		8
+#define S5P_FIMV_R2H_CMD_FW_STATUS_RET		9
+#define S5P_FIMV_R2H_CMD_SLEEP_RET		10
+#define S5P_FIMV_R2H_CMD_WAKEUP_RET		11
+#define S5P_FIMV_R2H_CMD_FLUSH_RET		12
+#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET	15
+#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET		16
+#define S5P_FIMV_R2H_CMD_ERR_RET		32
+
+/* Error handling defines */
+#define S5P_FIMV_ERR_WARNINGS_START		145
+#define S5P_FIMV_ERR_DEC_MASK			0xFFFF
+#define S5P_FIMV_ERR_DEC_SHIFT			0
+#define S5P_FIMV_ERR_DSPL_MASK			0xFFFF0000
+#define S5P_FIMV_ERR_DSPL_SHIFT			16
+
+/* Shared memory registers' offsets */
+
+/* An offset of the start position in the stream when
+ * the start position is not aligned */
+#define S5P_FIMV_SHARED_CROP_INFO_H		0x0020
+#define S5P_FIMV_SHARED_CROP_LEFT_MASK		0xFFFF
+#define S5P_FIMV_SHARED_CROP_LEFT_SHIFT		0
+#define S5P_FIMV_SHARED_CROP_RIGHT_MASK		0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_RIGHT_SHIFT	16
+#define S5P_FIMV_SHARED_CROP_INFO_V		0x0024
+#define S5P_FIMV_SHARED_CROP_TOP_MASK		0xFFFF
+#define S5P_FIMV_SHARED_CROP_TOP_SHIFT		0
+#define S5P_FIMV_SHARED_CROP_BOTTOM_MASK	0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT	16
+#define S5P_FIMV_SHARED_SET_FRAME_TAG		0x0004
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_TOP	0x0008
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_BOT	0x000C
+#define S5P_FIMV_SHARED_START_BYTE_NUM		0x0018
+#define S5P_FIMV_SHARED_RC_VOP_TIMING		0x0030
+#define S5P_FIMV_SHARED_LUMA_DPB_SIZE		0x0064
+#define S5P_FIMV_SHARED_CHROMA_DPB_SIZE		0x0068
+#define S5P_FIMV_SHARED_MV_SIZE			0x006C
+#define S5P_FIMV_SHARED_PIC_TIME_TOP		0x0010
+#define S5P_FIMV_SHARED_PIC_TIME_BOTTOM		0x0014
+#define S5P_FIMV_SHARED_EXT_ENC_CONTROL		0x0028
+#define S5P_FIMV_SHARED_P_B_FRAME_QP		0x0070
+#define S5P_FIMV_SHARED_ASPECT_RATIO_IDC	0x0074
+#define S5P_FIMV_SHARED_EXTENDED_SAR		0x0078
+#define S5P_FIMV_SHARED_H264_I_PERIOD		0x009C
+#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG	0x00A0
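+
+/*
+ * Illustrative helpers only (nothing in the driver relies on them): the
+ * crop rectangle is stored two fields per 32-bit shared-memory word, so
+ * each component is recovered with the matching mask and shift, e.g. for
+ * the horizontal word read at S5P_FIMV_SHARED_CROP_INFO_H:
+ */
+static inline unsigned int s5p_fimv_crop_left(unsigned int crop_h)
+{
+	return (crop_h & S5P_FIMV_SHARED_CROP_LEFT_MASK) >>
+					S5P_FIMV_SHARED_CROP_LEFT_SHIFT;
+}
+
+static inline unsigned int s5p_fimv_crop_right(unsigned int crop_h)
+{
+	return (crop_h & S5P_FIMV_SHARED_CROP_RIGHT_MASK) >>
+					S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
+}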
+
+#endif /* _REGS_FIMV_H */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
new file mode 100644
index 0000000..e3e616d
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -0,0 +1,1230 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.1
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+
+#define S5P_MFC_NAME		"s5p-mfc"
+#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"
+#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"
+
+int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
+
+/* Helper functions for interrupt processing */
+/* Remove from hw execution round robin */
+static void clear_work_bit(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	spin_lock(&dev->condlock);
+	clear_bit(ctx->num, &dev->ctx_work_bits);
+	spin_unlock(&dev->condlock);
+}
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
+			unsigned int err)
+{
+	ctx->int_cond = 1;
+	ctx->int_type = reason;
+	ctx->int_err = err;
+	wake_up(&ctx->queue);
+}
+
+/* Wake up device wait_queue */
+static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
+			unsigned int err)
+{
+	dev->int_cond = 1;
+	dev->int_type = reason;
+	dev->int_err = err;
+	wake_up(&dev->queue);
+}
+
+static void s5p_mfc_watchdog(unsigned long arg)
+{
+	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+
+	if (test_bit(0, &dev->hw_lock))
+		atomic_inc(&dev->watchdog_cnt);
+	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
+		/* The hardware is busy but has not raised an interrupt
+		 * for MFC_WATCHDOG_CNT consecutive runs of this watchdog
+		 * timer. This usually means a serious hardware error, so
+		 * it is time to kill all instances and reset the MFC. */
+		mfc_err("Time out during waiting for HW\n");
+		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
+	}
+	dev->watchdog_timer.expires = jiffies +
+					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+	add_timer(&dev->watchdog_timer);
+}
+
+static void s5p_mfc_watchdog_worker(struct work_struct *work)
+{
+	struct s5p_mfc_dev *dev;
+	struct s5p_mfc_ctx *ctx;
+	unsigned long flags;
+	int mutex_locked;
+	int i, ret;
+
+	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
+
+	mfc_err("Driver timeout error handling\n");
+	/* Lock the mutex that protects open and release.
+	 * This is necessary as they may load and unload firmware. */
+	mutex_locked = mutex_trylock(&dev->mfc_mutex);
+	if (!mutex_locked)
+		mfc_err("Error: some instance may be closing/opening\n");
+	spin_lock_irqsave(&dev->irqlock, flags);
+
+	s5p_mfc_clock_off();
+
+	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+		ctx = dev->ctx[i];
+		if (!ctx)
+			continue;
+		ctx->state = MFCINST_ERROR;
+		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+		clear_work_bit(ctx);
+		wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
+	}
+	clear_bit(0, &dev->hw_lock);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	/* Double check if there is at least one instance running.
+	 * If no instance is in memory then no firmware should be present */
+	if (dev->num_inst > 0) {
+		ret = s5p_mfc_reload_firmware(dev);
+		if (ret) {
+			mfc_err("Failed to reload FW\n");
+			goto unlock;
+		}
+		s5p_mfc_clock_on();
+		ret = s5p_mfc_init_hw(dev);
+		if (ret)
+			mfc_err("Failed to reinit FW\n");
+	}
+unlock:
+	if (mutex_locked)
+		mutex_unlock(&dev->mfc_mutex);
+}
+
+static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (!vdev) {
+		mfc_err("failed to get video_device");
+		return MFCNODE_INVALID;
+	}
+	if (vdev->index == 0)
+		return MFCNODE_DECODER;
+	else if (vdev->index == 1)
+		return MFCNODE_ENCODER;
+	return MFCNODE_INVALID;
+}
+
+static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
+{
+	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
+	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
+}
+
+static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_buf *dst_buf;
+
+	ctx->state = MFCINST_FINISHED;
+	ctx->sequence++;
+	while (!list_empty(&ctx->dst_queue)) {
+		dst_buf = list_entry(ctx->dst_queue.next,
+				     struct s5p_mfc_buf, list);
+		mfc_debug(2, "Cleaning up buffer: %d\n",
+					  dst_buf->b->v4l2_buf.index);
+		vb2_set_plane_payload(dst_buf->b, 0, 0);
+		vb2_set_plane_payload(dst_buf->b, 1, 0);
+		list_del(&dst_buf->list);
+		ctx->dst_queue_cnt--;
+		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
+
+		if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
+			s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
+			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+		else
+			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
+
+		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
+		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
+	}
+}
+
+static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_buf  *dst_buf, *src_buf;
+	size_t dec_y_addr = s5p_mfc_get_dec_y_adr();
+	unsigned int frame_type = s5p_mfc_get_frame_type();
+
+	/* Copy timestamp / timecode from decoded src to dst and set
+	   appropriate flags */
+	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
+			memcpy(&dst_buf->b->v4l2_buf.timecode,
+				&src_buf->b->v4l2_buf.timecode,
+				sizeof(struct v4l2_timecode));
+			memcpy(&dst_buf->b->v4l2_buf.timestamp,
+				&src_buf->b->v4l2_buf.timestamp,
+				sizeof(struct timeval));
+			switch (frame_type) {
+			case S5P_FIMV_DECODE_FRAME_I_FRAME:
+				dst_buf->b->v4l2_buf.flags |=
+						V4L2_BUF_FLAG_KEYFRAME;
+				break;
+			case S5P_FIMV_DECODE_FRAME_P_FRAME:
+				dst_buf->b->v4l2_buf.flags |=
+						V4L2_BUF_FLAG_PFRAME;
+				break;
+			case S5P_FIMV_DECODE_FRAME_B_FRAME:
+				dst_buf->b->v4l2_buf.flags |=
+						V4L2_BUF_FLAG_BFRAME;
+				break;
+			}
+			break;
+		}
+	}
+}
+
+static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_buf  *dst_buf;
+	size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr();
+	unsigned int frame_type = s5p_mfc_get_frame_type();
+	unsigned int index;
+
+	/* If frame is same as previous then skip and do not dequeue */
+	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
+		if (!ctx->after_packed_pb)
+			ctx->sequence++;
+		ctx->after_packed_pb = 0;
+		return;
+	}
+	ctx->sequence++;
+	/* The MFC returns the address of the buffer; now we have to
+	 * check which videobuf it corresponds to */
+	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+		/* Check if this is the buffer we're looking for */
+		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
+			list_del(&dst_buf->list);
+			ctx->dst_queue_cnt--;
+			dst_buf->b->v4l2_buf.sequence = ctx->sequence;
+			if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
+				s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
+				dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+			else
+				dst_buf->b->v4l2_buf.field =
+							V4L2_FIELD_INTERLACED;
+			vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
+			vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
+			clear_bit(dst_buf->b->v4l2_buf.index,
+							&ctx->dec_dst_flag);
+
+			vb2_buffer_done(dst_buf->b,
+				err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+			index = dst_buf->b->v4l2_buf.index;
+			break;
+		}
+	}
+}
+
+/* Handle frame decoding interrupt */
+static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
+					unsigned int reason, unsigned int err)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned int dst_frame_status;
+	struct s5p_mfc_buf *src_buf;
+	unsigned long flags;
+	unsigned int res_change;
+
+	unsigned int index;
+
+	dst_frame_status = s5p_mfc_get_dspl_status()
+				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
+	res_change = s5p_mfc_get_dspl_status()
+				& S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
+	mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
+	if (ctx->state == MFCINST_RES_CHANGE_INIT)
+		ctx->state = MFCINST_RES_CHANGE_FLUSH;
+	if (res_change) {
+		ctx->state = MFCINST_RES_CHANGE_INIT;
+		s5p_mfc_clear_int_flags(dev);
+		wake_up_ctx(ctx, reason, err);
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+			BUG();
+		s5p_mfc_clock_off();
+		s5p_mfc_try_run(dev);
+		return;
+	}
+	if (ctx->dpb_flush_flag)
+		ctx->dpb_flush_flag = 0;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	/* All frames remaining in the buffer have been extracted  */
+	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
+		if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
+			s5p_mfc_handle_frame_all_extracted(ctx);
+			ctx->state = MFCINST_RES_CHANGE_END;
+			goto leave_handle_frame;
+		} else {
+			s5p_mfc_handle_frame_all_extracted(ctx);
+		}
+	}
+
+	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
+		dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
+		s5p_mfc_handle_frame_copy_time(ctx);
+
+	/* A frame has been decoded and is in the buffer  */
+	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
+	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
+		s5p_mfc_handle_frame_new(ctx, err);
+	} else {
+		mfc_debug(2, "No frame decode\n");
+	}
+	/* Mark source buffer as complete */
+	if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
+		&& !list_empty(&ctx->src_queue)) {
+		src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+								list);
+		ctx->consumed_stream += s5p_mfc_get_consumed_stream();
+		if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
+			s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
+					&& ctx->consumed_stream + STUFF_BYTE <
+					src_buf->b->v4l2_planes[0].bytesused) {
+			/* Run MFC again on the same buffer */
+			mfc_debug(2, "Running again the same buffer\n");
+			ctx->after_packed_pb = 1;
+		} else {
+			index = src_buf->b->v4l2_buf.index;
+			mfc_debug(2, "MFC needs next buffer\n");
+			ctx->consumed_stream = 0;
+			list_del(&src_buf->list);
+			ctx->src_queue_cnt--;
+			if (s5p_mfc_err_dec(err) > 0)
+				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
+			else
+				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
+		}
+	}
+leave_handle_frame:
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
+				    || ctx->dst_queue_cnt < ctx->dpb_count)
+		clear_work_bit(ctx);
+	s5p_mfc_clear_int_flags(dev);
+	wake_up_ctx(ctx, reason, err);
+	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+		BUG();
+	s5p_mfc_clock_off();
+	s5p_mfc_try_run(dev);
+}
+
+/* Error handling for interrupt */
+static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
+				 unsigned int reason, unsigned int err)
+{
+	struct s5p_mfc_dev *dev;
+	unsigned long flags;
+
+	/* If no context is available then all necessary
+	 * processing has been done. */
+	if (ctx == NULL)
+		return;
+
+	dev = ctx->dev;
+	mfc_err("Interrupt Error: %08x\n", err);
+	s5p_mfc_clear_int_flags(dev);
+	wake_up_dev(dev, reason, err);
+
+	/* Error recovery is dependent on the state of context */
+	switch (ctx->state) {
+	case MFCINST_INIT:
+		/* This error had to happen while acquiring the instance */
+	case MFCINST_GOT_INST:
+		/* This error had to happen while parsing the header */
+	case MFCINST_HEAD_PARSED:
+		/* This error had to happen while setting dst buffers */
+	case MFCINST_RETURN_INST:
+		/* This error had to happen while releasing instance */
+		clear_work_bit(ctx);
+		wake_up_ctx(ctx, reason, err);
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+			BUG();
+		s5p_mfc_clock_off();
+		ctx->state = MFCINST_ERROR;
+		break;
+	case MFCINST_FINISHING:
+	case MFCINST_FINISHED:
+	case MFCINST_RUNNING:
+		/* It is highly probable that an error occurred
+		 * while decoding a frame */
+		clear_work_bit(ctx);
+		ctx->state = MFCINST_ERROR;
+		/* Mark all dst buffers as having an error */
+		spin_lock_irqsave(&dev->irqlock, flags);
+		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+		/* Mark all src buffers as having an error */
+		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+			BUG();
+		s5p_mfc_clock_off();
+		break;
+	default:
+		mfc_err("Encountered an error interrupt which had not been handled\n");
+		break;
+	}
+	return;
+}
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
+				 unsigned int reason, unsigned int err)
+{
+	struct s5p_mfc_dev *dev;
+	unsigned int guard_width, guard_height;
+
+	if (ctx == NULL)
+		return;
+	dev = ctx->dev;
+	if (ctx->c_ops->post_seq_start) {
+		if (ctx->c_ops->post_seq_start(ctx))
+			mfc_err("post_seq_start() failed\n");
+	} else {
+		ctx->img_width = s5p_mfc_get_img_width();
+		ctx->img_height = s5p_mfc_get_img_height();
+
+		ctx->buf_width = ALIGN(ctx->img_width,
+						S5P_FIMV_NV12MT_HALIGN);
+		ctx->buf_height = ALIGN(ctx->img_height,
+						S5P_FIMV_NV12MT_VALIGN);
+		mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
+			"buffer dimensions: %dx%d\n", ctx->img_width,
+				ctx->img_height, ctx->buf_width,
+						ctx->buf_height);
+		if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
+			ctx->luma_size = ALIGN(ctx->buf_width *
+				ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
+			ctx->chroma_size = ALIGN(ctx->buf_width *
+					 ALIGN((ctx->img_height >> 1),
+					       S5P_FIMV_NV12MT_VALIGN),
+					       S5P_FIMV_DEC_BUF_ALIGN);
+			ctx->mv_size = ALIGN(ctx->buf_width *
+					ALIGN((ctx->buf_height >> 2),
+					S5P_FIMV_NV12MT_VALIGN),
+					S5P_FIMV_DEC_BUF_ALIGN);
+		} else {
+			guard_width = ALIGN(ctx->img_width + 24,
+					S5P_FIMV_NV12MT_HALIGN);
+			guard_height = ALIGN(ctx->img_height + 16,
+						S5P_FIMV_NV12MT_VALIGN);
+			ctx->luma_size = ALIGN(guard_width *
+				guard_height, S5P_FIMV_DEC_BUF_ALIGN);
+			guard_width = ALIGN(ctx->img_width + 16,
+						S5P_FIMV_NV12MT_HALIGN);
+			guard_height = ALIGN((ctx->img_height >> 1) + 4,
+						S5P_FIMV_NV12MT_VALIGN);
+			ctx->chroma_size = ALIGN(guard_width *
+				guard_height, S5P_FIMV_DEC_BUF_ALIGN);
+			ctx->mv_size = 0;
+		}
+		ctx->dpb_count = s5p_mfc_get_dpb_count();
+		if (ctx->img_width == 0 || ctx->img_height == 0)
+			ctx->state = MFCINST_ERROR;
+		else
+			ctx->state = MFCINST_HEAD_PARSED;
+	}
+	s5p_mfc_clear_int_flags(dev);
+	clear_work_bit(ctx);
+	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+		BUG();
+	s5p_mfc_clock_off();
+	s5p_mfc_try_run(dev);
+	wake_up_ctx(ctx, reason, err);
+}
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
+				 unsigned int reason, unsigned int err)
+{
+	struct s5p_mfc_buf *src_buf;
+	struct s5p_mfc_dev *dev;
+	unsigned long flags;
+
+	if (ctx == NULL)
+		return;
+	dev = ctx->dev;
+	s5p_mfc_clear_int_flags(dev);
+	ctx->int_type = reason;
+	ctx->int_err = err;
+	ctx->int_cond = 1;
+	spin_lock(&dev->condlock);
+	clear_bit(ctx->num, &dev->ctx_work_bits);
+	spin_unlock(&dev->condlock);
+	if (err == 0) {
+		ctx->state = MFCINST_RUNNING;
+		if (!ctx->dpb_flush_flag) {
+			spin_lock_irqsave(&dev->irqlock, flags);
+			if (!list_empty(&ctx->src_queue)) {
+				src_buf = list_entry(ctx->src_queue.next,
+					     struct s5p_mfc_buf, list);
+				list_del(&src_buf->list);
+				ctx->src_queue_cnt--;
+				vb2_buffer_done(src_buf->b,
+						VB2_BUF_STATE_DONE);
+			}
+			spin_unlock_irqrestore(&dev->irqlock, flags);
+		} else {
+			ctx->dpb_flush_flag = 0;
+		}
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+			BUG();
+
+		s5p_mfc_clock_off();
+
+		wake_up(&ctx->queue);
+		s5p_mfc_try_run(dev);
+	} else {
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+			BUG();
+
+		s5p_mfc_clock_off();
+
+		wake_up(&ctx->queue);
+	}
+}
+
+/* Interrupt processing */
+static irqreturn_t s5p_mfc_irq(int irq, void *priv)
+{
+	struct s5p_mfc_dev *dev = priv;
+	struct s5p_mfc_ctx *ctx;
+	unsigned int reason;
+	unsigned int err;
+
+	mfc_debug_enter();
+	/* Reset the timeout watchdog */
+	atomic_set(&dev->watchdog_cnt, 0);
+	ctx = dev->ctx[dev->curr_ctx];
+	/* Get the reason of interrupt and the error code */
+	reason = s5p_mfc_get_int_reason();
+	err = s5p_mfc_get_int_err();
+	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
+	switch (reason) {
+	case S5P_FIMV_R2H_CMD_ERR_RET:
+		/* An error has occurred */
+		if (ctx->state == MFCINST_RUNNING &&
+			s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START)
+			s5p_mfc_handle_frame(ctx, reason, err);
+		else
+			s5p_mfc_handle_error(ctx, reason, err);
+		clear_bit(0, &dev->enter_suspend);
+		break;
+
+	case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
+	case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
+		if (ctx->c_ops->post_frame_start) {
+			if (ctx->c_ops->post_frame_start(ctx))
+				mfc_err("post_frame_start() failed\n");
+			s5p_mfc_clear_int_flags(dev);
+			wake_up_ctx(ctx, reason, err);
+			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+				BUG();
+			s5p_mfc_clock_off();
+			s5p_mfc_try_run(dev);
+		} else {
+			s5p_mfc_handle_frame(ctx, reason, err);
+		}
+		break;
+
+	case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
+		s5p_mfc_handle_seq_done(ctx, reason, err);
+		break;
+
+	case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
+		ctx->inst_no = s5p_mfc_get_inst_no();
+		ctx->state = MFCINST_GOT_INST;
+		clear_work_bit(ctx);
+		wake_up(&ctx->queue);
+		goto irq_cleanup_hw;
+
+	case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
+		clear_work_bit(ctx);
+		ctx->state = MFCINST_FREE;
+		wake_up(&ctx->queue);
+		goto irq_cleanup_hw;
+
+	case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
+	case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
+	case S5P_FIMV_R2H_CMD_SLEEP_RET:
+	case S5P_FIMV_R2H_CMD_WAKEUP_RET:
+		if (ctx)
+			clear_work_bit(ctx);
+		s5p_mfc_clear_int_flags(dev);
+		wake_up_dev(dev, reason, err);
+		clear_bit(0, &dev->hw_lock);
+		clear_bit(0, &dev->enter_suspend);
+		break;
+
+	case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
+		s5p_mfc_handle_init_buffers(ctx, reason, err);
+		break;
+	default:
+		mfc_debug(2, "Unknown int reason\n");
+		s5p_mfc_clear_int_flags(dev);
+	}
+	mfc_debug_leave();
+	return IRQ_HANDLED;
+irq_cleanup_hw:
+	s5p_mfc_clear_int_flags(dev);
+	ctx->int_type = reason;
+	ctx->int_err = err;
+	ctx->int_cond = 1;
+	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+		mfc_err("Failed to unlock hw\n");
+
+	s5p_mfc_clock_off();
+
+	s5p_mfc_try_run(dev);
+	mfc_debug(2, "Exit via irq_cleanup_hw\n");
+	return IRQ_HANDLED;
+}
+
+/* Open an MFC node */
+static int s5p_mfc_open(struct file *file)
+{
+	struct s5p_mfc_dev *dev = video_drvdata(file);
+	struct s5p_mfc_ctx *ctx = NULL;
+	struct vb2_queue *q;
+	unsigned long flags;
+	int ret = 0;
+
+	mfc_debug_enter();
+	if (mutex_lock_interruptible(&dev->mfc_mutex))
+		return -ERESTARTSYS;
+	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
+	/* Allocate memory for context */
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		mfc_err("Not enough memory\n");
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+	ctx->dev = dev;
+	INIT_LIST_HEAD(&ctx->src_queue);
+	INIT_LIST_HEAD(&ctx->dst_queue);
+	ctx->src_queue_cnt = 0;
+	ctx->dst_queue_cnt = 0;
+	/* Get context number */
+	ctx->num = 0;
+	while (dev->ctx[ctx->num]) {
+		ctx->num++;
+		if (ctx->num >= MFC_NUM_CONTEXTS) {
+			mfc_err("Too many open contexts\n");
+			ret = -EBUSY;
+			goto err_no_ctx;
+		}
+	}
+	/* Mark context as idle */
+	spin_lock_irqsave(&dev->condlock, flags);
+	clear_bit(ctx->num, &dev->ctx_work_bits);
+	spin_unlock_irqrestore(&dev->condlock, flags);
+	dev->ctx[ctx->num] = ctx;
+	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+		ctx->type = MFCINST_DECODER;
+		ctx->c_ops = get_dec_codec_ops();
+		/* Setup ctrl handler */
+		ret = s5p_mfc_dec_ctrls_setup(ctx);
+		if (ret) {
+			mfc_err("Failed to setup mfc controls\n");
+			goto err_ctrls_setup;
+		}
+	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+		ctx->type = MFCINST_ENCODER;
+		ctx->c_ops = get_enc_codec_ops();
+		/* only for encoder */
+		INIT_LIST_HEAD(&ctx->ref_queue);
+		ctx->ref_queue_cnt = 0;
+		/* Setup ctrl handler */
+		ret = s5p_mfc_enc_ctrls_setup(ctx);
+		if (ret) {
+			mfc_err("Failed to setup mfc controls\n");
+			goto err_ctrls_setup;
+		}
+	} else {
+		ret = -ENOENT;
+		goto err_bad_node;
+	}
+	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+	ctx->inst_no = -1;
+	/* Load firmware if this is the first instance */
+	if (dev->num_inst == 1) {
+		dev->watchdog_timer.expires = jiffies +
+					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+		add_timer(&dev->watchdog_timer);
+		ret = s5p_mfc_power_on();
+		if (ret < 0) {
+			mfc_err("power on failed\n");
+			goto err_pwr_enable;
+		}
+		s5p_mfc_clock_on();
+		ret = s5p_mfc_alloc_and_load_firmware(dev);
+		if (ret)
+			goto err_alloc_fw;
+		/* Init the FW */
+		ret = s5p_mfc_init_hw(dev);
+		if (ret)
+			goto err_init_hw;
+		s5p_mfc_clock_off();
+	}
+	/* Init videobuf2 queue for CAPTURE */
+	q = &ctx->vq_dst;
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	q->drv_priv = &ctx->fh;
+	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+		q->io_modes = VB2_MMAP;
+		q->ops = get_dec_queue_ops();
+	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+		q->io_modes = VB2_MMAP | VB2_USERPTR;
+		q->ops = get_enc_queue_ops();
+	} else {
+		ret = -ENOENT;
+		goto err_queue_init;
+	}
+	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+	ret = vb2_queue_init(q);
+	if (ret) {
+		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
+		goto err_queue_init;
+	}
+	/* Init videobuf2 queue for OUTPUT */
+	q = &ctx->vq_src;
+	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	q->io_modes = VB2_MMAP;
+	q->drv_priv = &ctx->fh;
+	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+		q->io_modes = VB2_MMAP;
+		q->ops = get_dec_queue_ops();
+	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+		q->io_modes = VB2_MMAP | VB2_USERPTR;
+		q->ops = get_enc_queue_ops();
+	} else {
+		ret = -ENOENT;
+		goto err_queue_init;
+	}
+	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+	ret = vb2_queue_init(q);
+	if (ret) {
+		mfc_err("Failed to initialize videobuf2 queue(output)\n");
+		goto err_queue_init;
+	}
+	init_waitqueue_head(&ctx->queue);
+	mutex_unlock(&dev->mfc_mutex);
+	mfc_debug_leave();
+	return ret;
+	/* Deinit when failure occurred */
+err_queue_init:
+err_init_hw:
+	s5p_mfc_release_firmware(dev);
+err_alloc_fw:
+	dev->ctx[ctx->num] = NULL;
+	del_timer_sync(&dev->watchdog_timer);
+	s5p_mfc_clock_off();
+err_pwr_enable:
+	if (dev->num_inst == 1) {
+		if (s5p_mfc_power_off() < 0)
+			mfc_err("power off failed\n");
+		s5p_mfc_release_firmware(dev);
+	}
+err_ctrls_setup:
+	s5p_mfc_dec_ctrls_delete(ctx);
+err_bad_node:
+err_no_ctx:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+err_alloc:
+	dev->num_inst--;
+	mutex_unlock(&dev->mfc_mutex);
+	mfc_debug_leave();
+	return ret;
+}
+
+/* Release MFC context */
+static int s5p_mfc_release(struct file *file)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+
+	mfc_debug_enter();
+	mutex_lock(&dev->mfc_mutex);
+	s5p_mfc_clock_on();
+	vb2_queue_release(&ctx->vq_src);
+	vb2_queue_release(&ctx->vq_dst);
+	/* Mark context as idle */
+	spin_lock_irqsave(&dev->condlock, flags);
+	clear_bit(ctx->num, &dev->ctx_work_bits);
+	spin_unlock_irqrestore(&dev->condlock, flags);
+	/* If the instance was initialised then
+	 * return it and free resources */
+	if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
+		mfc_debug(2, "Has to free instance\n");
+		ctx->state = MFCINST_RETURN_INST;
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+		s5p_mfc_clean_ctx_int_flags(ctx);
+		s5p_mfc_try_run(dev);
+		/* Wait until the instance is returned or a timeout occurs */
+		if (s5p_mfc_wait_for_done_ctx
+		    (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
+			s5p_mfc_clock_off();
+			mfc_err("Err returning instance\n");
+		}
+		mfc_debug(2, "After free instance\n");
+		/* Free resources */
+		s5p_mfc_release_codec_buffers(ctx);
+		s5p_mfc_release_instance_buffer(ctx);
+		if (ctx->type == MFCINST_DECODER)
+			s5p_mfc_release_dec_desc_buffer(ctx);
+
+		ctx->inst_no = MFC_NO_INSTANCE_SET;
+	}
+	/* hardware locking scheme */
+	if (dev->curr_ctx == ctx->num)
+		clear_bit(0, &dev->hw_lock);
+	dev->num_inst--;
+	if (dev->num_inst == 0) {
+		mfc_debug(2, "Last instance - release firmware\n");
+		/* reset <-> F/W release */
+		s5p_mfc_reset(dev);
+		s5p_mfc_release_firmware(dev);
+		del_timer_sync(&dev->watchdog_timer);
+		if (s5p_mfc_power_off() < 0)
+			mfc_err("Power off failed\n");
+	}
+	mfc_debug(2, "Shutting down clock\n");
+	s5p_mfc_clock_off();
+	dev->ctx[ctx->num] = NULL;
+	s5p_mfc_dec_ctrls_delete(ctx);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+	mfc_debug_leave();
+	mutex_unlock(&dev->mfc_mutex);
+	return 0;
+}
+
+/* Poll */
+static unsigned int s5p_mfc_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct vb2_queue *src_q, *dst_q;
+	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
+	unsigned int rc = 0;
+	unsigned long flags;
+
+	mutex_lock(&dev->mfc_mutex);
+	src_q = &ctx->vq_src;
+	dst_q = &ctx->vq_dst;
+	/*
+	 * There has to be at least one buffer queued on each queued_list,
+	 * which means the buffer is either in the driver already or waiting
+	 * for the driver to claim it and start processing.
+	 */
+	if ((!src_q->streaming || list_empty(&src_q->queued_list))
+		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+		rc = POLLERR;
+		goto end;
+	}
+	mutex_unlock(&dev->mfc_mutex);
+	poll_wait(file, &src_q->done_wq, wait);
+	poll_wait(file, &dst_q->done_wq, wait);
+	mutex_lock(&dev->mfc_mutex);
+	spin_lock_irqsave(&src_q->done_lock, flags);
+	if (!list_empty(&src_q->done_list))
+		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+								done_entry);
+	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+				|| src_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&src_q->done_lock, flags);
+	spin_lock_irqsave(&dst_q->done_lock, flags);
+	if (!list_empty(&dst_q->done_list))
+		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+								done_entry);
+	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+				|| dst_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&dst_q->done_lock, flags);
+end:
+	mutex_unlock(&dev->mfc_mutex);
+	return rc;
+}
+
+/* Mmap */
+static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	int ret;
+
+	if (mutex_lock_interruptible(&dev->mfc_mutex))
+		return -ERESTARTSYS;
+	if (offset < DST_QUEUE_OFF_BASE) {
+		mfc_debug(2, "mmaping source\n");
+		ret = vb2_mmap(&ctx->vq_src, vma);
+	} else {		/* capture */
+		mfc_debug(2, "mmaping destination\n");
+		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+		ret = vb2_mmap(&ctx->vq_dst, vma);
+	}
+	mutex_unlock(&dev->mfc_mutex);
+	return ret;
+}
+
+/* v4l2 ops */
+static const struct v4l2_file_operations s5p_mfc_fops = {
+	.owner = THIS_MODULE,
+	.open = s5p_mfc_open,
+	.release = s5p_mfc_release,
+	.poll = s5p_mfc_poll,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap = s5p_mfc_mmap,
+};
+
+static int match_child(struct device *dev, void *data)
+{
+	if (!dev_name(dev))
+		return 0;
+	return !strcmp(dev_name(dev), (char *)data);
+}
+
+/* MFC probe function */
+static int s5p_mfc_probe(struct platform_device *pdev)
+{
+	struct s5p_mfc_dev *dev;
+	struct video_device *vfd;
+	struct resource *res;
+	int ret;
+
+	pr_debug("%s++\n", __func__);
+	dev = devm_kzalloc(&pdev->dev, sizeof *dev, GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "Not enough memory for MFC device\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&dev->irqlock);
+	spin_lock_init(&dev->condlock);
+	dev->plat_dev = pdev;
+	if (!dev->plat_dev) {
+		dev_err(&pdev->dev, "No platform data specified\n");
+		return -ENODEV;
+	}
+
+	ret = s5p_mfc_init_pm(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get mfc clock source\n");
+		return ret;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	dev->regs_base = devm_request_and_ioremap(&pdev->dev, res);
+	if (dev->regs_base == NULL) {
+		dev_err(&pdev->dev, "Failed to obtain io memory\n");
+		return -ENOENT;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get irq resource\n");
+		ret = -ENOENT;
+		goto err_res;
+	}
+	dev->irq = res->start;
+	ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
+					IRQF_DISABLED, pdev->name, dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+		goto err_res;
+	}
+
+	dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
+					   match_child);
+	if (!dev->mem_dev_l) {
+		mfc_err("Mem child (L) device get failed\n");
+		ret = -ENODEV;
+		goto err_res;
+	}
+	dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
+					   match_child);
+	if (!dev->mem_dev_r) {
+		mfc_err("Mem child (R) device get failed\n");
+		ret = -ENODEV;
+		goto err_res;
+	}
+
+	dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
+	if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
+		ret = PTR_ERR(dev->alloc_ctx[0]);
+		goto err_res;
+	}
+	dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
+	if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
+		ret = PTR_ERR(dev->alloc_ctx[1]);
+		goto err_mem_init_ctx_1;
+	}
+
+	mutex_init(&dev->mfc_mutex);
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret)
+		goto err_v4l2_dev_reg;
+	init_waitqueue_head(&dev->queue);
+
+	/* decoder */
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto err_dec_alloc;
+	}
+	vfd->fops	= &s5p_mfc_fops;
+	vfd->ioctl_ops	= get_dec_v4l2_ioctl_ops();
+	vfd->release	= video_device_release;
+	vfd->lock	= &dev->mfc_mutex;
+	vfd->v4l2_dev	= &dev->v4l2_dev;
+	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
+	dev->vfd_dec	= vfd;
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+		video_device_release(vfd);
+		goto err_dec_reg;
+	}
+	v4l2_info(&dev->v4l2_dev,
+		  "decoder registered as /dev/video%d\n", vfd->num);
+	video_set_drvdata(vfd, dev);
+
+	/* encoder */
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto err_enc_alloc;
+	}
+	vfd->fops	= &s5p_mfc_fops;
+	vfd->ioctl_ops	= get_enc_v4l2_ioctl_ops();
+	vfd->release	= video_device_release;
+	vfd->lock	= &dev->mfc_mutex;
+	vfd->v4l2_dev	= &dev->v4l2_dev;
+	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
+	dev->vfd_enc	= vfd;
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+		video_device_release(vfd);
+		goto err_enc_reg;
+	}
+	v4l2_info(&dev->v4l2_dev,
+		  "encoder registered as /dev/video%d\n", vfd->num);
+	video_set_drvdata(vfd, dev);
+	platform_set_drvdata(pdev, dev);
+
+	dev->hw_lock = 0;
+	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
+	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
+	atomic_set(&dev->watchdog_cnt, 0);
+	init_timer(&dev->watchdog_timer);
+	dev->watchdog_timer.data = (unsigned long)dev;
+	dev->watchdog_timer.function = s5p_mfc_watchdog;
+
+	pr_debug("%s--\n", __func__);
+	return 0;
+
+/* Deinit MFC if probe had failed */
+err_enc_reg:
+	video_device_release(dev->vfd_enc);
+err_enc_alloc:
+	video_unregister_device(dev->vfd_dec);
+err_dec_reg:
+	video_device_release(dev->vfd_dec);
+err_dec_alloc:
+	v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2_dev_reg:
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+err_mem_init_ctx_1:
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
+err_res:
+	s5p_mfc_final_pm(dev);
+
+	pr_debug("%s-- with error\n", __func__);
+	return ret;
+
+}
+
+/* Remove the driver */
+static int __devexit s5p_mfc_remove(struct platform_device *pdev)
+{
+	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
+
+	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
+
+	del_timer_sync(&dev->watchdog_timer);
+	flush_workqueue(dev->watchdog_workqueue);
+	destroy_workqueue(dev->watchdog_workqueue);
+
+	video_unregister_device(dev->vfd_enc);
+	video_unregister_device(dev->vfd_dec);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
+	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+
+	s5p_mfc_final_pm(dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int s5p_mfc_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+	int ret;
+
+	if (m_dev->num_inst == 0)
+		return 0;
+	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
+		mfc_err("Error: going to suspend for a second time\n");
+		return -EIO;
+	}
+
+	/* Check if we're processing and wait if necessary. */
+	while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
+		/* Try and lock the HW */
+		/* Wait on the interrupt waitqueue */
+		ret = wait_event_interruptible_timeout(m_dev->queue,
+			m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
+			msecs_to_jiffies(MFC_INT_TIMEOUT));
+
+		if (ret == 0) {
+			mfc_err("Waiting for hardware to finish timed out\n");
+			return -EIO;
+		}
+	}
+	return s5p_mfc_sleep(m_dev);
+}
+
+static int s5p_mfc_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+
+	if (m_dev->num_inst == 0)
+		return 0;
+	return s5p_mfc_wakeup(m_dev);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int s5p_mfc_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+
+	atomic_set(&m_dev->pm.power, 0);
+	return 0;
+}
+
+static int s5p_mfc_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+	int pre_power;
+
+	if (!m_dev->alloc_ctx)
+		return 0;
+	pre_power = atomic_read(&m_dev->pm.power);
+	atomic_set(&m_dev->pm.power, 1);
+	return 0;
+}
+#endif
+
+/* Power management */
+static const struct dev_pm_ops s5p_mfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
+	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
+			   NULL)
+};
+
+static struct platform_driver s5p_mfc_driver = {
+	.probe	= s5p_mfc_probe,
+	.remove	= __devexit_p(s5p_mfc_remove),
+	.driver	= {
+		.name	= S5P_MFC_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &s5p_mfc_pm_ops
+	},
+};
+
+module_platform_driver(s5p_mfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
new file mode 100644
index 0000000..91a4155
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
@@ -0,0 +1,120 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+
+/* This function is used to send a command to the MFC */
+static int s5p_mfc_cmd_host2risc(struct s5p_mfc_dev *dev, int cmd,
+						struct s5p_mfc_cmd_args *args)
+{
+	int cur_cmd;
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+	/* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
+	do {
+		if (time_after(jiffies, timeout)) {
+			mfc_err("Timeout while waiting for hardware\n");
+			return -EIO;
+		}
+		cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
+	} while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
+	mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
+	mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
+	mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
+	mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
+	/* Issue the command */
+	mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
+	return 0;
+}
+
+/* Initialize the MFC */
+int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev)
+{
+	struct s5p_mfc_cmd_args h2r_args;
+
+	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+	h2r_args.arg[0] = dev->fw_size;
+	return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SYS_INIT, &h2r_args);
+}
+
+/* Suspend the MFC hardware */
+int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev)
+{
+	struct s5p_mfc_cmd_args h2r_args;
+
+	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+	return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
+}
+
+/* Wake up the MFC hardware */
+int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev)
+{
+	struct s5p_mfc_cmd_args h2r_args;
+
+	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+	return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_WAKEUP, &h2r_args);
+}
+
+
+int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_cmd_args h2r_args;
+	int ret;
+
+	/* Preparing decoding - getting instance number */
+	mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
+	dev->curr_ctx = ctx->num;
+	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+	h2r_args.arg[0] = ctx->codec_mode;
+	h2r_args.arg[1] = 0; /* no crc & no pixelcache */
+	h2r_args.arg[2] = ctx->ctx_ofs;
+	h2r_args.arg[3] = ctx->ctx_size;
+	ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
+								&h2r_args);
+	if (ret) {
+		mfc_err("Failed to create a new instance\n");
+		ctx->state = MFCINST_ERROR;
+	}
+	return ret;
+}
+
+int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_cmd_args h2r_args;
+	int ret;
+
+	if (ctx->state == MFCINST_FREE) {
+		mfc_err("Instance already returned\n");
+		ctx->state = MFCINST_ERROR;
+		return -EINVAL;
+	}
+	/* Closing decoding instance  */
+	mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
+	dev->curr_ctx = ctx->num;
+	memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+	h2r_args.arg[0] = ctx->inst_no;
+	ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
+								&h2r_args);
+	if (ret) {
+		mfc_err("Failed to return an instance\n");
+		ctx->state = MFCINST_ERROR;
+		return -EINVAL;
+	}
+	return 0;
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
new file mode 100644
index 0000000..8b090d3
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
@@ -0,0 +1,30 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CMD_H_
+#define S5P_MFC_CMD_H_
+
+#include "s5p_mfc_common.h"
+
+#define MAX_H2R_ARG	4
+
+struct s5p_mfc_cmd_args {
+	unsigned int	arg[MAX_H2R_ARG];
+};
+
+int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
new file mode 100644
index 0000000..bd5706a
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -0,0 +1,570 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.0
+ *
+ * This file contains definitions of enums and structs used by the codec
+ * driver.
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef S5P_MFC_COMMON_H_
+#define S5P_MFC_COMMON_H_
+
+#include "regs-mfc.h"
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+
+/* Definitions related to MFC memory */
+
+/* Offset base used to differentiate between CAPTURE and OUTPUT
+ * while mmapping */
+#define DST_QUEUE_OFF_BASE      (TASK_SIZE / 2)
+
+/* Offset used by the hardware to store addresses */
+#define MFC_OFFSET_SHIFT	11
+
+#define FIRMWARE_ALIGN		0x20000		/* 128KB */
+#define MFC_H264_CTX_BUF_SIZE	0x96000		/* 600KB per H264 instance */
+#define MFC_CTX_BUF_SIZE	0x2800		/* 10KB per instance */
+#define DESC_BUF_SIZE		0x20000		/* 128KB for DESC buffer */
+#define SHARED_BUF_SIZE		0x2000		/* 8KB for shared buffer */
+
+#define DEF_CPB_SIZE		0x40000		/* 512KB */
+
+#define MFC_BANK1_ALLOC_CTX	0
+#define MFC_BANK2_ALLOC_CTX	1
+
+#define MFC_BANK1_ALIGN_ORDER	13
+#define MFC_BANK2_ALIGN_ORDER	13
+#define MFC_BASE_ALIGN_ORDER	17
+
+#include <media/videobuf2-dma-contig.h>
+
+static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
+{
+	/* Same functionality as vb2_dma_contig_plane_dma_addr() */
+	dma_addr_t *paddr = vb2_dma_contig_memops.cookie(b);
+
+	return *paddr;
+}
+
+/* MFC definitions */
+#define MFC_MAX_EXTRA_DPB       5
+#define MFC_MAX_BUFFERS		32
+#define MFC_NUM_CONTEXTS	4
+/* Interrupt timeout */
+#define MFC_INT_TIMEOUT		2000
+/* Busy wait timeout */
+#define MFC_BW_TIMEOUT		500
+/* Watchdog interval */
+#define MFC_WATCHDOG_INTERVAL   1000
+/* After how many executions the watchdog should assume a lockup */
+#define MFC_WATCHDOG_CNT        10
+#define MFC_NO_INSTANCE_SET	-1
+#define MFC_ENC_CAP_PLANE_COUNT	1
+#define MFC_ENC_OUT_PLANE_COUNT	2
+#define STUFF_BYTE		4
+#define MFC_MAX_CTRLS		64
+
+#define mfc_read(dev, offset)		readl(dev->regs_base + (offset))
+#define mfc_write(dev, data, offset)	writel((data), dev->regs_base + \
+								(offset))
+
+/**
+ * enum s5p_mfc_fmt_type - type of the pixelformat
+ */
+enum s5p_mfc_fmt_type {
+	MFC_FMT_DEC,
+	MFC_FMT_ENC,
+	MFC_FMT_RAW,
+};
+
+/**
+ * enum s5p_mfc_node_type - The type of an MFC device node.
+ */
+enum s5p_mfc_node_type {
+	MFCNODE_INVALID = -1,
+	MFCNODE_DECODER = 0,
+	MFCNODE_ENCODER = 1,
+};
+
+/**
+ * enum s5p_mfc_inst_type - The type of an MFC instance.
+ */
+enum s5p_mfc_inst_type {
+	MFCINST_INVALID,
+	MFCINST_DECODER,
+	MFCINST_ENCODER,
+};
+
+/**
+ * enum s5p_mfc_inst_state - The state of an MFC instance.
+ */
+enum s5p_mfc_inst_state {
+	MFCINST_FREE = 0,
+	MFCINST_INIT = 100,
+	MFCINST_GOT_INST,
+	MFCINST_HEAD_PARSED,
+	MFCINST_BUFS_SET,
+	MFCINST_RUNNING,
+	MFCINST_FINISHING,
+	MFCINST_FINISHED,
+	MFCINST_RETURN_INST,
+	MFCINST_ERROR,
+	MFCINST_ABORT,
+	MFCINST_RES_CHANGE_INIT,
+	MFCINST_RES_CHANGE_FLUSH,
+	MFCINST_RES_CHANGE_END,
+};
+
+/**
+ * enum s5p_mfc_queue_state - The state of buffer queue.
+ */
+enum s5p_mfc_queue_state {
+	QUEUE_FREE,
+	QUEUE_BUFS_REQUESTED,
+	QUEUE_BUFS_QUERIED,
+	QUEUE_BUFS_MMAPED,
+};
+
+/**
+ * enum s5p_mfc_decode_arg - type of frame decoding
+ */
+enum s5p_mfc_decode_arg {
+	MFC_DEC_FRAME,
+	MFC_DEC_LAST_FRAME,
+	MFC_DEC_RES_CHANGE,
+};
+
+struct s5p_mfc_ctx;
+
+/**
+ * struct s5p_mfc_buf - MFC buffer
+ */
+struct s5p_mfc_buf {
+	struct list_head list;
+	struct vb2_buffer *b;
+	union {
+		struct {
+			size_t luma;
+			size_t chroma;
+		} raw;
+		size_t stream;
+	} cookie;
+	int used;
+};
+
+/**
+ * struct s5p_mfc_pm - power management data structure
+ */
+struct s5p_mfc_pm {
+	struct clk	*clock;
+	struct clk	*clock_gate;
+	atomic_t	power;
+	struct device	*device;
+};
+
+/**
+ * struct s5p_mfc_dev - The struct containing driver internal parameters.
+ *
+ * @v4l2_dev:		v4l2_device
+ * @vfd_dec:		video device for decoding
+ * @vfd_enc:		video device for encoding
+ * @plat_dev:		platform device
+ * @mem_dev_l:		child device of the left memory bank (0)
+ * @mem_dev_r:		child device of the right memory bank (1)
+ * @regs_base:		base address of the MFC hw registers
+ * @irq:		irq resource
+ * @dec_ctrl_handler:	control framework handler for decoding
+ * @enc_ctrl_handler:	control framework handler for encoding
+ * @pm:			power management control
+ * @num_inst:		counter of active MFC instances
+ * @irqlock:		lock for operations on videobuf2 queues
+ * @condlock:		lock for changing/checking if a context is ready to be
+ *			processed
+ * @mfc_mutex:		lock for video_device
+ * @int_cond:		variable used by the waitqueue
+ * @int_type:		type of last interrupt
+ * @int_err:		error number for last interrupt
+ * @queue:		waitqueue for waiting for completion of device commands
+ * @fw_size:		size of firmware
+ * @bank1:		address of the beginning of bank 1 memory
+ * @bank2:		address of the beginning of bank 2 memory
+ * @hw_lock:		used for hardware locking
+ * @ctx:		array of driver contexts
+ * @curr_ctx:		number of the currently running context
+ * @ctx_work_bits:	used to mark which contexts are waiting for hardware
+ * @watchdog_cnt:	counter for the watchdog
+ * @watchdog_workqueue:	workqueue for the watchdog
+ * @watchdog_work:	worker for the watchdog
+ * @alloc_ctx:		videobuf2 allocator contexts for two memory banks
+ * @enter_suspend:	flag set when entering suspend
+ *
+ */
+struct s5p_mfc_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vfd_dec;
+	struct video_device	*vfd_enc;
+	struct platform_device	*plat_dev;
+	struct device		*mem_dev_l;
+	struct device		*mem_dev_r;
+	void __iomem		*regs_base;
+	int			irq;
+	struct v4l2_ctrl_handler dec_ctrl_handler;
+	struct v4l2_ctrl_handler enc_ctrl_handler;
+	struct s5p_mfc_pm	pm;
+	int num_inst;
+	spinlock_t irqlock;	/* lock when operating on videobuf2 queues */
+	spinlock_t condlock;	/* lock when changing/checking if a context is
+					ready to be processed */
+	struct mutex mfc_mutex; /* video_device lock */
+	int int_cond;
+	int int_type;
+	unsigned int int_err;
+	wait_queue_head_t queue;
+	size_t fw_size;
+	size_t bank1;
+	size_t bank2;
+	unsigned long hw_lock;
+	struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
+	int curr_ctx;
+	unsigned long ctx_work_bits;
+	atomic_t watchdog_cnt;
+	struct timer_list watchdog_timer;
+	struct workqueue_struct *watchdog_workqueue;
+	struct work_struct watchdog_work;
+	void *alloc_ctx[2];
+	unsigned long enter_suspend;
+};
+
+/**
+ * struct s5p_mfc_h264_enc_params - encoding parameters for h264
+ */
+struct s5p_mfc_h264_enc_params {
+	enum v4l2_mpeg_video_h264_profile profile;
+	enum v4l2_mpeg_video_h264_loop_filter_mode loop_filter_mode;
+	s8 loop_filter_alpha;
+	s8 loop_filter_beta;
+	enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+	u8 max_ref_pic;
+	u8 num_ref_pic_4p;
+	int _8x8_transform;
+	int rc_mb;
+	int rc_mb_dark;
+	int rc_mb_smooth;
+	int rc_mb_static;
+	int rc_mb_activity;
+	int vui_sar;
+	u8 vui_sar_idc;
+	u16 vui_ext_sar_width;
+	u16 vui_ext_sar_height;
+	int open_gop;
+	u16 open_gop_size;
+	u8 rc_frame_qp;
+	u8 rc_min_qp;
+	u8 rc_max_qp;
+	u8 rc_p_frame_qp;
+	u8 rc_b_frame_qp;
+	enum v4l2_mpeg_video_h264_level level_v4l2;
+	int level;
+	u16 cpb_size;
+};
+
+/**
+ * struct s5p_mfc_mpeg4_enc_params - encoding parameters for h263 and mpeg4
+ */
+struct s5p_mfc_mpeg4_enc_params {
+	/* MPEG4 Only */
+	enum v4l2_mpeg_video_mpeg4_profile profile;
+	int quarter_pixel;
+	/* Common for MPEG4, H263 */
+	u16 vop_time_res;
+	u16 vop_frm_delta;
+	u8 rc_frame_qp;
+	u8 rc_min_qp;
+	u8 rc_max_qp;
+	u8 rc_p_frame_qp;
+	u8 rc_b_frame_qp;
+	enum v4l2_mpeg_video_mpeg4_level level_v4l2;
+	int level;
+};
+
+/**
+ * struct s5p_mfc_enc_params - general encoding parameters
+ */
+struct s5p_mfc_enc_params {
+	u16 width;
+	u16 height;
+
+	u16 gop_size;
+	enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+	u16 slice_mb;
+	u32 slice_bit;
+	u16 intra_refresh_mb;
+	int pad;
+	u8 pad_luma;
+	u8 pad_cb;
+	u8 pad_cr;
+	int rc_frame;
+	u32 rc_bitrate;
+	u16 rc_reaction_coeff;
+	u16 vbv_size;
+
+	enum v4l2_mpeg_video_header_mode seq_hdr_mode;
+	enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode;
+	int fixed_target_bit;
+
+	u8 num_b_frame;
+	u32 rc_framerate_num;
+	u32 rc_framerate_denom;
+	int interlace;
+
+	union {
+		struct s5p_mfc_h264_enc_params h264;
+		struct s5p_mfc_mpeg4_enc_params mpeg4;
+	} codec;
+
+};
+
+/**
+ * struct s5p_mfc_codec_ops - codec ops, used by encoding
+ */
+struct s5p_mfc_codec_ops {
+	/* initialization routines */
+	int (*pre_seq_start) (struct s5p_mfc_ctx *ctx);
+	int (*post_seq_start) (struct s5p_mfc_ctx *ctx);
+	/* execution routines */
+	int (*pre_frame_start) (struct s5p_mfc_ctx *ctx);
+	int (*post_frame_start) (struct s5p_mfc_ctx *ctx);
+};
+
+#define call_cop(c, op, args...)				\
+	(((c)->c_ops->op) ?					\
+		((c)->c_ops->op(args)) : 0)
+
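+/*
+ * Illustrative use of call_cop() (not part of the original code): the hooks
+ * in s5p_mfc_codec_ops are optional, so callers invoke them through
+ * call_cop(), which treats a missing callback as success, e.g.:
+ *
+ *	if (call_cop(ctx, pre_seq_start, ctx) < 0)
+ *		mfc_err("pre_seq_start failed\n");
+ */
+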
+/**
+ * struct s5p_mfc_ctx - This struct contains the instance context
+ *
+ * @dev:		pointer to the s5p_mfc_dev of the device
+ * @fh:			struct v4l2_fh
+ * @num:		number of the context that this structure describes
+ * @int_cond:		variable used by the waitqueue
+ * @int_type:		type of the last interrupt
+ * @int_err:		error number received from MFC hw in the interrupt
+ * @queue:		waitqueue that can be used to wait for this context to
+ *			finish
+ * @src_fmt:		source pixelformat information
+ * @dst_fmt:		destination pixelformat information
+ * @vq_src:		vb2 queue for source buffers
+ * @vq_dst:		vb2 queue for destination buffers
+ * @src_queue:		driver internal queue for source buffers
+ * @dst_queue:		driver internal queue for destination buffers
+ * @src_queue_cnt:	number of buffers queued on the source internal queue
+ * @dst_queue_cnt:	number of buffers queued on the dest internal queue
+ * @type:		type of the instance - decoder or encoder
+ * @state:		state of the context
+ * @inst_no:		number of hw instance associated with the context
+ * @img_width:		width of the image that is decoded or encoded
+ * @img_height:		height of the image that is decoded or encoded
+ * @buf_width:		width of the buffer for processed image
+ * @buf_height:		height of the buffer for processed image
+ * @luma_size:		size of a luma plane
+ * @chroma_size:	size of a chroma plane
+ * @mv_size:		size of a motion vectors buffer
+ * @consumed_stream:	number of bytes that have been used so far from the
+ *			decoding buffer
+ * @dpb_flush_flag:	flag used to indicate that DPB buffers are being
+ *			flushed
+ * @bank1_buf:		handle to memory allocated for temporary buffers from
+ *			memory bank 1
+ * @bank1_phys:		address of the temporary buffers from memory bank 1
+ * @bank1_size:		size of the memory allocated for temporary buffers from
+ *			memory bank 1
+ * @bank2_buf:		handle to memory allocated for temporary buffers from
+ *			memory bank 2
+ * @bank2_phys:		address of the temporary buffers from memory bank 2
+ * @bank2_size:		size of the memory allocated for temporary buffers from
+ *			memory bank 2
+ * @capture_state:	state of the capture buffers queue
+ * @output_state:	state of the output buffers queue
+ * @src_bufs:		information on allocated source buffers
+ * @src_bufs_cnt:	number of allocated source buffers
+ * @dst_bufs:		information on allocated destination buffers
+ * @dst_bufs_cnt:	number of allocated destination buffers
+ * @sequence:		counter for the sequence number for v4l2
+ * @dec_dst_flag:	flags for buffers queued in the hardware
+ * @dec_src_buf_size:	size of the buffer for source buffers in decoding
+ * @codec_mode:		number of codec mode used by MFC hw
+ * @slice_interface:	slice interface flag
+ * @loop_filter_mpeg4:	loop filter for MPEG4 flag
+ * @display_delay:	value of the display delay for H264
+ * @display_delay_enable:	display delay for H264 enable flag
+ * @after_packed_pb:	flag used to track buffer when stream is in
+ *			Packed PB format
+ * @dpb_count:		count of the DPB buffers required by MFC hw
+ * @total_dpb_count:	count of DPB buffers with additional buffers
+ *			requested by the application
+ * @ctx_buf:		handle to the memory associated with this context
+ * @ctx_phys:		address of the memory associated with this context
+ * @ctx_ofs:		offset of the memory associated with this context
+ * @ctx_size:		size of the memory associated with this context
+ * @desc_buf:		handle to the descriptor buffer used for decoding
+ * @desc_phys:		address of the descriptor buffer used for decoding
+ * @shm_alloc:		handle for the shared memory buffer
+ * @shm:		virtual address for the shared memory buffer
+ * @shm_ofs:		address offset for shared memory
+ * @enc_params:		encoding parameters for MFC
+ * @enc_dst_buf_size:	size of the buffers for encoder output
+ * @force_frame_type:	used to force the type of the next encoded frame
+ * @ref_queue:		list of the reference buffers for encoding
+ * @ref_queue_cnt:	number of the buffers in the reference list
+ * @c_ops:		ops for encoding
+ * @ctrls:		array of controls, used when adding controls to the
+ *			v4l2 control framework
+ * @ctrl_handler:	handler for v4l2 framework
+ */
+struct s5p_mfc_ctx {
+	struct s5p_mfc_dev *dev;
+	struct v4l2_fh fh;
+
+	int num;
+
+	int int_cond;
+	int int_type;
+	unsigned int int_err;
+	wait_queue_head_t queue;
+
+	struct s5p_mfc_fmt *src_fmt;
+	struct s5p_mfc_fmt *dst_fmt;
+
+	struct vb2_queue vq_src;
+	struct vb2_queue vq_dst;
+
+	struct list_head src_queue;
+	struct list_head dst_queue;
+
+	unsigned int src_queue_cnt;
+	unsigned int dst_queue_cnt;
+
+	enum s5p_mfc_inst_type type;
+	enum s5p_mfc_inst_state state;
+	int inst_no;
+
+	/* Image parameters */
+	int img_width;
+	int img_height;
+	int buf_width;
+	int buf_height;
+
+	int luma_size;
+	int chroma_size;
+	int mv_size;
+
+	unsigned long consumed_stream;
+
+	unsigned int dpb_flush_flag;
+
+	/* Buffers */
+	void *bank1_buf;
+	size_t bank1_phys;
+	size_t bank1_size;
+
+	void *bank2_buf;
+	size_t bank2_phys;
+	size_t bank2_size;
+
+	enum s5p_mfc_queue_state capture_state;
+	enum s5p_mfc_queue_state output_state;
+
+	struct s5p_mfc_buf src_bufs[MFC_MAX_BUFFERS];
+	int src_bufs_cnt;
+	struct s5p_mfc_buf dst_bufs[MFC_MAX_BUFFERS];
+	int dst_bufs_cnt;
+
+	unsigned int sequence;
+	unsigned long dec_dst_flag;
+	size_t dec_src_buf_size;
+
+	/* Control values */
+	int codec_mode;
+	int slice_interface;
+	int loop_filter_mpeg4;
+	int display_delay;
+	int display_delay_enable;
+	int after_packed_pb;
+
+	int dpb_count;
+	int total_dpb_count;
+
+	/* Buffers */
+	void *ctx_buf;
+	size_t ctx_phys;
+	size_t ctx_ofs;
+	size_t ctx_size;
+
+	void *desc_buf;
+	size_t desc_phys;
+
+	void *shm_alloc;
+	void *shm;
+	size_t shm_ofs;
+
+	struct s5p_mfc_enc_params enc_params;
+
+	size_t enc_dst_buf_size;
+
+	enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type;
+
+	struct list_head ref_queue;
+	unsigned int ref_queue_cnt;
+
+	struct s5p_mfc_codec_ops *c_ops;
+
+	struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
+	struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/**
+ * struct s5p_mfc_fmt -	structure used to store information about pixelformats
+ *			used by the MFC
+ */
+struct s5p_mfc_fmt {
+	char *name;
+	u32 fourcc;
+	u32 codec_mode;
+	enum s5p_mfc_fmt_type type;
+	u32 num_planes;
+};
+
+/**
+ * struct mfc_control -	structure used to store information about MFC controls;
+ *			it is used to initialize the control framework.
+ */
+struct mfc_control {
+	__u32			id;
+	enum v4l2_ctrl_type	type;
+	__u8			name[32];  /* Whatever */
+	__s32			minimum;   /* Note signedness */
+	__s32			maximum;
+	__s32			step;
+	__u32			menu_skip_mask;
+	__s32			default_value;
+	__u32			flags;
+	__u32			reserved[2];
+	__u8			is_volatile;
+};
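+
+/*
+ * Note: the layout above mirrors struct v4l2_queryctrl, extended with
+ * menu_skip_mask and is_volatile; each entry is translated into either a
+ * v4l2_ctrl_new_custom() or a v4l2_ctrl_new_std() call when the control
+ * handler is set up (see s5p_mfc_dec_ctrls_setup()).
+ */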
+
+
+#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
+#define ctrl_to_ctx(__ctrl) \
+	container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
+
+#endif /* S5P_MFC_COMMON_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
new file mode 100644
index 0000000..4d662f1
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -0,0 +1,343 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_pm.h"
+
+static void *s5p_mfc_bitproc_buf;
+static size_t s5p_mfc_bitproc_phys;
+static unsigned char *s5p_mfc_bitproc_virt;
+
+/* Allocate and load firmware */
+int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
+{
+	struct firmware *fw_blob;
+	size_t bank2_base_phys;
+	void *b_base;
+	int err;
+
+	/* Firmware has to be present as a separate file or compiled
+	 * into the kernel. */
+	mfc_debug_enter();
+	err = request_firmware((const struct firmware **)&fw_blob,
+				     "s5p-mfc.fw", dev->v4l2_dev.dev);
+	if (err != 0) {
+		mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
+		return -EINVAL;
+	}
+	dev->fw_size = ALIGN(fw_blob->size, FIRMWARE_ALIGN);
+	if (s5p_mfc_bitproc_buf) {
+		mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
+		release_firmware(fw_blob);
+		return -ENOMEM;
+	}
+	s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
+		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
+	if (IS_ERR(s5p_mfc_bitproc_buf)) {
+		s5p_mfc_bitproc_buf = NULL;
+		mfc_err("Allocating bitprocessor buffer failed\n");
+		release_firmware(fw_blob);
+		return -ENOMEM;
+	}
+	s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
+		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
+	if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
+		mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
+		vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+		s5p_mfc_bitproc_phys = 0;
+		s5p_mfc_bitproc_buf = NULL;
+		release_firmware(fw_blob);
+		return -EIO;
+	}
+	s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
+	if (!s5p_mfc_bitproc_virt) {
+		mfc_err("Bitprocessor memory remap failed\n");
+		vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+		s5p_mfc_bitproc_phys = 0;
+		s5p_mfc_bitproc_buf = NULL;
+		release_firmware(fw_blob);
+		return -EIO;
+	}
+	dev->bank1 = s5p_mfc_bitproc_phys;
+	b_base = vb2_dma_contig_memops.alloc(
+		dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], 1 << MFC_BANK2_ALIGN_ORDER);
+	if (IS_ERR(b_base)) {
+		vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+		s5p_mfc_bitproc_phys = 0;
+		s5p_mfc_bitproc_buf = NULL;
+		mfc_err("Allocating bank2 base failed\n");
+		release_firmware(fw_blob);
+		return -ENOMEM;
+	}
+	bank2_base_phys = s5p_mfc_mem_cookie(
+		dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
+	vb2_dma_contig_memops.put(b_base);
+	if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
+		mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
+		vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+		s5p_mfc_bitproc_phys = 0;
+		s5p_mfc_bitproc_buf = NULL;
+		release_firmware(fw_blob);
+		return -EIO;
+	}
+	dev->bank2 = bank2_base_phys;
+	memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+	wmb();
+	release_firmware(fw_blob);
+	mfc_debug_leave();
+	return 0;
+}
+
+/* Reload firmware to MFC */
+int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
+{
+	struct firmware *fw_blob;
+	int err;
+
+	/* Firmware has to be present as a separate file or compiled
+	 * into the kernel. */
+	mfc_debug_enter();
+	err = request_firmware((const struct firmware **)&fw_blob,
+				     "s5p-mfc.fw", dev->v4l2_dev.dev);
+	if (err != 0) {
+		mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
+		return -EINVAL;
+	}
+	if (fw_blob->size > dev->fw_size) {
+		mfc_err("MFC firmware is too big to be loaded\n");
+		release_firmware(fw_blob);
+		return -ENOMEM;
+	}
+	if (s5p_mfc_bitproc_buf == NULL || s5p_mfc_bitproc_phys == 0) {
+		mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
+		release_firmware(fw_blob);
+		return -EINVAL;
+	}
+	memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+	wmb();
+	release_firmware(fw_blob);
+	mfc_debug_leave();
+	return 0;
+}
+
+/* Release firmware memory */
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
+{
+	/* Before calling this function one has to make sure
+	 * that MFC is no longer processing */
+	if (!s5p_mfc_bitproc_buf)
+		return -EINVAL;
+	vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+	s5p_mfc_bitproc_virt = NULL;
+	s5p_mfc_bitproc_phys = 0;
+	s5p_mfc_bitproc_buf = NULL;
+	return 0;
+}
+
+/* Reset the device */
+int s5p_mfc_reset(struct s5p_mfc_dev *dev)
+{
+	unsigned int mc_status;
+	unsigned long timeout;
+
+	mfc_debug_enter();
+	/* Stop procedure */
+	/*  reset RISC */
+	mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
+	/*  All reset except for MC */
+	mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
+	mdelay(10);
+
+	timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+	/* Check MC status */
+	do {
+		if (time_after(jiffies, timeout)) {
+			mfc_err("Timeout while resetting MFC\n");
+			return -EIO;
+		}
+
+		mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);
+
+	} while (mc_status & 0x3);
+
+	mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
+	mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
+	mfc_debug_leave();
+	return 0;
+}
+
+static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
+{
+	mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
+	mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
+	mfc_debug(2, "Bank1: %08x, Bank2: %08x\n", dev->bank1, dev->bank2);
+}
+
+static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
+{
+	mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
+	mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
+	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+	mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
+}
+
+/* Initialize hardware */
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
+{
+	unsigned int ver;
+	int ret;
+
+	mfc_debug_enter();
+	if (!s5p_mfc_bitproc_buf)
+		return -EINVAL;
+
+	/* 0. MFC reset */
+	mfc_debug(2, "MFC reset..\n");
+	s5p_mfc_clock_on();
+	ret = s5p_mfc_reset(dev);
+	if (ret) {
+		mfc_err("Failed to reset MFC - timeout\n");
+		return ret;
+	}
+	mfc_debug(2, "Done MFC reset..\n");
+	/* 1. Set DRAM base Addr */
+	s5p_mfc_init_memctrl(dev);
+	/* 2. Initialize registers of channel I/F */
+	s5p_mfc_clear_cmds(dev);
+	/* 3. Release reset signal to the RISC */
+	s5p_mfc_clean_dev_int_flags(dev);
+	mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+	mfc_debug(2, "Will now wait for completion of firmware transfer\n");
+	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_FW_STATUS_RET)) {
+		mfc_err("Failed to load firmware\n");
+		s5p_mfc_reset(dev);
+		s5p_mfc_clock_off();
+		return -EIO;
+	}
+	s5p_mfc_clean_dev_int_flags(dev);
+	/* 4. Initialize firmware */
+	ret = s5p_mfc_sys_init_cmd(dev);
+	if (ret) {
+		mfc_err("Failed to send command to MFC - timeout\n");
+		s5p_mfc_reset(dev);
+		s5p_mfc_clock_off();
+		return ret;
+	}
+	mfc_debug(2, "Ok, now will write a command to init the system\n");
+	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SYS_INIT_RET)) {
+		mfc_err("Failed to load firmware\n");
+		s5p_mfc_reset(dev);
+		s5p_mfc_clock_off();
+		return -EIO;
+	}
+	dev->int_cond = 0;
+	if (dev->int_err != 0 || dev->int_type !=
+					S5P_FIMV_R2H_CMD_SYS_INIT_RET) {
+		/* Failure. */
+		mfc_err("Failed to init firmware - error: %d int: %d\n",
+						dev->int_err, dev->int_type);
+		s5p_mfc_reset(dev);
+		s5p_mfc_clock_off();
+		return -EIO;
+	}
+	ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
+	mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
+		(ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
+	s5p_mfc_clock_off();
+	mfc_debug_leave();
+	return 0;
+}
+
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
+{
+	int ret;
+
+	mfc_debug_enter();
+	s5p_mfc_clock_on();
+	s5p_mfc_clean_dev_int_flags(dev);
+	ret = s5p_mfc_sleep_cmd(dev);
+	if (ret) {
+		mfc_err("Failed to send command to MFC - timeout\n");
+		return ret;
+	}
+	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SLEEP_RET)) {
+		mfc_err("Failed to sleep\n");
+		return -EIO;
+	}
+	s5p_mfc_clock_off();
+	dev->int_cond = 0;
+	if (dev->int_err != 0 || dev->int_type !=
+						S5P_FIMV_R2H_CMD_SLEEP_RET) {
+		/* Failure. */
+		mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
+								dev->int_type);
+		return -EIO;
+	}
+	mfc_debug_leave();
+	return ret;
+}
+
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
+{
+	int ret;
+
+	mfc_debug_enter();
+	/* 0. MFC reset */
+	mfc_debug(2, "MFC reset..\n");
+	s5p_mfc_clock_on();
+	ret = s5p_mfc_reset(dev);
+	if (ret) {
+		mfc_err("Failed to reset MFC - timeout\n");
+		return ret;
+	}
+	mfc_debug(2, "Done MFC reset..\n");
+	/* 1. Set DRAM base Addr */
+	s5p_mfc_init_memctrl(dev);
+	/* 2. Initialize registers of channel I/F */
+	s5p_mfc_clear_cmds(dev);
+	s5p_mfc_clean_dev_int_flags(dev);
+	/* 3. Initialize firmware */
+	ret = s5p_mfc_wakeup_cmd(dev);
+	if (ret) {
+		mfc_err("Failed to send command to MFC - timeout\n");
+		return ret;
+	}
+	/* 4. Release reset signal to the RISC */
+	mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+	mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
+	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_WAKEUP_RET)) {
+		mfc_err("Failed to load firmware\n");
+		return -EIO;
+	}
+	s5p_mfc_clock_off();
+	dev->int_cond = 0;
+	if (dev->int_err != 0 || dev->int_type !=
+						S5P_FIMV_R2H_CMD_WAKEUP_RET) {
+		/* Failure. */
+		mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
+								dev->int_type);
+		return -EIO;
+	}
+	mfc_debug_leave();
+	return 0;
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
new file mode 100644
index 0000000..e1e0c54
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CTRL_H
+#define S5P_MFC_CTRL_H
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_reset(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_CTRL_H */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
new file mode 100644
index 0000000..bd5cd4a
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -0,0 +1,48 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains debug macros
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_DEBUG_H_
+#define S5P_MFC_DEBUG_H_
+
+#define DEBUG
+
+#ifdef DEBUG
+extern int debug;
+
+#define mfc_debug(level, fmt, args...)				\
+	do {							\
+		if (debug >= level)				\
+			printk(KERN_DEBUG "%s:%d: " fmt,	\
+				__func__, __LINE__, ##args);	\
+	} while (0)
+#else
+#define mfc_debug(level, fmt, args...)
+#endif
+
+#define mfc_debug_enter() mfc_debug(5, "enter")
+#define mfc_debug_leave() mfc_debug(5, "leave")
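+
+/*
+ * Assumed usage sketch (not part of the original header): "debug" is expected
+ * to be defined elsewhere in the driver, e.g. as a module parameter, and a
+ * message is only printed when its level does not exceed the current setting:
+ *
+ *	mfc_debug(2, "Buffer count=%d\n", count);
+ */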
+
+#define mfc_err(fmt, args...)				\
+	do {						\
+		printk(KERN_ERR "%s:%d: " fmt,		\
+		       __func__, __LINE__, ##args);	\
+	} while (0)
+
+#define mfc_info(fmt, args...)				\
+	do {						\
+		printk(KERN_INFO "%s:%d: " fmt,		\
+		       __func__, __LINE__, ##args);	\
+	} while (0)
+
+#endif /* S5P_MFC_DEBUG_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
new file mode 100644
index 0000000..456f5df
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -0,0 +1,1044 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+
+static struct s5p_mfc_fmt formats[] = {
+	{
+		.name		= "4:2:0 2 Planes 64x32 Tiles",
+		.fourcc		= V4L2_PIX_FMT_NV12MT,
+		.codec_mode	= S5P_FIMV_CODEC_NONE,
+		.type		= MFC_FMT_RAW,
+		.num_planes	= 2,
+	 },
+	{
+		.name = "4:2:0 2 Planes",
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.codec_mode = S5P_FIMV_CODEC_NONE,
+		.type = MFC_FMT_RAW,
+		.num_planes = 2,
+	},
+	{
+		.name = "H264 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.codec_mode = S5P_FIMV_CODEC_H264_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "H263 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_H263,
+		.codec_mode = S5P_FIMV_CODEC_H263_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "MPEG1 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_MPEG1,
+		.codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "MPEG2 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_MPEG2,
+		.codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "MPEG4 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "XviD Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_XVID,
+		.codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "VC1 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
+		.codec_mode = S5P_FIMV_CODEC_VC1_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.name = "VC1 RCV Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
+		.codec_mode = S5P_FIMV_CODEC_VC1RCV_DEC,
+		.type = MFC_FMT_DEC,
+		.num_planes = 1,
+	},
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Find selected format description */
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+		    formats[i].type == t)
+			return &formats[i];
+	}
+	return NULL;
+}
+
+static struct mfc_control controls[] = {
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "H264 Display Delay",
+		.minimum = 0,
+		.maximum = 16383,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "H264 Display Delay Enable",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Mpeg4 Loop Filter Enable",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Slice Interface Enable",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Minimum number of cap bufs",
+		.minimum = 1,
+		.maximum = 32,
+		.step = 1,
+		.default_value = 1,
+		.is_volatile = 1,
+	},
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+
+/* Check whether a context should be run on hardware */
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+	/* Context is to parse header */
+	if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST)
+		return 1;
+	/* Context is to decode a frame */
+	if (ctx->src_queue_cnt >= 1 &&
+	    ctx->state == MFCINST_RUNNING &&
+	    ctx->dst_queue_cnt >= ctx->dpb_count)
+		return 1;
+	/* Context is to return last frame */
+	if (ctx->state == MFCINST_FINISHING &&
+	    ctx->dst_queue_cnt >= ctx->dpb_count)
+		return 1;
+	/* Context is to set buffers */
+	if (ctx->src_queue_cnt >= 1 &&
+	    ctx->state == MFCINST_HEAD_PARSED &&
+	    ctx->capture_state == QUEUE_BUFS_MMAPED)
+		return 1;
+	/* Resolution change */
+	if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
+		ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
+		ctx->dst_queue_cnt >= ctx->dpb_count)
+		return 1;
+	if (ctx->state == MFCINST_RES_CHANGE_END &&
+		ctx->src_queue_cnt >= 1)
+		return 1;
+	mfc_debug(2, "ctx is not ready\n");
+	return 0;
+}
+
+static struct s5p_mfc_codec_ops decoder_codec_ops = {
+	.pre_seq_start		= NULL,
+	.post_seq_start		= NULL,
+	.pre_frame_start	= NULL,
+	.post_frame_start	= NULL,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct s5p_mfc_dev *dev = video_drvdata(file);
+
+	strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+	strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+	cap->bus_info[0] = 0;
+	cap->version = KERNEL_VERSION(1, 0, 0);
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left only for backward compatibility
+	 * and are scheduled for removal.
+	 */
+	cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING |
+			    V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+			    V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+	return 0;
+}
+
+/* Enumerate format */
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
+{
+	struct s5p_mfc_fmt *fmt;
+	int i, j = 0;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (mplane && formats[i].num_planes == 1)
+			continue;
+		else if (!mplane && formats[i].num_planes > 1)
+			continue;
+		if (out && formats[i].type != MFC_FMT_DEC)
+			continue;
+		else if (!out && formats[i].type != MFC_FMT_RAW)
+			continue;
+
+		if (j == f->index)
+			break;
+		++j;
+	}
+	if (i == ARRAY_SIZE(formats))
+		return -EINVAL;
+	fmt = &formats[i];
+	strlcpy(f->description, fmt->name, sizeof(f->description));
+	f->pixelformat = fmt->fourcc;
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+							struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, false, false);
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+							struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, true, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+							struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, false, true);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+							struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, true, true);
+}
+
+/* Get format */
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_pix_format_mplane *pix_mp;
+
+	mfc_debug_enter();
+	pix_mp = &f->fmt.pix_mp;
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+	    (ctx->state == MFCINST_GOT_INST || ctx->state ==
+						MFCINST_RES_CHANGE_END)) {
+		/* If the MFC is parsing the header,
+		 * wait until it is finished */
+		s5p_mfc_clean_ctx_int_flags(ctx);
+		s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET,
+									0);
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+	    ctx->state >= MFCINST_HEAD_PARSED &&
+	    ctx->state < MFCINST_ABORT) {
+		/* This is run on CAPTURE (decode output) */
+		/* Width and height are set to the dimensions
+		   of the movie; the buffer is bigger and
+		   further processing stages should crop to this
+		   rectangle. */
+		pix_mp->width = ctx->buf_width;
+		pix_mp->height = ctx->buf_height;
+		pix_mp->field = V4L2_FIELD_NONE;
+		pix_mp->num_planes = 2;
+		/* Set pixelformat to the format in which MFC
+		   outputs the decoded frame */
+		pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+		pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+		pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+		pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+		pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		/* This is run on OUTPUT.
+		   The buffer contains a compressed image,
+		   so width and height have no meaning. */
+		pix_mp->width = 0;
+		pix_mp->height = 0;
+		pix_mp->field = V4L2_FIELD_NONE;
+		pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size;
+		pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size;
+		pix_mp->pixelformat = ctx->src_fmt->fourcc;
+		pix_mp->num_planes = ctx->src_fmt->num_planes;
+	} else {
+		mfc_err("Format could not be read\n");
+		mfc_debug(2, "%s-- with error\n", __func__);
+		return -EINVAL;
+	}
+	mfc_debug_leave();
+	return 0;
+}
+
+/* Try format */
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct s5p_mfc_fmt *fmt;
+
+	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		mfc_err("This node supports decoding only\n");
+		return -EINVAL;
+	}
+	fmt = find_format(f, MFC_FMT_DEC);
+	if (!fmt) {
+		mfc_err("Unsupported format\n");
+		return -EINVAL;
+	}
+	if (fmt->type != MFC_FMT_DEC) {
+		mfc_err("\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Set format */
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct s5p_mfc_dev *dev = video_drvdata(file);
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	int ret = 0;
+	struct s5p_mfc_fmt *fmt;
+	struct v4l2_pix_format_mplane *pix_mp;
+
+	mfc_debug_enter();
+	ret = vidioc_try_fmt(file, priv, f);
+	pix_mp = &f->fmt.pix_mp;
+	if (ret)
+		return ret;
+	if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+		v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+		ret = -EBUSY;
+		goto out;
+	}
+	fmt = find_format(f, MFC_FMT_DEC);
+	if (!fmt || fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+		mfc_err("Unknown codec\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	if (fmt->type != MFC_FMT_DEC) {
+		mfc_err("Wrong format selected, you should choose "
+					"format for decoding\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	ctx->src_fmt = fmt;
+	ctx->codec_mode = fmt->codec_mode;
+	mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+	pix_mp->height = 0;
+	pix_mp->width = 0;
+	if (pix_mp->plane_fmt[0].sizeimage)
+		ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+	else
+		pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+								DEF_CPB_SIZE;
+	pix_mp->plane_fmt[0].bytesperline = 0;
+	ctx->state = MFCINST_INIT;
+out:
+	mfc_debug_leave();
+	return ret;
+}
+
+/* Request buffers */
+static int vidioc_reqbufs(struct file *file, void *priv,
+					  struct v4l2_requestbuffers *reqbufs)
+{
+	struct s5p_mfc_dev *dev = video_drvdata(file);
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	int ret = 0;
+	unsigned long flags;
+
+	if (reqbufs->memory != V4L2_MEMORY_MMAP) {
+		mfc_err("Only V4L2_MEMORY_MAP is supported\n");
+		return -EINVAL;
+	}
+	if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		/* Can only request buffers after an instance has been opened. */
+		if (ctx->state == MFCINST_INIT) {
+			ctx->src_bufs_cnt = 0;
+			if (reqbufs->count == 0) {
+				mfc_debug(2, "Freeing buffers\n");
+				s5p_mfc_clock_on();
+				ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+				s5p_mfc_clock_off();
+				return ret;
+			}
+			/* Decoding */
+			if (ctx->output_state != QUEUE_FREE) {
+				mfc_err("Bufs have already been requested\n");
+				return -EINVAL;
+			}
+			s5p_mfc_clock_on();
+			ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+			s5p_mfc_clock_off();
+			if (ret) {
+				mfc_err("vb2_reqbufs on output failed\n");
+				return ret;
+			}
+			mfc_debug(2, "vb2_reqbufs: %d\n", ret);
+			ctx->output_state = QUEUE_BUFS_REQUESTED;
+		}
+	} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		ctx->dst_bufs_cnt = 0;
+		if (reqbufs->count == 0) {
+			mfc_debug(2, "Freeing buffers\n");
+			s5p_mfc_clock_on();
+			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+			s5p_mfc_clock_off();
+			return ret;
+		}
+		if (ctx->capture_state != QUEUE_FREE) {
+			mfc_err("Bufs have already been requested\n");
+			return -EINVAL;
+		}
+		ctx->capture_state = QUEUE_BUFS_REQUESTED;
+		s5p_mfc_clock_on();
+		ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+		s5p_mfc_clock_off();
+		if (ret) {
+			mfc_err("vb2_reqbufs on capture failed\n");
+			return ret;
+		}
+		if (reqbufs->count < ctx->dpb_count) {
+			mfc_err("Not enough buffers allocated\n");
+			reqbufs->count = 0;
+			s5p_mfc_clock_on();
+			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+			s5p_mfc_clock_off();
+			return -ENOMEM;
+		}
+		ctx->total_dpb_count = reqbufs->count;
+		ret = s5p_mfc_alloc_codec_buffers(ctx);
+		if (ret) {
+			mfc_err("Failed to allocate decoding buffers\n");
+			reqbufs->count = 0;
+			s5p_mfc_clock_on();
+			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+			s5p_mfc_clock_off();
+			return -ENOMEM;
+		}
+		if (ctx->dst_bufs_cnt == ctx->total_dpb_count) {
+			ctx->capture_state = QUEUE_BUFS_MMAPED;
+		} else {
+			mfc_err("Not all buffers passed to buf_init\n");
+			reqbufs->count = 0;
+			s5p_mfc_clock_on();
+			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+			s5p_mfc_release_codec_buffers(ctx);
+			s5p_mfc_clock_off();
+			return -ENOMEM;
+		}
+		if (s5p_mfc_ctx_ready(ctx)) {
+			spin_lock_irqsave(&dev->condlock, flags);
+			set_bit(ctx->num, &dev->ctx_work_bits);
+			spin_unlock_irqrestore(&dev->condlock, flags);
+		}
+		s5p_mfc_try_run(dev);
+		s5p_mfc_wait_for_done_ctx(ctx,
+					 S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET, 0);
+	}
+	return ret;
+}
+
+/* Query buffer */
+static int vidioc_querybuf(struct file *file, void *priv,
+						   struct v4l2_buffer *buf)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	int ret;
+	int i;
+
+	if (buf->memory != V4L2_MEMORY_MMAP) {
+		mfc_err("Only mmaped buffers can be used\n");
+		return -EINVAL;
+	}
+	mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type);
+	if (ctx->state == MFCINST_INIT &&
+			buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		ret = vb2_querybuf(&ctx->vq_src, buf);
+	} else if (ctx->state == MFCINST_RUNNING &&
+			buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		ret = vb2_querybuf(&ctx->vq_dst, buf);
+		for (i = 0; i < buf->length; i++)
+			buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE;
+	} else {
+		mfc_err("vidioc_querybuf called in an inappropriate state\n");
+		ret = -EINVAL;
+	}
+	mfc_debug_leave();
+	return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->state == MFCINST_ERROR) {
+		mfc_err("Call on QBUF after unrecoverable error\n");
+		return -EIO;
+	}
+	if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_qbuf(&ctx->vq_src, buf);
+	else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_qbuf(&ctx->vq_dst, buf);
+	return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->state == MFCINST_ERROR) {
+		mfc_err("Call on DQBUF after unrecoverable error\n");
+		return -EIO;
+	}
+	if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+	else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+	return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	mfc_debug_enter();
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+
+		if (ctx->state == MFCINST_INIT) {
+			ctx->dst_bufs_cnt = 0;
+			ctx->src_bufs_cnt = 0;
+			ctx->capture_state = QUEUE_FREE;
+			ctx->output_state = QUEUE_FREE;
+			s5p_mfc_alloc_instance_buffer(ctx);
+			s5p_mfc_alloc_dec_temp_buffers(ctx);
+			spin_lock_irqsave(&dev->condlock, flags);
+			set_bit(ctx->num, &dev->ctx_work_bits);
+			spin_unlock_irqrestore(&dev->condlock, flags);
+			s5p_mfc_clean_ctx_int_flags(ctx);
+			s5p_mfc_try_run(dev);
+
+			if (s5p_mfc_wait_for_done_ctx(ctx,
+				S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
+				/* Error or timeout */
+				mfc_err("Error getting instance from hardware\n");
+				s5p_mfc_release_instance_buffer(ctx);
+				s5p_mfc_release_dec_desc_buffer(ctx);
+				return -EIO;
+			}
+			mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+		}
+		ret = vb2_streamon(&ctx->vq_src, type);
+	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		ret = vb2_streamon(&ctx->vq_dst, type);
+	}
+	mfc_debug_leave();
+	return ret;
+}
+
+/* Stream off, which is equivalent to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_streamoff(&ctx->vq_src, type);
+	else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_streamoff(&ctx->vq_dst, type);
+	return -EINVAL;
+}
+
+/* Set controls - v4l2 control framework */
+static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
+		ctx->display_delay = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
+		ctx->display_delay_enable = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+		ctx->loop_filter_mpeg4 = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+		ctx->slice_interface = ctrl->val;
+		break;
+	default:
+		mfc_err("Invalid control 0x%08x\n", ctrl->id);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	switch (ctrl->id) {
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+		if (ctx->state >= MFCINST_HEAD_PARSED &&
+		    ctx->state < MFCINST_ABORT) {
+			ctrl->val = ctx->dpb_count;
+			break;
+		} else if (ctx->state != MFCINST_INIT) {
+			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+			return -EINVAL;
+		}
+		/* Should wait for the header to be parsed */
+		s5p_mfc_clean_ctx_int_flags(ctx);
+		s5p_mfc_wait_for_done_ctx(ctx,
+				S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0);
+		if (ctx->state >= MFCINST_HEAD_PARSED &&
+		    ctx->state < MFCINST_ABORT) {
+			ctrl->val = ctx->dpb_count;
+		} else {
+			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+			return -EINVAL;
+		}
+		break;
+	}
+	return 0;
+}
+
+
+static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
+	.s_ctrl = s5p_mfc_dec_s_ctrl,
+	.g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl,
+};
+
+/* Get cropping information */
+static int vidioc_g_crop(struct file *file, void *priv,
+		struct v4l2_crop *cr)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	u32 left, right, top, bottom;
+
+	if (ctx->state != MFCINST_HEAD_PARSED &&
+	    ctx->state != MFCINST_RUNNING &&
+	    ctx->state != MFCINST_FINISHING &&
+	    ctx->state != MFCINST_FINISHED) {
+		mfc_err("Cannot get crop information\n");
+		return -EINVAL;
+	}
+	if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
+		left = s5p_mfc_read_shm(ctx, CROP_INFO_H);
+		right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
+		left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK;
+		top = s5p_mfc_read_shm(ctx, CROP_INFO_V);
+		bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
+		top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
+		cr->c.left = left;
+		cr->c.top = top;
+		cr->c.width = ctx->img_width - left - right;
+		cr->c.height = ctx->img_height - top - bottom;
+		mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
+			"w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
+			cr->c.width, cr->c.height, right, bottom,
+			ctx->buf_width, ctx->buf_height);
+	} else {
+		cr->c.left = 0;
+		cr->c.top = 0;
+		cr->c.width = ctx->img_width;
+		cr->c.height = ctx->img_height;
+		mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
+			"fh=%d\n", cr->c.width,	cr->c.height, ctx->buf_width,
+							ctx->buf_height);
+	}
+	return 0;
+}
+
+/* v4l2_ioctl_ops */
+static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
+	.vidioc_querycap = vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+	.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+	.vidioc_reqbufs = vidioc_reqbufs,
+	.vidioc_querybuf = vidioc_querybuf,
+	.vidioc_qbuf = vidioc_qbuf,
+	.vidioc_dqbuf = vidioc_dqbuf,
+	.vidioc_streamon = vidioc_streamon,
+	.vidioc_streamoff = vidioc_streamoff,
+	.vidioc_g_crop = vidioc_g_crop,
+};
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+			const struct v4l2_format *fmt, unsigned int *buf_count,
+			unsigned int *plane_count, unsigned int psize[],
+			void *allocators[])
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+
+	/* Video output for decoding (source);
+	 * this can be set after getting an instance */
+	if (ctx->state == MFCINST_INIT &&
+	    vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		/* A single plane is required for input */
+		*plane_count = 1;
+		if (*buf_count < 1)
+			*buf_count = 1;
+		if (*buf_count > MFC_MAX_BUFFERS)
+			*buf_count = MFC_MAX_BUFFERS;
+	/* Video capture for decoding (destination);
+	 * this can be set after the header has been parsed */
+	} else if (ctx->state == MFCINST_HEAD_PARSED &&
+		   vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		/* Output plane count is 2 - one for Y and one for CbCr */
+		*plane_count = 2;
+		/* Setup buffer count */
+		if (*buf_count < ctx->dpb_count)
+			*buf_count = ctx->dpb_count;
+		if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
+			*buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+		if (*buf_count > MFC_MAX_BUFFERS)
+			*buf_count = MFC_MAX_BUFFERS;
+	} else {
+		mfc_err("State seems invalid. State = %d, vq->type = %d\n",
+							ctx->state, vq->type);
+		return -EINVAL;
+	}
+	mfc_debug(2, "Buffer count=%d, plane count=%d\n",
+						*buf_count, *plane_count);
+	if (ctx->state == MFCINST_HEAD_PARSED &&
+	    vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		psize[0] = ctx->luma_size;
+		psize[1] = ctx->chroma_size;
+		allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+		allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+		   ctx->state == MFCINST_INIT) {
+		psize[0] = ctx->dec_src_buf_size;
+		allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+	} else {
+		mfc_err("This video node is dedicated to decoding. Decoding not initalised\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void s5p_mfc_unlock(struct vb2_queue *q)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mutex_unlock(&dev->mfc_mutex);
+}
+
+static void s5p_mfc_lock(struct vb2_queue *q)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mutex_lock(&dev->mfc_mutex);
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+	unsigned int i;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (ctx->capture_state == QUEUE_BUFS_MMAPED)
+			return 0;
+		for (i = 0; i <= ctx->src_fmt->num_planes ; i++) {
+			if (IS_ERR_OR_NULL(ERR_PTR(
+					vb2_dma_contig_plane_dma_addr(vb, i)))) {
+				mfc_err("Plane mem not allocated\n");
+				return -EINVAL;
+			}
+		}
+		if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+			vb2_plane_size(vb, 1) < ctx->chroma_size) {
+			mfc_err("Plane buffer (CAPTURE) is too small\n");
+			return -EINVAL;
+		}
+		i = vb->v4l2_buf.index;
+		ctx->dst_bufs[i].b = vb;
+		ctx->dst_bufs[i].cookie.raw.luma =
+					vb2_dma_contig_plane_dma_addr(vb, 0);
+		ctx->dst_bufs[i].cookie.raw.chroma =
+					vb2_dma_contig_plane_dma_addr(vb, 1);
+		ctx->dst_bufs_cnt++;
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (IS_ERR_OR_NULL(ERR_PTR(
+					vb2_dma_contig_plane_dma_addr(vb, 0)))) {
+			mfc_err("Plane memory not allocated\n");
+			return -EINVAL;
+		}
+		if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) {
+			mfc_err("Plane buffer (OUTPUT) is too small\n");
+			return -EINVAL;
+		}
+
+		i = vb->v4l2_buf.index;
+		ctx->src_bufs[i].b = vb;
+		ctx->src_bufs[i].cookie.stream =
+					vb2_dma_contig_plane_dma_addr(vb, 0);
+		ctx->src_bufs_cnt++;
+	} else {
+		mfc_err("s5p_mfc_buf_init: unknown queue type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+
+	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	if (ctx->state == MFCINST_FINISHING ||
+		ctx->state == MFCINST_FINISHED)
+		ctx->state = MFCINST_RUNNING;
+	/* If context is ready then schedule it to run */
+	if (s5p_mfc_ctx_ready(ctx)) {
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+	}
+	s5p_mfc_try_run(dev);
+	return 0;
+}
+
+static int s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+	unsigned long flags;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	int aborted = 0;
+
+	if ((ctx->state == MFCINST_FINISHING ||
+		ctx->state ==  MFCINST_RUNNING) &&
+		dev->curr_ctx == ctx->num && dev->hw_lock) {
+		ctx->state = MFCINST_ABORT;
+		s5p_mfc_wait_for_done_ctx(ctx,
+					S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 0);
+		aborted = 1;
+	}
+	spin_lock_irqsave(&dev->irqlock, flags);
+	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+		INIT_LIST_HEAD(&ctx->dst_queue);
+		ctx->dst_queue_cnt = 0;
+		ctx->dpb_flush_flag = 1;
+		ctx->dec_dst_flag = 0;
+	}
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+		INIT_LIST_HEAD(&ctx->src_queue);
+		ctx->src_queue_cnt = 0;
+	}
+	if (aborted)
+		ctx->state = MFCINST_RUNNING;
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	return 0;
+}
+
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	struct s5p_mfc_buf *mfc_buf;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+		mfc_buf->used = 0;
+		spin_lock_irqsave(&dev->irqlock, flags);
+		list_add_tail(&mfc_buf->list, &ctx->src_queue);
+		ctx->src_queue_cnt++;
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+		mfc_buf->used = 0;
+		/* Mark destination as available for use by MFC */
+		spin_lock_irqsave(&dev->irqlock, flags);
+		set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag);
+		list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+		ctx->dst_queue_cnt++;
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+	} else {
+		mfc_err("Unsupported buffer type (%d)\n", vq->type);
+	}
+	if (s5p_mfc_ctx_ready(ctx)) {
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+	}
+	s5p_mfc_try_run(dev);
+}
+
+static struct vb2_ops s5p_mfc_dec_qops = {
+	.queue_setup		= s5p_mfc_queue_setup,
+	.wait_prepare		= s5p_mfc_unlock,
+	.wait_finish		= s5p_mfc_lock,
+	.buf_init		= s5p_mfc_buf_init,
+	.start_streaming	= s5p_mfc_start_streaming,
+	.stop_streaming		= s5p_mfc_stop_streaming,
+	.buf_queue		= s5p_mfc_buf_queue,
+};
+
+struct s5p_mfc_codec_ops *get_dec_codec_ops(void)
+{
+	return &decoder_codec_ops;
+}
+
+struct vb2_ops *get_dec_queue_ops(void)
+{
+	return &s5p_mfc_dec_qops;
+}
+
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void)
+{
+	return &s5p_mfc_dec_ioctl_ops;
+}
+
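+/*
+ * IS_MFC51_PRIV() distinguishes the MFC5.1 driver-private MPEG-class controls
+ * (registered below with v4l2_ctrl_new_custom()) from the standard controls
+ * (registered with v4l2_ctrl_new_std()) in s5p_mfc_dec_ctrls_setup().
+ */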
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
+						&& V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+	struct v4l2_ctrl_config cfg;
+	int i;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+	if (ctx->ctrl_handler.error) {
+		mfc_err("v4l2_ctrl_handler_init failed\n");
+		return ctx->ctrl_handler.error;
+	}
+
+	for (i = 0; i < NUM_CTRLS; i++) {
+		if (IS_MFC51_PRIV(controls[i].id)) {
+			memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
+			cfg.ops = &s5p_mfc_dec_ctrl_ops;
+			cfg.id = controls[i].id;
+			cfg.min = controls[i].minimum;
+			cfg.max = controls[i].maximum;
+			cfg.def = controls[i].default_value;
+			cfg.name = controls[i].name;
+			cfg.type = controls[i].type;
+
+			cfg.step = controls[i].step;
+			cfg.menu_skip_mask = 0;
+
+			ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+					&cfg, NULL);
+		} else {
+			ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+					&s5p_mfc_dec_ctrl_ops,
+					controls[i].id, controls[i].minimum,
+					controls[i].maximum, controls[i].step,
+					controls[i].default_value);
+		}
+		if (ctx->ctrl_handler.error) {
+			mfc_err("Adding control (%d) failed\n", i);
+			return ctx->ctrl_handler.error;
+		}
+		if (controls[i].is_volatile && ctx->ctrls[i])
+			ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
+	}
+	return 0;
+}
+
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+	int i;
+
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	for (i = 0; i < NUM_CTRLS; i++)
+		ctx->ctrls[i] = NULL;
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
new file mode 100644
index 0000000..fdf1d99
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_dec.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_DEC_H_
+#define S5P_MFC_DEC_H_
+
+struct s5p_mfc_codec_ops *get_dec_codec_ops(void);
+struct vb2_ops *get_dec_queue_ops(void);
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_DEC_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
new file mode 100644
index 0000000..fdeebb0
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -0,0 +1,1834 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Jeongtae Park	<jtp.park@samsung.com>
+ * Kamil Debski		<k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+
+static struct s5p_mfc_fmt formats[] = {
+	{
+		.name = "4:2:0 2 Planes 64x32 Tiles",
+		.fourcc = V4L2_PIX_FMT_NV12MT,
+		.codec_mode = S5P_FIMV_CODEC_NONE,
+		.type = MFC_FMT_RAW,
+		.num_planes = 2,
+	},
+	{
+		.name = "4:2:0 2 Planes",
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.codec_mode = S5P_FIMV_CODEC_NONE,
+		.type = MFC_FMT_RAW,
+		.num_planes = 2,
+	},
+	{
+		.name = "H264 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.codec_mode = S5P_FIMV_CODEC_H264_ENC,
+		.type = MFC_FMT_ENC,
+		.num_planes = 1,
+	},
+	{
+		.name = "MPEG4 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.codec_mode = S5P_FIMV_CODEC_MPEG4_ENC,
+		.type = MFC_FMT_ENC,
+		.num_planes = 1,
+	},
+	{
+		.name = "H263 Encoded Stream",
+		.fourcc = V4L2_PIX_FMT_H263,
+		.codec_mode = S5P_FIMV_CODEC_H263_ENC,
+		.type = MFC_FMT_ENC,
+		.num_planes = 1,
+	},
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+		    formats[i].type == t)
+			return &formats[i];
+	}
+	return NULL;
+}
+
+static struct mfc_control controls[] = {
+	{
+		.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+		.maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+		.default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1900,
+		.maximum = (1 << 30) - 1,
+		.step = 1,
+		.default_value = 1900,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Padding Control Enable",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Padding Color YUV Value",
+		.minimum = 0,
+		.maximum = (1 << 25) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_BITRATE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = (1 << 30) - 1,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Rate Control Reaction Coeff.",
+		.minimum = 1,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.name = "Force frame type",
+		.minimum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+		.maximum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED,
+		.default_value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VBV_SIZE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+		.maximum = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+		.default_value = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.name = "Frame Skip Enable",
+		.minimum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+		.maximum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+		.menu_skip_mask = 0,
+		.default_value = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Fixed Target Bit Enable",
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 2,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+		.maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+		.default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+		.menu_skip_mask = ~(
+				(1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+				(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+				(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+				),
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+		.maximum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+		.default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = -6,
+		.maximum = 6,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = -6,
+		.maximum = 6,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+		.maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+		.default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "The Number of Ref. Pic for P",
+		.minimum = 1,
+		.maximum = 2,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 51,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 51,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 51,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 51,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 51,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "H263 I-Frame QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_MIN_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "H263 Minimum QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_MAX_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "H263 Maximum QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "H263 P frame QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "H263 B frame QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "MPEG4 I-Frame QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "MPEG4 Minimum QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "MPEG4 Maximum QP value",
+		.minimum = 0,
+		.maximum = 51,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "MPEG4 P frame QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "MPEG4 B frame QP value",
+		.minimum = 1,
+		.maximum = 31,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "H264 Dark Reg Adaptive RC",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "H264 Smooth Reg Adaptive RC",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "H264 Static Reg Adaptive RC",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "H264 Activity Reg Adaptive RC",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
+		.maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
+		.default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = (1 << 16) - 1,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
+		.default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+		.menu_skip_mask = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_MPEG4_QPEL,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+static const char * const *mfc51_get_menu(u32 id)
+{
+	static const char * const mfc51_video_frame_skip[] = {
+		"Disabled",
+		"Level Limit",
+		"VBV/CPB Limit",
+		NULL,
+	};
+	static const char * const mfc51_video_force_frame[] = {
+		"Disabled",
+		"I Frame",
+		"Not Coded",
+		NULL,
+	};
+	switch (id) {
+	case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+		return mfc51_video_frame_skip;
+	case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+		return mfc51_video_force_frame;
+	}
+	return NULL;
+}
+
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+	mfc_debug(2, "src=%d, dst=%d, state=%d\n",
+		  ctx->src_queue_cnt, ctx->dst_queue_cnt, ctx->state);
+	/* context is ready to make header */
+	if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1)
+		return 1;
+	/* context is ready to encode a frame */
+	if (ctx->state == MFCINST_RUNNING &&
+		ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+		return 1;
+	/* context is ready to encode the remaining frames */
+	if (ctx->state == MFCINST_FINISHING &&
+		ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+		return 1;
+	mfc_debug(2, "ctx is not ready\n");
+	return 0;
+}
+
+static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_buf *mb_entry;
+
+	/* move buffers in ref queue to src queue */
+	while (!list_empty(&ctx->ref_queue)) {
+		mb_entry = list_entry((&ctx->ref_queue)->next,
+						struct s5p_mfc_buf, list);
+		list_del(&mb_entry->list);
+		ctx->ref_queue_cnt--;
+		list_add_tail(&mb_entry->list, &ctx->src_queue);
+		ctx->src_queue_cnt++;
+	}
+	mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+		  ctx->src_queue_cnt, ctx->ref_queue_cnt);
+	INIT_LIST_HEAD(&ctx->ref_queue);
+	ctx->ref_queue_cnt = 0;
+}
+
+static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_buf *dst_mb;
+	unsigned long dst_addr;
+	unsigned int dst_size;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
+	dst_size = vb2_plane_size(dst_mb->b, 0);
+	s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	return 0;
+}
+
+static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_enc_params *p = &ctx->enc_params;
+	struct s5p_mfc_buf *dst_mb;
+	unsigned long flags;
+
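+	/*
+	 * In SEPARATE header mode the freshly generated stream header is
+	 * returned to userspace in its own capture buffer.
+	 */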
+	if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
+		spin_lock_irqsave(&dev->irqlock, flags);
+		dst_mb = list_entry(ctx->dst_queue.next,
+				struct s5p_mfc_buf, list);
+		list_del(&dst_mb->list);
+		ctx->dst_queue_cnt--;
+		vb2_set_plane_payload(dst_mb->b, 0,
+						s5p_mfc_get_enc_strm_size());
+		vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+	}
+	ctx->state = MFCINST_RUNNING;
+	if (s5p_mfc_ctx_ready(ctx)) {
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+	}
+	s5p_mfc_try_run(dev);
+	return 0;
+}
+
+static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_buf *dst_mb;
+	struct s5p_mfc_buf *src_mb;
+	unsigned long flags;
+	unsigned long src_y_addr, src_c_addr, dst_addr;
+	unsigned int dst_size;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+	src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
+	src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
+	s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
+	dst_size = vb2_plane_size(dst_mb->b, 0);
+	s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+
+	return 0;
+}
+
+static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_buf *mb_entry;
+	unsigned long enc_y_addr, enc_c_addr;
+	unsigned long mb_y_addr, mb_c_addr;
+	int slice_type;
+	unsigned int strm_size;
+	unsigned long flags;
+
+	slice_type = s5p_mfc_get_enc_slice_type();
+	strm_size = s5p_mfc_get_enc_strm_size();
+	mfc_debug(2, "Encoded slice type: %d", slice_type);
+	mfc_debug(2, "Encoded stream size: %d", strm_size);
+	mfc_debug(2, "Display order: %d",
+		  mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
+	spin_lock_irqsave(&dev->irqlock, flags);
+	if (slice_type >= 0) {
+		s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr);
+		list_for_each_entry(mb_entry, &ctx->src_queue, list) {
+			mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+			mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
+			if ((enc_y_addr == mb_y_addr) &&
+						(enc_c_addr == mb_c_addr)) {
+				list_del(&mb_entry->list);
+				ctx->src_queue_cnt--;
+				vb2_buffer_done(mb_entry->b,
+							VB2_BUF_STATE_DONE);
+				break;
+			}
+		}
+		list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
+			mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
+			mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
+			if ((enc_y_addr == mb_y_addr) &&
+						(enc_c_addr == mb_c_addr)) {
+				list_del(&mb_entry->list);
+				ctx->ref_queue_cnt--;
+				vb2_buffer_done(mb_entry->b,
+							VB2_BUF_STATE_DONE);
+				break;
+			}
+		}
+	}
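+	/*
+	 * A source frame that has been fed to the hardware may still be
+	 * needed as an encoding reference, so move it to the ref queue
+	 * instead of returning it to userspace right away.
+	 */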
+	if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
+		mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+									list);
+		if (mb_entry->used) {
+			list_del(&mb_entry->list);
+			ctx->src_queue_cnt--;
+			list_add_tail(&mb_entry->list, &ctx->ref_queue);
+			ctx->ref_queue_cnt++;
+		}
+		mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+			  ctx->src_queue_cnt, ctx->ref_queue_cnt);
+	}
+	if (strm_size > 0) {
+		/* at least one destination buffer always exists here */
+		mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
+									list);
+		list_del(&mb_entry->list);
+		ctx->dst_queue_cnt--;
+		switch (slice_type) {
+		case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
+			mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+			break;
+		case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
+			mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+			break;
+		case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
+			mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME;
+			break;
+		}
+		vb2_set_plane_payload(mb_entry->b, 0, strm_size);
+		vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+	}
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) {
+		spin_lock(&dev->condlock);
+		clear_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock(&dev->condlock);
+	}
+	return 0;
+}
+
+static struct s5p_mfc_codec_ops encoder_codec_ops = {
+	.pre_seq_start		= enc_pre_seq_start,
+	.post_seq_start		= enc_post_seq_start,
+	.pre_frame_start	= enc_pre_frame_start,
+	.post_frame_start	= enc_post_frame_start,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct s5p_mfc_dev *dev = video_drvdata(file);
+
+	strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+	strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+	cap->bus_info[0] = 0;
+	cap->version = KERNEL_VERSION(1, 0, 0);
+	/*
+	 * This is only a mem-to-mem video device. The capture and output
+	 * device capability flags are left only for backward compatibility
+	 * and are scheduled for removal.
+	 */
+	cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING |
+			    V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+			    V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+	return 0;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
+{
+	struct s5p_mfc_fmt *fmt;
+	int i, j = 0;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (mplane && formats[i].num_planes == 1)
+			continue;
+		else if (!mplane && formats[i].num_planes > 1)
+			continue;
+		if (out && formats[i].type != MFC_FMT_RAW)
+			continue;
+		else if (!out && formats[i].type != MFC_FMT_ENC)
+			continue;
+		if (j == f->index) {
+			fmt = &formats[i];
+			strlcpy(f->description, fmt->name,
+				sizeof(f->description));
+			f->pixelformat = fmt->fourcc;
+			return 0;
+		}
+		++j;
+	}
+	return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, false, false);
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+					  struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, true, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, false, true);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+					  struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, true, true);
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+	mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		/* Capture queue: the encoded bitstream (encoder destination) */
+		pix_fmt_mp->width = 0;
+		pix_fmt_mp->height = 0;
+		pix_fmt_mp->field = V4L2_FIELD_NONE;
+		pix_fmt_mp->pixelformat = ctx->dst_fmt->fourcc;
+		pix_fmt_mp->num_planes = ctx->dst_fmt->num_planes;
+
+		pix_fmt_mp->plane_fmt[0].bytesperline = ctx->enc_dst_buf_size;
+		pix_fmt_mp->plane_fmt[0].sizeimage = ctx->enc_dst_buf_size;
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		/* Output queue: raw frames fed to the encoder (encoder source) */
+		pix_fmt_mp->width = ctx->img_width;
+		pix_fmt_mp->height = ctx->img_height;
+
+		pix_fmt_mp->field = V4L2_FIELD_NONE;
+		pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc;
+		pix_fmt_mp->num_planes = ctx->src_fmt->num_planes;
+
+		pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+		pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+		pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+		pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+	} else {
+		mfc_err("invalid buf type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct s5p_mfc_fmt *fmt;
+	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = find_format(f, MFC_FMT_ENC);
+		if (!fmt) {
+			mfc_err("failed to try output format\n");
+			return -EINVAL;
+		}
+
+		if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+			mfc_err("must be set encoding output size\n");
+			return -EINVAL;
+		}
+
+		pix_fmt_mp->plane_fmt[0].bytesperline =
+			pix_fmt_mp->plane_fmt[0].sizeimage;
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = find_format(f, MFC_FMT_RAW);
+		if (!fmt) {
+			mfc_err("failed to try output format\n");
+			return -EINVAL;
+		}
+
+		if (fmt->num_planes != pix_fmt_mp->num_planes) {
+			mfc_err("failed to try output format\n");
+			return -EINVAL;
+		}
+		v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+			&pix_fmt_mp->height, 4, 1080, 1, 0);
+	} else {
+		mfc_err("invalid buf type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct s5p_mfc_dev *dev = video_drvdata(file);
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	struct s5p_mfc_fmt *fmt;
+	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+	unsigned long flags;
+	int ret = 0;
+
+	ret = vidioc_try_fmt(file, priv, f);
+	if (ret)
+		return ret;
+	if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+		v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+		ret = -EBUSY;
+		goto out;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = find_format(f, MFC_FMT_ENC);
+		if (!fmt) {
+			mfc_err("failed to set capture format\n");
+			return -EINVAL;
+		}
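+		/*
+		 * Selecting the capture (bitstream) format also opens a codec
+		 * instance on the hardware: allocate the instance buffer,
+		 * schedule the context and wait for OPEN_INSTANCE to finish.
+		 */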
+		ctx->state = MFCINST_INIT;
+		ctx->dst_fmt = fmt;
+		ctx->codec_mode = ctx->dst_fmt->codec_mode;
+		ctx->enc_dst_buf_size =	pix_fmt_mp->plane_fmt[0].sizeimage;
+		pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+		ctx->dst_bufs_cnt = 0;
+		ctx->capture_state = QUEUE_FREE;
+		s5p_mfc_alloc_instance_buffer(ctx);
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+		s5p_mfc_clean_ctx_int_flags(ctx);
+		s5p_mfc_try_run(dev);
+		if (s5p_mfc_wait_for_done_ctx(ctx,
+				S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 1)) {
+				/* Error or timeout */
+			mfc_err("Error getting instance from hardware\n");
+			s5p_mfc_release_instance_buffer(ctx);
+			ret = -EIO;
+			goto out;
+		}
+		mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = find_format(f, MFC_FMT_RAW);
+		if (!fmt) {
+			mfc_err("failed to set output format\n");
+			return -EINVAL;
+		}
+		if (fmt->num_planes != pix_fmt_mp->num_planes) {
+			mfc_err("failed to set output format\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		ctx->src_fmt = fmt;
+		ctx->img_width = pix_fmt_mp->width;
+		ctx->img_height = pix_fmt_mp->height;
+		mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
+		mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n",
+			pix_fmt_mp->width, pix_fmt_mp->height,
+			ctx->img_width, ctx->img_height);
+		if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
+			ctx->buf_width = ALIGN(ctx->img_width,
+							S5P_FIMV_NV12M_HALIGN);
+			ctx->luma_size = ALIGN(ctx->img_width,
+				S5P_FIMV_NV12M_HALIGN) * ALIGN(ctx->img_height,
+				S5P_FIMV_NV12M_LVALIGN);
+			ctx->chroma_size = ALIGN(ctx->img_width,
+				S5P_FIMV_NV12M_HALIGN) * ALIGN((ctx->img_height
+				>> 1), S5P_FIMV_NV12M_CVALIGN);
+
+			ctx->luma_size = ALIGN(ctx->luma_size,
+							S5P_FIMV_NV12M_SALIGN);
+			ctx->chroma_size = ALIGN(ctx->chroma_size,
+							S5P_FIMV_NV12M_SALIGN);
+
+			pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+			pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+			pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+			pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+
+		} else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+			ctx->buf_width = ALIGN(ctx->img_width,
+							S5P_FIMV_NV12MT_HALIGN);
+			ctx->luma_size = ALIGN(ctx->img_width,
+				S5P_FIMV_NV12MT_HALIGN)	* ALIGN(ctx->img_height,
+				S5P_FIMV_NV12MT_VALIGN);
+			ctx->chroma_size = ALIGN(ctx->img_width,
+				S5P_FIMV_NV12MT_HALIGN) * ALIGN((ctx->img_height
+				>> 1), S5P_FIMV_NV12MT_VALIGN);
+			ctx->luma_size = ALIGN(ctx->luma_size,
+							S5P_FIMV_NV12MT_SALIGN);
+			ctx->chroma_size = ALIGN(ctx->chroma_size,
+							S5P_FIMV_NV12MT_SALIGN);
+
+			pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+			pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+			pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+			pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+		}
+		ctx->src_bufs_cnt = 0;
+		ctx->output_state = QUEUE_FREE;
+	} else {
+		mfc_err("invalid buf type\n");
+		return -EINVAL;
+	}
+out:
+	mfc_debug_leave();
+	return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+					  struct v4l2_requestbuffers *reqbufs)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	int ret = 0;
+
+	/* only MMAP and USERPTR memory types are supported */
+	if ((reqbufs->memory != V4L2_MEMORY_MMAP) &&
+		(reqbufs->memory != V4L2_MEMORY_USERPTR))
+		return -EINVAL;
+	if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (ctx->capture_state != QUEUE_FREE) {
+			mfc_err("invalid capture state: %d\n",
+							ctx->capture_state);
+			return -EINVAL;
+		}
+		ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+		if (ret != 0) {
+			mfc_err("error in vb2_reqbufs() for E(D)\n");
+			return ret;
+		}
+		ctx->capture_state = QUEUE_BUFS_REQUESTED;
+		ret = s5p_mfc_alloc_codec_buffers(ctx);
+		if (ret) {
+			mfc_err("Failed to allocate encoding buffers\n");
+			reqbufs->count = 0;
+			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+			return -ENOMEM;
+		}
+	} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (ctx->output_state != QUEUE_FREE) {
+			mfc_err("invalid output state: %d\n",
+							ctx->output_state);
+			return -EINVAL;
+		}
+		ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+		if (ret != 0) {
+			mfc_err("error in vb2_reqbufs() for E(S)\n");
+			return ret;
+		}
+		ctx->output_state = QUEUE_BUFS_REQUESTED;
+	} else {
+		mfc_err("invalid buf type\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+						   struct v4l2_buffer *buf)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+	int ret = 0;
+
+	/* only MMAP and USERPTR memory types are supported */
+	if ((buf->memory != V4L2_MEMORY_MMAP) &&
+		(buf->memory != V4L2_MEMORY_USERPTR))
+		return -EINVAL;
+	if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (ctx->state != MFCINST_GOT_INST) {
+			mfc_err("invalid context state: %d\n", ctx->state);
+			return -EINVAL;
+		}
+		ret = vb2_querybuf(&ctx->vq_dst, buf);
+		if (ret != 0) {
+			mfc_err("error in vb2_querybuf() for E(D)\n");
+			return ret;
+		}
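+		/*
+		 * Shift the mmap offset so userspace can tell capture planes
+		 * apart from output planes on the same device node.
+		 */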
+		buf->m.planes[0].m.mem_offset += DST_QUEUE_OFF_BASE;
+	} else if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		ret = vb2_querybuf(&ctx->vq_src, buf);
+		if (ret != 0) {
+			mfc_err("error in vb2_querybuf() for E(S)\n");
+			return ret;
+		}
+	} else {
+		mfc_err("invalid buf type\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->state == MFCINST_ERROR) {
+		mfc_err("Call on QBUF after unrecoverable error\n");
+		return -EIO;
+	}
+	if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_qbuf(&ctx->vq_src, buf);
+	else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_qbuf(&ctx->vq_dst, buf);
+	return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->state == MFCINST_ERROR) {
+		mfc_err("Call on DQBUF after unrecoverable error\n");
+		return -EIO;
+	}
+	if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+	else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+	return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_streamon(&ctx->vq_src, type);
+	else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_streamon(&ctx->vq_dst, type);
+	return -EINVAL;
+}
+
+/* Stream off, which equals to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return vb2_streamoff(&ctx->vq_src, type);
+	else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return vb2_streamoff(&ctx->vq_dst, type);
+	return -EINVAL;
+}
+
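+/*
+ * The helpers below translate V4L2 menu values into the numeric values
+ * that are later programmed into the hardware.
+ */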
+static inline int h264_level(enum v4l2_mpeg_video_h264_level lvl)
+{
+	static unsigned int t[V4L2_MPEG_VIDEO_H264_LEVEL_4_0 + 1] = {
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_1_0   */ 10,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_1B    */ 9,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_1_1   */ 11,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_1_2   */ 12,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_1_3   */ 13,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_2_0   */ 20,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_2_1   */ 21,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_2_2   */ 22,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_3_0   */ 30,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_3_1   */ 31,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_3_2   */ 32,
+		/* V4L2_MPEG_VIDEO_H264_LEVEL_4_0   */ 40,
+	};
+	return t[lvl];
+}
+
+static inline int mpeg4_level(enum v4l2_mpeg_video_mpeg4_level lvl)
+{
+	static unsigned int t[V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 + 1] = {
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0    */ 0,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B   */ 9,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_1    */ 1,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_2    */ 2,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3    */ 3,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B   */ 7,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_4    */ 4,
+		/* V4L2_MPEG_VIDEO_MPEG4_LEVEL_5    */ 5,
+	};
+	return t[lvl];
+}
+
+static inline int vui_sar_idc(enum v4l2_mpeg_video_h264_vui_sar_idc sar)
+{
+	static unsigned int t[V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED + 1] = {
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED     */ 0,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1             */ 1,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11           */ 2,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11           */ 3,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11           */ 4,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33           */ 5,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11           */ 6,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11           */ 7,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11           */ 8,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33           */ 9,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11           */ 10,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11           */ 11,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33           */ 12,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99          */ 13,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3             */ 14,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2             */ 15,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1             */ 16,
+		/* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED        */ 255,
+	};
+	return t[sar];
+}
+
+static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_enc_params *p = &ctx->enc_params;
+	int ret = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+		p->gop_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+		p->slice_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+		p->slice_mb = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+		p->slice_bit = ctrl->val * 8;
+		break;
+	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+		p->intra_refresh_mb = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_PADDING:
+		p->pad = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV:
+		p->pad_luma = (ctrl->val >> 16) & 0xff;
+		p->pad_cb = (ctrl->val >> 8) & 0xff;
+		p->pad_cr = (ctrl->val >> 0) & 0xff;
+		break;
+	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+		p->rc_frame = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_BITRATE:
+		p->rc_bitrate = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF:
+		p->rc_reaction_coeff = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+		ctx->force_frame_type = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+		p->vbv_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+		p->codec.h264.cpb_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+		p->seq_hdr_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+		p->frame_skip_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT:
+		p->fixed_target_bit = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+		p->num_b_frame = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+			p->codec.h264.profile =
+					S5P_FIMV_ENC_PROFILE_H264_MAIN;
+			break;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+			p->codec.h264.profile =
+					S5P_FIMV_ENC_PROFILE_H264_HIGH;
+			break;
+		case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+			p->codec.h264.profile =
+				S5P_FIMV_ENC_PROFILE_H264_BASELINE;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		p->codec.h264.level_v4l2 = ctrl->val;
+		p->codec.h264.level = h264_level(ctrl->val);
+		if (p->codec.h264.level < 0) {
+			mfc_err("Level number is wrong\n");
+			ret = p->codec.h264.level;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+		p->codec.mpeg4.level_v4l2 = ctrl->val;
+		p->codec.mpeg4.level = mpeg4_level(ctrl->val);
+		if (p->codec.mpeg4.level < 0) {
+			mfc_err("Level number is wrong\n");
+			ret = p->codec.mpeg4.level;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+		p->codec.h264.loop_filter_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+		p->codec.h264.loop_filter_alpha = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+		p->codec.h264.loop_filter_beta = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		p->codec.h264.entropy_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P:
+		p->codec.h264.num_ref_pic_4p = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+		p->codec.h264._8x8_transform = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+		p->codec.h264.rc_mb = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+		p->codec.h264.rc_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+		p->codec.h264.rc_min_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+		p->codec.h264.rc_max_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+		p->codec.h264.rc_p_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+		p->codec.h264.rc_b_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
+		p->codec.mpeg4.rc_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:
+	case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
+		p->codec.mpeg4.rc_min_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:
+	case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
+		p->codec.mpeg4.rc_max_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
+		p->codec.mpeg4.rc_p_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:
+	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
+		p->codec.mpeg4.rc_b_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK:
+		p->codec.h264.rc_mb_dark = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH:
+		p->codec.h264.rc_mb_smooth = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC:
+		p->codec.h264.rc_mb_static = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY:
+		p->codec.h264.rc_mb_activity = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+		p->codec.h264.vui_sar = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+		p->codec.h264.vui_sar_idc = vui_sar_idc(ctrl->val);
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
+		p->codec.h264.vui_ext_sar_width = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
+		p->codec.h264.vui_ext_sar_height = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+		p->codec.h264.open_gop = !ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+		p->codec.h264.open_gop_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+			p->codec.mpeg4.profile =
+				S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE;
+			break;
+		case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+			p->codec.mpeg4.profile =
+			S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+		p->codec.mpeg4.quarter_pixel = ctrl->val;
+		break;
+	default:
+		v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
+							ctrl->id, ctrl->val);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
+	.s_ctrl = s5p_mfc_enc_s_ctrl,
+};
+
+static int vidioc_s_parm(struct file *file, void *priv,
+			 struct v4l2_streamparm *a)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		ctx->enc_params.rc_framerate_num =
+					a->parm.output.timeperframe.denominator;
+		ctx->enc_params.rc_framerate_denom =
+					a->parm.output.timeperframe.numerator;
+	} else {
+		mfc_err("Setting FPS is only possible for the output queue\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+			 struct v4l2_streamparm *a)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		a->parm.output.timeperframe.denominator =
+					ctx->enc_params.rc_framerate_num;
+		a->parm.output.timeperframe.numerator =
+					ctx->enc_params.rc_framerate_denom;
+	} else {
+		mfc_err("Setting FPS is only possible for the output queue\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
+	.vidioc_querycap = vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+	.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+	.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+	.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+	.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+	.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+	.vidioc_reqbufs = vidioc_reqbufs,
+	.vidioc_querybuf = vidioc_querybuf,
+	.vidioc_qbuf = vidioc_qbuf,
+	.vidioc_dqbuf = vidioc_dqbuf,
+	.vidioc_streamon = vidioc_streamon,
+	.vidioc_streamoff = vidioc_streamoff,
+	.vidioc_s_parm = vidioc_s_parm,
+	.vidioc_g_parm = vidioc_g_parm,
+};
+
+static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
+{
+	int i;
+
+	if (!fmt)
+		return -EINVAL;
+	if (fmt->num_planes != vb->num_planes) {
+		mfc_err("invalid plane number for the format\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < fmt->num_planes; i++) {
+		if (!vb2_dma_contig_plane_dma_addr(vb, i)) {
+			mfc_err("failed to get plane cookie\n");
+			return -EINVAL;
+		}
+		mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
+				vb->v4l2_buf.index, i,
+				vb2_dma_contig_plane_dma_addr(vb, i));
+	}
+	return 0;
+}
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+			const struct v4l2_format *fmt,
+			unsigned int *buf_count, unsigned int *plane_count,
+			unsigned int psize[], void *allocators[])
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+
+	if (ctx->state != MFCINST_GOT_INST) {
+		mfc_err("inavlid state: %d\n", ctx->state);
+		return -EINVAL;
+	}
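+	/*
+	 * Encoded (capture) buffers are allocated from memory bank 1, raw
+	 * (output) frames from bank 2, so the two queues use different
+	 * allocator contexts.
+	 */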
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (ctx->dst_fmt)
+			*plane_count = ctx->dst_fmt->num_planes;
+		else
+			*plane_count = MFC_ENC_CAP_PLANE_COUNT;
+		if (*buf_count < 1)
+			*buf_count = 1;
+		if (*buf_count > MFC_MAX_BUFFERS)
+			*buf_count = MFC_MAX_BUFFERS;
+		psize[0] = ctx->enc_dst_buf_size;
+		allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (ctx->src_fmt)
+			*plane_count = ctx->src_fmt->num_planes;
+		else
+			*plane_count = MFC_ENC_OUT_PLANE_COUNT;
+
+		if (*buf_count < 1)
+			*buf_count = 1;
+		if (*buf_count > MFC_MAX_BUFFERS)
+			*buf_count = MFC_MAX_BUFFERS;
+		psize[0] = ctx->luma_size;
+		psize[1] = ctx->chroma_size;
+		allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+		allocators[1] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+	} else {
+		mfc_err("inavlid queue type: %d\n", vq->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void s5p_mfc_unlock(struct vb2_queue *q)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mutex_unlock(&dev->mfc_mutex);
+}
+
+static void s5p_mfc_lock(struct vb2_queue *q)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mutex_lock(&dev->mfc_mutex);
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+	unsigned int i;
+	int ret;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+		if (ret < 0)
+			return ret;
+		i = vb->v4l2_buf.index;
+		ctx->dst_bufs[i].b = vb;
+		ctx->dst_bufs[i].cookie.stream =
+					vb2_dma_contig_plane_dma_addr(vb, 0);
+		ctx->dst_bufs_cnt++;
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		ret = check_vb_with_fmt(ctx->src_fmt, vb);
+		if (ret < 0)
+			return ret;
+		i = vb->v4l2_buf.index;
+		ctx->src_bufs[i].b = vb;
+		ctx->src_bufs[i].cookie.raw.luma =
+					vb2_dma_contig_plane_dma_addr(vb, 0);
+		ctx->src_bufs[i].cookie.raw.chroma =
+					vb2_dma_contig_plane_dma_addr(vb, 1);
+		ctx->src_bufs_cnt++;
+	} else {
+		mfc_err("inavlid queue type: %d\n", vq->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+	int ret;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+		if (ret < 0)
+			return ret;
+		mfc_debug(2, "plane size: %ld, dst size: %d\n",
+			vb2_plane_size(vb, 0), ctx->enc_dst_buf_size);
+		if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) {
+			mfc_err("plane size is too small for capture\n");
+			return -EINVAL;
+		}
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		ret = check_vb_with_fmt(ctx->src_fmt, vb);
+		if (ret < 0)
+			return ret;
+		mfc_debug(2, "plane size: %ld, luma size: %d\n",
+			vb2_plane_size(vb, 0), ctx->luma_size);
+		mfc_debug(2, "plane size: %ld, chroma size: %d\n",
+			vb2_plane_size(vb, 1), ctx->chroma_size);
+		if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+		    vb2_plane_size(vb, 1) < ctx->chroma_size) {
+			mfc_err("plane size is too small for output\n");
+			return -EINVAL;
+		}
+	} else {
+		mfc_err("inavlid queue type: %d\n", vq->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+
+	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	/* If context is ready then dev = work->data;schedule it to run */
+	if (s5p_mfc_ctx_ready(ctx)) {
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+	}
+	s5p_mfc_try_run(dev);
+	return 0;
+}
+
+static int s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+	unsigned long flags;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+
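+	/*
+	 * If the hardware is currently processing this context, request an
+	 * abort and wait for the frame in flight to complete before the
+	 * queues are flushed.
+	 */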
+	if ((ctx->state == MFCINST_FINISHING ||
+		ctx->state == MFCINST_RUNNING) &&
+		dev->curr_ctx == ctx->num && dev->hw_lock) {
+		ctx->state = MFCINST_ABORT;
+		s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_FRAME_DONE_RET,
+					  0);
+	}
+	ctx->state = MFCINST_FINISHED;
+	spin_lock_irqsave(&dev->irqlock, flags);
+	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+		INIT_LIST_HEAD(&ctx->dst_queue);
+		ctx->dst_queue_cnt = 0;
+	}
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		cleanup_ref_queue(ctx);
+		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+		INIT_LIST_HEAD(&ctx->src_queue);
+		ctx->src_queue_cnt = 0;
+	}
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	return 0;
+}
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	struct s5p_mfc_buf *mfc_buf;
+
+	if (ctx->state == MFCINST_ERROR) {
+		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+		cleanup_ref_queue(ctx);
+		return;
+	}
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+		mfc_buf->used = 0;
+		/* Mark destination as available for use by MFC */
+		spin_lock_irqsave(&dev->irqlock, flags);
+		list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+		ctx->dst_queue_cnt++;
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+		mfc_buf->used = 0;
+		spin_lock_irqsave(&dev->irqlock, flags);
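+		/*
+		 * An output buffer queued with zero bytes used marks the end
+		 * of the stream: switch to FINISHING and drain the reference
+		 * queue.
+		 */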
+		if (vb->v4l2_planes[0].bytesused == 0) {
+			mfc_debug(1, "change state to FINISHING\n");
+			ctx->state = MFCINST_FINISHING;
+			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+			cleanup_ref_queue(ctx);
+		} else {
+			list_add_tail(&mfc_buf->list, &ctx->src_queue);
+			ctx->src_queue_cnt++;
+		}
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+	} else {
+		mfc_err("unsupported buffer type (%d)\n", vq->type);
+	}
+	if (s5p_mfc_ctx_ready(ctx)) {
+		spin_lock_irqsave(&dev->condlock, flags);
+		set_bit(ctx->num, &dev->ctx_work_bits);
+		spin_unlock_irqrestore(&dev->condlock, flags);
+	}
+	s5p_mfc_try_run(dev);
+}
+
+static struct vb2_ops s5p_mfc_enc_qops = {
+	.queue_setup		= s5p_mfc_queue_setup,
+	.wait_prepare		= s5p_mfc_unlock,
+	.wait_finish		= s5p_mfc_lock,
+	.buf_init		= s5p_mfc_buf_init,
+	.buf_prepare		= s5p_mfc_buf_prepare,
+	.start_streaming	= s5p_mfc_start_streaming,
+	.stop_streaming		= s5p_mfc_stop_streaming,
+	.buf_queue		= s5p_mfc_buf_queue,
+};
+
+struct s5p_mfc_codec_ops *get_enc_codec_ops(void)
+{
+	return &encoder_codec_ops;
+}
+
+struct vb2_ops *get_enc_queue_ops(void)
+{
+	return &s5p_mfc_enc_qops;
+}
+
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void)
+{
+	return &s5p_mfc_enc_ioctl_ops;
+}
+
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
+						&& V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+	struct v4l2_ctrl_config cfg;
+	int i;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+	if (ctx->ctrl_handler.error) {
+		mfc_err("v4l2_ctrl_handler_init failed\n");
+		return ctx->ctrl_handler.error;
+	}
+	for (i = 0; i < NUM_CTRLS; i++) {
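+		/*
+		 * Controls private to the MFC 5.1 driver are registered via
+		 * v4l2_ctrl_new_custom(); standard MPEG controls use the
+		 * std/std_menu helpers.
+		 */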
+		if (IS_MFC51_PRIV(controls[i].id)) {
+			memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
+			cfg.ops = &s5p_mfc_enc_ctrl_ops;
+			cfg.id = controls[i].id;
+			cfg.min = controls[i].minimum;
+			cfg.max = controls[i].maximum;
+			cfg.def = controls[i].default_value;
+			cfg.name = controls[i].name;
+			cfg.type = controls[i].type;
+			cfg.flags = 0;
+
+			if (cfg.type == V4L2_CTRL_TYPE_MENU) {
+				cfg.step = 0;
+				cfg.menu_skip_mask = controls[i].menu_skip_mask;
+				cfg.qmenu = mfc51_get_menu(cfg.id);
+			} else {
+				cfg.step = controls[i].step;
+				cfg.menu_skip_mask = 0;
+			}
+			ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+					&cfg, NULL);
+		} else {
+			if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
+				ctx->ctrls[i] = v4l2_ctrl_new_std_menu(
+					&ctx->ctrl_handler,
+					&s5p_mfc_enc_ctrl_ops, controls[i].id,
+					controls[i].maximum, 0,
+					controls[i].default_value);
+			} else {
+				ctx->ctrls[i] = v4l2_ctrl_new_std(
+					&ctx->ctrl_handler,
+					&s5p_mfc_enc_ctrl_ops, controls[i].id,
+					controls[i].minimum,
+					controls[i].maximum, controls[i].step,
+					controls[i].default_value);
+			}
+		}
+		if (ctx->ctrl_handler.error) {
+			mfc_err("Adding control (%d) failed\n", i);
+			return ctx->ctrl_handler.error;
+		}
+		if (controls[i].is_volatile && ctx->ctrls[i])
+			ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
+	}
+	return 0;
+}
+
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+	int i;
+
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	for (i = 0; i < NUM_CTRLS; i++)
+		ctx->ctrls[i] = NULL;
+}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
new file mode 100644
index 0000000..ca9fd66
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_enc.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_ENC_H_
+#define S5P_MFC_ENC_H_
+
+struct s5p_mfc_codec_ops *get_enc_codec_ops(void);
+struct vb2_ops *get_enc_queue_ops(void);
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_ENC_H_  */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
new file mode 100644
index 0000000..37860e2
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
@@ -0,0 +1,92 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_intr.c
+ *
+ * C file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains functions used to wait for command completion.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
+{
+	int ret;
+
+	ret = wait_event_interruptible_timeout(dev->queue,
+		(dev->int_cond && (dev->int_type == command
+		|| dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+		msecs_to_jiffies(MFC_INT_TIMEOUT));
+	if (ret == 0) {
+		mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
+							dev->int_type, command);
+		return 1;
+	} else if (ret == -ERESTARTSYS) {
+		mfc_err("Interrupted by a signal\n");
+		return 1;
+	}
+	mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
+							dev->int_type, command);
+	if (dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
+		return 1;
+	return 0;
+}
+
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev)
+{
+	dev->int_cond = 0;
+	dev->int_type = 0;
+	dev->int_err = 0;
+}
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+				    int command, int interrupt)
+{
+	int ret;
+
+	if (interrupt) {
+		ret = wait_event_interruptible_timeout(ctx->queue,
+				(ctx->int_cond && (ctx->int_type == command
+			|| ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+					msecs_to_jiffies(MFC_INT_TIMEOUT));
+	} else {
+		ret = wait_event_timeout(ctx->queue,
+				(ctx->int_cond && (ctx->int_type == command
+			|| ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+					msecs_to_jiffies(MFC_INT_TIMEOUT));
+	}
+	if (ret == 0) {
+		mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n",
+							ctx->int_type, command);
+		return 1;
+	} else if (ret == -ERESTARTSYS) {
+		mfc_err("Interrupted by a signal\n");
+		return 1;
+	}
+	mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
+							ctx->int_type, command);
+	if (ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
+		return 1;
+	return 0;
+}
+
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx)
+{
+	ctx->int_cond = 0;
+	ctx->int_type = 0;
+	ctx->int_err = 0;
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h
new file mode 100644
index 0000000..18341a8
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_intr.h
@@ -0,0 +1,26 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_intr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * It contains waiting functions declarations.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_INTR_H_
+#define S5P_MFC_INTR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+			      int command, int interrupt);
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command);
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_INTR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
new file mode 100644
index 0000000..b5fd3d4
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -0,0 +1,1397 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+ *
+ * Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains hardware-related functions.
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+#include <asm/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define OFFSETA(x)		(((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
+#define OFFSETB(x)		(((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
+
+/* Allocate temporary buffers for decoding */
+int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
+{
+	void *desc_virt;
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	ctx->desc_buf = vb2_dma_contig_memops.alloc(
+			dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
+	if (IS_ERR_VALUE((int)ctx->desc_buf)) {
+		ctx->desc_buf = NULL;
+		mfc_err("Allocating DESC buffer failed\n");
+		return -ENOMEM;
+	}
+	ctx->desc_phys = s5p_mfc_mem_cookie(
+			dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
+	BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+	desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
+	if (desc_virt == NULL) {
+		vb2_dma_contig_memops.put(ctx->desc_buf);
+		ctx->desc_phys = 0;
+		ctx->desc_buf = NULL;
+		mfc_err("Remapping DESC buffer failed\n");
+		return -ENOMEM;
+	}
+	memset(desc_virt, 0, DESC_BUF_SIZE);
+	wmb();
+	return 0;
+}
+
+/* Release temporary buffers for decoding */
+void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+	if (ctx->desc_phys) {
+		vb2_dma_contig_memops.put(ctx->desc_buf);
+		ctx->desc_phys = 0;
+		ctx->desc_buf = NULL;
+	}
+}
+
+/* Allocate codec buffers */
+int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned int enc_ref_y_size = 0;
+	unsigned int enc_ref_c_size = 0;
+	unsigned int guard_width, guard_height;
+
+	if (ctx->type == MFCINST_DECODER) {
+		mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
+			  ctx->luma_size, ctx->chroma_size, ctx->mv_size);
+		mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
+	} else if (ctx->type == MFCINST_ENCODER) {
+		enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+			* ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+		enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+
+		if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
+			enc_ref_c_size = ALIGN(ctx->img_width,
+						S5P_FIMV_NV12MT_HALIGN)
+						* ALIGN(ctx->img_height >> 1,
+						S5P_FIMV_NV12MT_VALIGN);
+			enc_ref_c_size = ALIGN(enc_ref_c_size,
+							S5P_FIMV_NV12MT_SALIGN);
+		} else {
+			guard_width = ALIGN(ctx->img_width + 16,
+							S5P_FIMV_NV12MT_HALIGN);
+			guard_height = ALIGN((ctx->img_height >> 1) + 4,
+							S5P_FIMV_NV12MT_VALIGN);
+			enc_ref_c_size = ALIGN(guard_width * guard_height,
+					       S5P_FIMV_NV12MT_SALIGN);
+		}
+		mfc_debug(2, "recon luma size: %d chroma size: %d\n",
+			  enc_ref_y_size, enc_ref_c_size);
+	} else {
+		return -EINVAL;
+	}
+	/* Codecs have different memory requirements */
+	switch (ctx->codec_mode) {
+	case S5P_FIMV_CODEC_H264_DEC:
+		ctx->bank1_size =
+		    ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
+					S5P_FIMV_DEC_VERT_NB_MV_SIZE,
+					S5P_FIMV_DEC_BUF_ALIGN);
+		ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
+		break;
+	case S5P_FIMV_CODEC_MPEG4_DEC:
+		ctx->bank1_size =
+		    ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
+				     S5P_FIMV_DEC_UPNB_MV_SIZE +
+				     S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+				     S5P_FIMV_DEC_STX_PARSER_SIZE +
+				     S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
+				     S5P_FIMV_DEC_BUF_ALIGN);
+		ctx->bank2_size = 0;
+		break;
+	case S5P_FIMV_CODEC_VC1RCV_DEC:
+	case S5P_FIMV_CODEC_VC1_DEC:
+		ctx->bank1_size =
+		    ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+			     S5P_FIMV_DEC_UPNB_MV_SIZE +
+			     S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+			     S5P_FIMV_DEC_NB_DCAC_SIZE +
+			     3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
+			     S5P_FIMV_DEC_BUF_ALIGN);
+		ctx->bank2_size = 0;
+		break;
+	case S5P_FIMV_CODEC_MPEG2_DEC:
+		ctx->bank1_size = 0;
+		ctx->bank2_size = 0;
+		break;
+	case S5P_FIMV_CODEC_H263_DEC:
+		ctx->bank1_size =
+		    ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+			     S5P_FIMV_DEC_UPNB_MV_SIZE +
+			     S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+			     S5P_FIMV_DEC_NB_DCAC_SIZE,
+			     S5P_FIMV_DEC_BUF_ALIGN);
+		ctx->bank2_size = 0;
+		break;
+	case S5P_FIMV_CODEC_H264_ENC:
+		ctx->bank1_size = (enc_ref_y_size * 2) +
+				   S5P_FIMV_ENC_UPMV_SIZE +
+				   S5P_FIMV_ENC_COLFLG_SIZE +
+				   S5P_FIMV_ENC_INTRAMD_SIZE +
+				   S5P_FIMV_ENC_NBORINFO_SIZE;
+		ctx->bank2_size = (enc_ref_y_size * 2) +
+				   (enc_ref_c_size * 4) +
+				   S5P_FIMV_ENC_INTRAPRED_SIZE;
+		break;
+	case S5P_FIMV_CODEC_MPEG4_ENC:
+		ctx->bank1_size = (enc_ref_y_size * 2) +
+				   S5P_FIMV_ENC_UPMV_SIZE +
+				   S5P_FIMV_ENC_COLFLG_SIZE +
+				   S5P_FIMV_ENC_ACDCCOEF_SIZE;
+		ctx->bank2_size = (enc_ref_y_size * 2) +
+				   (enc_ref_c_size * 4);
+		break;
+	case S5P_FIMV_CODEC_H263_ENC:
+		ctx->bank1_size = (enc_ref_y_size * 2) +
+				   S5P_FIMV_ENC_UPMV_SIZE +
+				   S5P_FIMV_ENC_ACDCCOEF_SIZE;
+		ctx->bank2_size = (enc_ref_y_size * 2) +
+				   (enc_ref_c_size * 4);
+		break;
+	default:
+		break;
+	}
+	/* Allocate only if memory from bank 1 is necessary */
+	if (ctx->bank1_size > 0) {
+		ctx->bank1_buf = vb2_dma_contig_memops.alloc(
+		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
+		if (IS_ERR(ctx->bank1_buf)) {
+			ctx->bank1_buf = NULL;
+			printk(KERN_ERR
+			       "Buf alloc for decoding failed (port A)\n");
+			return -ENOMEM;
+		}
+		ctx->bank1_phys = s5p_mfc_mem_cookie(
+		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
+		BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+	}
+	/* Allocate only if memory from bank 2 is necessary */
+	if (ctx->bank2_size > 0) {
+		ctx->bank2_buf = vb2_dma_contig_memops.alloc(
+		dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
+		if (IS_ERR(ctx->bank2_buf)) {
+			ctx->bank2_buf = NULL;
+			mfc_err("Buf alloc for decoding failed (port B)\n");
+			return -ENOMEM;
+		}
+		ctx->bank2_phys = s5p_mfc_mem_cookie(
+		dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
+		BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
+	}
+	return 0;
+}
+
+/* Release buffers allocated for codec */
+void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
+{
+	if (ctx->bank1_buf) {
+		vb2_dma_contig_memops.put(ctx->bank1_buf);
+		ctx->bank1_buf = NULL;
+		ctx->bank1_phys = 0;
+		ctx->bank1_size = 0;
+	}
+	if (ctx->bank2_buf) {
+		vb2_dma_contig_memops.put(ctx->bank2_buf);
+		ctx->bank2_buf = NULL;
+		ctx->bank2_phys = 0;
+		ctx->bank2_size = 0;
+	}
+}
+
+/* Allocate memory for instance data buffer */
+int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
+{
+	void *context_virt;
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
+		ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
+		ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
+	else
+		ctx->ctx_size = MFC_CTX_BUF_SIZE;
+	ctx->ctx_buf = vb2_dma_contig_memops.alloc(
+		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
+	if (IS_ERR(ctx->ctx_buf)) {
+		mfc_err("Allocating context buffer failed\n");
+		ctx->ctx_phys = 0;
+		ctx->ctx_buf = NULL;
+		return -ENOMEM;
+	}
+	ctx->ctx_phys = s5p_mfc_mem_cookie(
+		dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
+	BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+	ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
+	context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
+	if (context_virt == NULL) {
+		mfc_err("Remapping instance buffer failed\n");
+		vb2_dma_contig_memops.put(ctx->ctx_buf);
+		ctx->ctx_phys = 0;
+		ctx->ctx_buf = NULL;
+		return -ENOMEM;
+	}
+	/* Zero content of the allocated memory */
+	memset(context_virt, 0, ctx->ctx_size);
+	wmb();
+	if (s5p_mfc_init_shm(ctx) < 0) {
+		vb2_dma_contig_memops.put(ctx->ctx_buf);
+		ctx->ctx_phys = 0;
+		ctx->ctx_buf = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/* Release instance buffer */
+void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
+{
+	if (ctx->ctx_buf) {
+		vb2_dma_contig_memops.put(ctx->ctx_buf);
+		ctx->ctx_phys = 0;
+		ctx->ctx_buf = NULL;
+	}
+	if (ctx->shm_alloc) {
+		vb2_dma_contig_memops.put(ctx->shm_alloc);
+		ctx->shm_alloc = NULL;
+		ctx->shm = NULL;
+	}
+}
+
+/* Set registers for decoding temporary buffers */
+void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
+	mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
+}
+
+/* Set registers for shared buffer */
+static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
+}
+
+/* Set registers for decoding stream buffer */
+int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
+		  unsigned int start_num_byte, unsigned int buf_size)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
+	mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
+	mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
+	s5p_mfc_write_shm(ctx, start_num_byte, START_BYTE_NUM);
+	return 0;
+}
+
+/* Set decoding frame buffer */
+int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
+{
+	unsigned int frame_size, i;
+	unsigned int frame_size_ch, frame_size_mv;
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned int dpb;
+	size_t buf_addr1, buf_addr2;
+	int buf_size1, buf_size2;
+
+	buf_addr1 = ctx->bank1_phys;
+	buf_size1 = ctx->bank1_size;
+	buf_addr2 = ctx->bank2_phys;
+	buf_size2 = ctx->bank2_size;
+	dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+						~S5P_FIMV_DPB_COUNT_MASK;
+	mfc_write(dev, ctx->total_dpb_count | dpb,
+						S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+	s5p_mfc_set_shared_buffer(ctx);
+	switch (ctx->codec_mode) {
+	case S5P_FIMV_CODEC_H264_DEC:
+		mfc_write(dev, OFFSETA(buf_addr1),
+						S5P_FIMV_H264_VERT_NB_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
+		buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
+		break;
+	case S5P_FIMV_CODEC_MPEG4_DEC:
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
+		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
+		buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
+		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+		break;
+	case S5P_FIMV_CODEC_H263_DEC:
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
+		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
+		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+		break;
+	case S5P_FIMV_CODEC_VC1_DEC:
+	case S5P_FIMV_CODEC_VC1RCV_DEC:
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
+		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
+		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
+		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
+		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
+		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
+		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+		break;
+	case S5P_FIMV_CODEC_MPEG2_DEC:
+		break;
+	default:
+		mfc_err("Unknown codec for decoding (%x)\n",
+			ctx->codec_mode);
+		return -EINVAL;
+		break;
+	}
+	frame_size = ctx->luma_size;
+	frame_size_ch = ctx->chroma_size;
+	frame_size_mv = ctx->mv_size;
+	mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
+								frame_size_mv);
+	for (i = 0; i < ctx->total_dpb_count; i++) {
+		/* Bank2 */
+		mfc_debug(2, "Luma %d: %x\n", i,
+					ctx->dst_bufs[i].cookie.raw.luma);
+		mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
+						S5P_FIMV_DEC_LUMA_ADR + i * 4);
+		mfc_debug(2, "\tChroma %d: %x\n", i,
+					ctx->dst_bufs[i].cookie.raw.chroma);
+		mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
+					       S5P_FIMV_DEC_CHROMA_ADR + i * 4);
+		if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
+			mfc_debug(2, "\tBuf2: %x, size: %d\n",
+							buf_addr2, buf_size2);
+			mfc_write(dev, OFFSETB(buf_addr2),
+						S5P_FIMV_H264_MV_ADR + i * 4);
+			buf_addr2 += frame_size_mv;
+			buf_size2 -= frame_size_mv;
+		}
+	}
+	mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
+	mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
+			buf_size1,  buf_size2, ctx->total_dpb_count);
+	if (buf_size1 < 0 || buf_size2 < 0) {
+		mfc_debug(2, "Not enough memory has been allocated\n");
+		return -ENOMEM;
+	}
+	s5p_mfc_write_shm(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
+	s5p_mfc_write_shm(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
+	if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
+		s5p_mfc_write_shm(ctx, frame_size_mv, ALLOC_MV_SIZE);
+	mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
+					<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+						S5P_FIMV_SI_CH0_INST_ID);
+	return 0;
+}
+
+/* Set registers for encoding stream buffer */
+int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
+		unsigned long addr, unsigned int size)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
+	mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
+	return 0;
+}
+
+void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+		unsigned long y_addr, unsigned long c_addr)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
+	mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
+}
+
+void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+		unsigned long *y_addr, unsigned long *c_addr)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	*y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
+							<< MFC_OFFSET_SHIFT);
+	*c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
+							<< MFC_OFFSET_SHIFT);
+}
+
+/* Set encoding ref & codec buffer */
+int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	size_t buf_addr1, buf_addr2;
+	size_t buf_size1, buf_size2;
+	unsigned int enc_ref_y_size, enc_ref_c_size;
+	unsigned int guard_width, guard_height;
+	int i;
+
+	buf_addr1 = ctx->bank1_phys;
+	buf_size1 = ctx->bank1_size;
+	buf_addr2 = ctx->bank2_phys;
+	buf_size2 = ctx->bank2_size;
+	enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+		* ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+	enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+	if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
+		enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+			* ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
+		enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
+	} else {
+		guard_width = ALIGN(ctx->img_width + 16,
+						S5P_FIMV_NV12MT_HALIGN);
+		guard_height = ALIGN((ctx->img_height >> 1) + 4,
+						S5P_FIMV_NV12MT_VALIGN);
+		enc_ref_c_size = ALIGN(guard_width * guard_height,
+				       S5P_FIMV_NV12MT_SALIGN);
+	}
+	mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
+	switch (ctx->codec_mode) {
+	case S5P_FIMV_CODEC_H264_ENC:
+		for (i = 0; i < 2; i++) {
+			mfc_write(dev, OFFSETA(buf_addr1),
+				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+			buf_addr1 += enc_ref_y_size;
+			buf_size1 -= enc_ref_y_size;
+
+			mfc_write(dev, OFFSETB(buf_addr2),
+				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+			buf_addr2 += enc_ref_y_size;
+			buf_size2 -= enc_ref_y_size;
+		}
+		for (i = 0; i < 4; i++) {
+			mfc_write(dev, OFFSETB(buf_addr2),
+				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+			buf_addr2 += enc_ref_c_size;
+			buf_size2 -= enc_ref_c_size;
+		}
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
+		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1),
+					S5P_FIMV_H264_COZERO_FLAG_ADR);
+		buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1),
+					S5P_FIMV_H264_UP_INTRA_MD_ADR);
+		buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
+		mfc_write(dev, OFFSETB(buf_addr2),
+					S5P_FIMV_H264_UP_INTRA_PRED_ADR);
+		buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
+		buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1),
+					S5P_FIMV_H264_NBOR_INFO_ADR);
+		buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
+		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+			buf_size1, buf_size2);
+		break;
+	case S5P_FIMV_CODEC_MPEG4_ENC:
+		for (i = 0; i < 2; i++) {
+			mfc_write(dev, OFFSETA(buf_addr1),
+				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+			buf_addr1 += enc_ref_y_size;
+			buf_size1 -= enc_ref_y_size;
+			mfc_write(dev, OFFSETB(buf_addr2),
+				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+			buf_addr2 += enc_ref_y_size;
+			buf_size2 -= enc_ref_y_size;
+		}
+		for (i = 0; i < 4; i++) {
+			mfc_write(dev, OFFSETB(buf_addr2),
+				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+			buf_addr2 += enc_ref_c_size;
+			buf_size2 -= enc_ref_c_size;
+		}
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
+		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1),
+						S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
+		buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1),
+						S5P_FIMV_MPEG4_ACDC_COEF_ADR);
+		buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+			buf_size1, buf_size2);
+		break;
+	case S5P_FIMV_CODEC_H263_ENC:
+		for (i = 0; i < 2; i++) {
+			mfc_write(dev, OFFSETA(buf_addr1),
+				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+			buf_addr1 += enc_ref_y_size;
+			buf_size1 -= enc_ref_y_size;
+			mfc_write(dev, OFFSETB(buf_addr2),
+				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+			buf_addr2 += enc_ref_y_size;
+			buf_size2 -= enc_ref_y_size;
+		}
+		for (i = 0; i < 4; i++) {
+			mfc_write(dev, OFFSETB(buf_addr2),
+				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+			buf_addr2 += enc_ref_c_size;
+			buf_size2 -= enc_ref_c_size;
+		}
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
+		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
+		buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+		buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+			buf_size1, buf_size2);
+		break;
+	default:
+		mfc_err("Unknown codec set for encoding: %d\n",
+			ctx->codec_mode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_enc_params *p = &ctx->enc_params;
+	unsigned int reg;
+	unsigned int shm;
+
+	/* width */
+	mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
+	/* height */
+	mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
+	/* pictype : enable, IDR period */
+	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+	reg |= (1 << 18);
+	reg &= ~(0xFFFF);
+	reg |= p->gop_size;
+	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+	mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
+	/* multi-slice control */
+	/* multi-slice MB number or bit size */
+	mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
+	if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+		mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
+	} else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+		mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
+	} else {
+		mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
+		mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
+	}
+	/* cyclic intra refresh */
+	mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
+	/* memory structure cur. frame */
+	if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+		mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+	else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+		mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+	/* padding control & value */
+	reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
+	if (p->pad) {
+		/** enable */
+		reg |= (1 << 31);
+		/** cr value */
+		reg &= ~(0xFF << 16);
+		reg |= (p->pad_cr << 16);
+		/** cb value */
+		reg &= ~(0xFF << 8);
+		reg |= (p->pad_cb << 8);
+		/** y value */
+		reg &= ~(0xFF);
+		reg |= (p->pad_luma);
+	} else {
+		/** disable & all value clear */
+		reg = 0;
+	}
+	mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
+	/* rate control config. */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+	/** frame-level rate control */
+	reg &= ~(0x1 << 9);
+	reg |= (p->rc_frame << 9);
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+	/* bit rate */
+	if (p->rc_frame)
+		mfc_write(dev, p->rc_bitrate,
+			S5P_FIMV_ENC_RC_BIT_RATE);
+	else
+		mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
+	/* reaction coefficient */
+	if (p->rc_frame)
+		mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
+	shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+	/* seq header ctrl */
+	shm &= ~(0x1 << 3);
+	shm |= (p->seq_hdr_mode << 3);
+	/* frame skip mode */
+	shm &= ~(0x3 << 1);
+	shm |= (p->frame_skip_mode << 1);
+	s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+	/* fixed target bit */
+	s5p_mfc_write_shm(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
+	return 0;
+}
+
+static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_enc_params *p = &ctx->enc_params;
+	struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
+	unsigned int reg;
+	unsigned int shm;
+
+	s5p_mfc_set_enc_params(ctx);
+	/* pictype : number of B */
+	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+	/* num_b_frame - 0 ~ 2 */
+	reg &= ~(0x3 << 16);
+	reg |= (p->num_b_frame << 16);
+	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+	/* profile & level */
+	reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+	/* level */
+	reg &= ~(0xFF << 8);
+	reg |= (p_264->level << 8);
+	/* profile - 0 ~ 2 */
+	reg &= ~(0x3F);
+	reg |= p_264->profile;
+	mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+	/* interlace  */
+	mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
+	/* height */
+	if (p->interlace)
+		mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
+	/* loopfilter ctrl */
+	mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
+	/* loopfilter alpha offset */
+	if (p_264->loop_filter_alpha < 0) {
+		reg = 0x10;
+		reg |= (0xFF - p_264->loop_filter_alpha) + 1;
+	} else {
+		reg = 0x00;
+		reg |= (p_264->loop_filter_alpha & 0xF);
+	}
+	mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
+	/* loopfilter beta offset */
+	if (p_264->loop_filter_beta < 0) {
+		reg = 0x10;
+		reg |= (0xFF - p_264->loop_filter_beta) + 1;
+	} else {
+		reg = 0x00;
+		reg |= (p_264->loop_filter_beta & 0xF);
+	}
+	mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
+	/* entropy coding mode */
+	if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+		mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+	else
+		mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+	/* number of ref. picture */
+	reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
+	/* num of ref. pictures of P */
+	reg &= ~(0x3 << 5);
+	reg |= (p_264->num_ref_pic_4p << 5);
+	/* max number of ref. pictures */
+	reg &= ~(0x1F);
+	reg |= p_264->max_ref_pic;
+	mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
+	/* 8x8 transform enable */
+	mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
+	/* rate control config. */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+	/* macroblock level rate control */
+	reg &= ~(0x1 << 8);
+	reg |= (p_264->rc_mb << 8);
+	/* frame QP */
+	reg &= ~(0x3F);
+	reg |= p_264->rc_frame_qp;
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+	/* frame rate */
+	if (p->rc_frame && p->rc_framerate_denom)
+		mfc_write(dev, p->rc_framerate_num * 1000
+			/ p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+	else
+		mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+	/* max & min value of QP */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+	/* max QP */
+	reg &= ~(0x3F << 8);
+	reg |= (p_264->rc_max_qp << 8);
+	/* min QP */
+	reg &= ~(0x3F);
+	reg |= p_264->rc_min_qp;
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+	/* macroblock adaptive scaling features */
+	if (p_264->rc_mb) {
+		reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
+		/* dark region */
+		reg &= ~(0x1 << 3);
+		reg |= (p_264->rc_mb_dark << 3);
+		/* smooth region */
+		reg &= ~(0x1 << 2);
+		reg |= (p_264->rc_mb_smooth << 2);
+		/* static region */
+		reg &= ~(0x1 << 1);
+		reg |= (p_264->rc_mb_static << 1);
+		/* high activity region */
+		reg &= ~(0x1);
+		reg |= p_264->rc_mb_activity;
+		mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
+	}
+	if (!p->rc_frame &&
+	    !p_264->rc_mb) {
+		shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+		shm &= ~(0xFFF);
+		shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
+		shm |= (p_264->rc_p_frame_qp & 0x3F);
+		s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+	}
+	/* extended encoder ctrl */
+	shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+	/* AR VUI control */
+	shm &= ~(0x1 << 15);
+	shm |= (p_264->vui_sar << 1);
+	s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+	if (p_264->vui_sar) {
+		/* aspect ration IDC */
+		shm = s5p_mfc_read_shm(ctx, SAMPLE_ASPECT_RATIO_IDC);
+		shm &= ~(0xFF);
+		shm |= p_264->vui_sar_idc;
+		s5p_mfc_write_shm(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
+		if (p_264->vui_sar_idc == 0xFF) {
+			/* sample  AR info */
+			shm = s5p_mfc_read_shm(ctx, EXTENDED_SAR);
+			shm &= ~(0xFFFFFFFF);
+			shm |= p_264->vui_ext_sar_width << 16;
+			shm |= p_264->vui_ext_sar_height;
+			s5p_mfc_write_shm(ctx, shm, EXTENDED_SAR);
+		}
+	}
+	/* intra picture period for H.264 */
+	shm = s5p_mfc_read_shm(ctx, H264_I_PERIOD);
+	/* control */
+	shm &= ~(0x1 << 16);
+	shm |= (p_264->open_gop << 16);
+	/* value */
+	if (p_264->open_gop) {
+		shm &= ~(0xFFFF);
+		shm |= p_264->open_gop_size;
+	}
+	s5p_mfc_write_shm(ctx, shm, H264_I_PERIOD);
+	/* extended encoder ctrl */
+	shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+	/* vbv buffer size */
+	if (p->frame_skip_mode ==
+			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+		shm &= ~(0xFFFF << 16);
+		shm |= (p_264->cpb_size << 16);
+	}
+	s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+	return 0;
+}
+
+static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_enc_params *p = &ctx->enc_params;
+	struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+	unsigned int reg;
+	unsigned int shm;
+	unsigned int framerate;
+
+	s5p_mfc_set_enc_params(ctx);
+	/* pictype : number of B */
+	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+	/* num_b_frame - 0 ~ 2 */
+	reg &= ~(0x3 << 16);
+	reg |= (p->num_b_frame << 16);
+	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+	/* profile & level */
+	reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+	/* level */
+	reg &= ~(0xFF << 8);
+	reg |= (p_mpeg4->level << 8);
+	/* profile - 0 ~ 2 */
+	reg &= ~(0x3F);
+	reg |= p_mpeg4->profile;
+	mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+	/* quarter_pixel */
+	mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
+	/* qp */
+	if (!p->rc_frame) {
+		shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+		shm &= ~(0xFFF);
+		shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
+		shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
+		s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+	}
+	/* frame rate */
+	if (p->rc_frame) {
+		if (p->rc_framerate_denom > 0) {
+			framerate = p->rc_framerate_num * 1000 /
+						p->rc_framerate_denom;
+			mfc_write(dev, framerate,
+				S5P_FIMV_ENC_RC_FRAME_RATE);
+			shm = s5p_mfc_read_shm(ctx, RC_VOP_TIMING);
+			shm &= ~(0xFFFFFFFF);
+			shm |= (1 << 31);
+			shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
+			shm |= (p->rc_framerate_denom & 0xFFFF);
+			s5p_mfc_write_shm(ctx, shm, RC_VOP_TIMING);
+		}
+	} else {
+		mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+	}
+	/* rate control config. */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+	/* frame QP */
+	reg &= ~(0x3F);
+	reg |= p_mpeg4->rc_frame_qp;
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+	/* max & min value of QP */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+	/* max QP */
+	reg &= ~(0x3F << 8);
+	reg |= (p_mpeg4->rc_max_qp << 8);
+	/* min QP */
+	reg &= ~(0x3F);
+	reg |= p_mpeg4->rc_min_qp;
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+	/* extended encoder ctrl */
+	shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+	/* vbv buffer size */
+	if (p->frame_skip_mode ==
+			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+		shm &= ~(0xFFFF << 16);
+		shm |= (p->vbv_size << 16);
+	}
+	s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+	return 0;
+}
+
+static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_enc_params *p = &ctx->enc_params;
+	struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+	unsigned int reg;
+	unsigned int shm;
+
+	s5p_mfc_set_enc_params(ctx);
+	/* qp */
+	if (!p->rc_frame) {
+		shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+		shm &= ~(0xFFF);
+		shm |= (p_h263->rc_p_frame_qp & 0x3F);
+		s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+	}
+	/* frame rate */
+	if (p->rc_frame && p->rc_framerate_denom)
+		mfc_write(dev, p->rc_framerate_num * 1000
+			/ p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+	else
+		mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+	/* rate control config. */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+	/* frame QP */
+	reg &= ~(0x3F);
+	reg |= p_h263->rc_frame_qp;
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+	/* max & min value of QP */
+	reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+	/* max QP */
+	reg &= ~(0x3F << 8);
+	reg |= (p_h263->rc_max_qp << 8);
+	/* min QP */
+	reg &= ~(0x3F);
+	reg |= p_h263->rc_min_qp;
+	mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+	/* extended encoder ctrl */
+	shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+	/* vbv buffer size */
+	if (p->frame_skip_mode ==
+			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+		shm &= ~(0xFFFF << 16);
+		shm |= (p->vbv_size << 16);
+	}
+	s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+	return 0;
+}
+
+/* Initialize decoding */
+int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	s5p_mfc_set_shared_buffer(ctx);
+	/* Setup loop filter, for decoding this is only valid for MPEG4 */
+	if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
+		mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
+	else
+		mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
+	mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
+		S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
+		S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
+		S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
+		S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+	mfc_write(dev,
+	((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
+				| (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+	return 0;
+}
+
+static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned int dpb;
+
+	if (flush)
+		dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
+			S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+	else
+		dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+			~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+	mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+}
+
+/* Decode a single frame */
+int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
+					enum s5p_mfc_decode_arg last_frame)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
+	s5p_mfc_set_shared_buffer(ctx);
+	s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+	/* Issue different commands to the instance depending on whether
+	 * this is the last frame or not. */
+	switch (last_frame) {
+	case MFC_DEC_FRAME:
+		mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
+		S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+		break;
+	case MFC_DEC_LAST_FRAME:
+		mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
+		S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+		break;
+	case MFC_DEC_RES_CHANGE:
+		mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
+		S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+		S5P_FIMV_SI_CH0_INST_ID);
+		break;
+	}
+	mfc_debug(2, "Decoding a usual frame\n");
+	return 0;
+}
+
+int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
+		s5p_mfc_set_enc_params_h264(ctx);
+	else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
+		s5p_mfc_set_enc_params_mpeg4(ctx);
+	else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
+		s5p_mfc_set_enc_params_h263(ctx);
+	else {
+		mfc_err("Unknown codec for encoding (%x)\n",
+			ctx->codec_mode);
+		return -EINVAL;
+	}
+	s5p_mfc_set_shared_buffer(ctx);
+	mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
+		(ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+	return 0;
+}
+
+/* Encode a single frame */
+int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	/* memory structure cur. frame */
+	if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+		mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+	else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+		mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+	s5p_mfc_set_shared_buffer(ctx);
+	mfc_write(dev, (S5P_FIMV_CH_FRAME_START << 16 & 0x70000) |
+		(ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+	return 0;
+}
+
+static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
+{
+	unsigned long flags;
+	int new_ctx;
+	int cnt;
+
+	spin_lock_irqsave(&dev->condlock, flags);
+	new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
+	cnt = 0;
+	while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
+		new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
+		if (++cnt > MFC_NUM_CONTEXTS) {
+			/* No contexts to run */
+			spin_unlock_irqrestore(&dev->condlock, flags);
+			return -EAGAIN;
+		}
+	}
+	spin_unlock_irqrestore(&dev->condlock, flags);
+	return new_ctx;
+}
+
+static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+
+	s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
+	dev->curr_ctx = ctx->num;
+	s5p_mfc_clean_ctx_int_flags(ctx);
+	s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
+}
+
+static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	struct s5p_mfc_buf *temp_vb;
+	unsigned long flags;
+	unsigned int index;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	/* Frames are being decoded */
+	if (list_empty(&ctx->src_queue)) {
+		mfc_debug(2, "No src buffers\n");
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+		return -EAGAIN;
+	}
+	/* Get the next source buffer */
+	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+	temp_vb->used = 1;
+	s5p_mfc_set_dec_stream_buffer(ctx,
+		vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), ctx->consumed_stream,
+					temp_vb->b->v4l2_planes[0].bytesused);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	index = temp_vb->b->v4l2_buf.index;
+	dev->curr_ctx = ctx->num;
+	s5p_mfc_clean_ctx_int_flags(ctx);
+	if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+		last_frame = MFC_DEC_LAST_FRAME;
+		mfc_debug(2, "Setting ctx->state to FINISHING\n");
+		ctx->state = MFCINST_FINISHING;
+	}
+	s5p_mfc_decode_one_frame(ctx, last_frame);
+	return 0;
+}
+
+static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	struct s5p_mfc_buf *dst_mb;
+	struct s5p_mfc_buf *src_mb;
+	unsigned long src_y_addr, src_c_addr, dst_addr;
+	unsigned int dst_size;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	if (list_empty(&ctx->src_queue)) {
+		mfc_debug(2, "no src buffers\n");
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+		return -EAGAIN;
+	}
+	if (list_empty(&ctx->dst_queue)) {
+		mfc_debug(2, "no dst buffers\n");
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+		return -EAGAIN;
+	}
+	src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+	src_mb->used = 1;
+	src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
+	src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
+	s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
+	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+	dst_mb->used = 1;
+	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
+	dst_size = vb2_plane_size(dst_mb->b, 0);
+	s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	dev->curr_ctx = ctx->num;
+	s5p_mfc_clean_ctx_int_flags(ctx);
+	s5p_mfc_encode_one_frame(ctx);
+	return 0;
+}
+
+static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	struct s5p_mfc_buf *temp_vb;
+
+	/* Initializing decoding - parsing header */
+	spin_lock_irqsave(&dev->irqlock, flags);
+	mfc_debug(2, "Preparing to init decoding\n");
+	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+	s5p_mfc_set_dec_desc_buffer(ctx);
+	mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+	s5p_mfc_set_dec_stream_buffer(ctx,
+				vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
+				0, temp_vb->b->v4l2_planes[0].bytesused);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	dev->curr_ctx = ctx->num;
+	s5p_mfc_clean_ctx_int_flags(ctx);
+	s5p_mfc_init_decode(ctx);
+}
+
+static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	struct s5p_mfc_buf *dst_mb;
+	unsigned long dst_addr;
+	unsigned int dst_size;
+
+	s5p_mfc_set_enc_ref_buffer(ctx);
+	spin_lock_irqsave(&dev->irqlock, flags);
+	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
+	dst_size = vb2_plane_size(dst_mb->b, 0);
+	s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	dev->curr_ctx = ctx->num;
+	s5p_mfc_clean_ctx_int_flags(ctx);
+	s5p_mfc_init_encode(ctx);
+}
+
+static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	unsigned long flags;
+	struct s5p_mfc_buf *temp_vb;
+	int ret;
+
+	/*
+	 * The header has been parsed, so processing can start now.
+	 * First set the output frame buffers.
+	 */
+	if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
+		mfc_err("It seems that not all destination buffers were "
+			"mmapped\nMFC requires that all destination buffers "
+			"are mmapped before starting processing\n");
+		return -EAGAIN;
+	}
+	spin_lock_irqsave(&dev->irqlock, flags);
+	if (list_empty(&ctx->src_queue)) {
+		mfc_err("Header has been deallocated in the middle of"
+			" initialization\n");
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+		return -EIO;
+	}
+	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+	mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+	s5p_mfc_set_dec_stream_buffer(ctx,
+				vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
+				0, temp_vb->b->v4l2_planes[0].bytesused);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	dev->curr_ctx = ctx->num;
+	s5p_mfc_clean_ctx_int_flags(ctx);
+	ret = s5p_mfc_set_dec_frame_buffer(ctx);
+	if (ret) {
+		mfc_err("Failed to alloc frame mem\n");
+		ctx->state = MFCINST_ERROR;
+	}
+	return ret;
+}
+
+/* Try running an operation on hardware */
+void s5p_mfc_try_run(struct s5p_mfc_dev *dev)
+{
+	struct s5p_mfc_ctx *ctx;
+	int new_ctx;
+	unsigned int ret = 0;
+
+	if (test_bit(0, &dev->enter_suspend)) {
+		mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
+		return;
+	}
+	/* Check whether hardware is not running */
+	if (test_and_set_bit(0, &dev->hw_lock) != 0) {
+		/* This is perfectly ok, the scheduled ctx should wait */
+		mfc_debug(1, "Couldn't lock HW\n");
+		return;
+	}
+	/* Choose the context to run */
+	new_ctx = s5p_mfc_get_new_ctx(dev);
+	if (new_ctx < 0) {
+		/* No contexts to run */
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
+			mfc_err("Failed to unlock hardware\n");
+			return;
+		}
+		mfc_debug(1, "No ctx is scheduled to be run\n");
+		return;
+	}
+	ctx = dev->ctx[new_ctx];
+	/* Got context to run in ctx */
+	/*
+	 * Last frame has already been sent to MFC.
+	 * Now obtaining frames from MFC buffer
+	 */
+	s5p_mfc_clock_on();
+	if (ctx->type == MFCINST_DECODER) {
+		s5p_mfc_set_dec_desc_buffer(ctx);
+		switch (ctx->state) {
+		case MFCINST_FINISHING:
+			s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
+			break;
+		case MFCINST_RUNNING:
+			ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+			break;
+		case MFCINST_INIT:
+			s5p_mfc_clean_ctx_int_flags(ctx);
+			ret = s5p_mfc_open_inst_cmd(ctx);
+			break;
+		case MFCINST_RETURN_INST:
+			s5p_mfc_clean_ctx_int_flags(ctx);
+			ret = s5p_mfc_close_inst_cmd(ctx);
+			break;
+		case MFCINST_GOT_INST:
+			s5p_mfc_run_init_dec(ctx);
+			break;
+		case MFCINST_HEAD_PARSED:
+			ret = s5p_mfc_run_init_dec_buffers(ctx);
+			mfc_debug(1, "head parsed\n");
+			break;
+		case MFCINST_RES_CHANGE_INIT:
+			s5p_mfc_run_res_change(ctx);
+			break;
+		case MFCINST_RES_CHANGE_FLUSH:
+			s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+			break;
+		case MFCINST_RES_CHANGE_END:
+			mfc_debug(2, "Finished remaining frames after resolution change\n");
+			ctx->capture_state = QUEUE_FREE;
+			mfc_debug(2, "Will re-init the codec\n");
+			s5p_mfc_run_init_dec(ctx);
+			break;
+		default:
+			ret = -EAGAIN;
+		}
+	} else if (ctx->type == MFCINST_ENCODER) {
+		switch (ctx->state) {
+		case MFCINST_FINISHING:
+		case MFCINST_RUNNING:
+			ret = s5p_mfc_run_enc_frame(ctx);
+			break;
+		case MFCINST_INIT:
+			s5p_mfc_clean_ctx_int_flags(ctx);
+			ret = s5p_mfc_open_inst_cmd(ctx);
+			break;
+		case MFCINST_RETURN_INST:
+			s5p_mfc_clean_ctx_int_flags(ctx);
+			ret = s5p_mfc_close_inst_cmd(ctx);
+			break;
+		case MFCINST_GOT_INST:
+			s5p_mfc_run_init_enc(ctx);
+			break;
+		default:
+			ret = -EAGAIN;
+		}
+	} else {
+		mfc_err("Invalid context type: %d\n", ctx->type);
+		ret = -EAGAIN;
+	}
+
+	if (ret) {
+		/* Free hardware lock */
+		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+			mfc_err("Failed to unlock hardware\n");
+
+		/* This is indeed important: as no operation has been
+		 * scheduled, the clock count must be decreased here, because
+		 * no interrupt related to this try_run will ever come from
+		 * the hardware to do it. */
+		s5p_mfc_clock_off();
+	}
+}
+
+
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
+{
+	struct s5p_mfc_buf *b;
+	int i;
+
+	while (!list_empty(lh)) {
+		b = list_entry(lh->next, struct s5p_mfc_buf, list);
+		for (i = 0; i < b->b->num_planes; i++)
+			vb2_set_plane_payload(b->b, i, 0);
+		vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+		list_del(&b->list);
+	}
+}
+
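
Throughout this file OFFSETA() and OFFSETB() convert bus addresses into the shifted, bank-relative offsets that the MFC registers store, and s5p_mfc_get_enc_frame_buffer() applies the inverse transform when reading encoder addresses back. A minimal sketch of that round trip; the helper names are illustrative, and dev->bank1 is assumed to hold the bank 1 base address, as the macros imply:

/* Sketch only: forward and inverse of the bank 1 address transform. */
static u32 to_bank1_offset(struct s5p_mfc_dev *dev, unsigned long addr)
{
	return (addr - dev->bank1) >> MFC_OFFSET_SHIFT;	/* what OFFSETA() does */
}

static unsigned long from_bank1_offset(struct s5p_mfc_dev *dev, u32 ofs)
{
	return dev->bank1 + ((unsigned long)ofs << MFC_OFFSET_SHIFT);
}
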
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
new file mode 100644
index 0000000..2ad3def
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -0,0 +1,93 @@
+/*
+ * drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * Contains declarations of hw related functions.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_OPR_H_
+#define S5P_MFC_OPR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_init_encode(struct s5p_mfc_ctx *mfc_ctx);
+
+/* Decoding functions */
+int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
+						  unsigned int start_num_byte,
+						  unsigned int buf_size);
+
+/* Encoding functions */
+void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+		unsigned long y_addr, unsigned long c_addr);
+int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
+		unsigned long addr, unsigned int size);
+void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+		unsigned long *y_addr, unsigned long *c_addr);
+int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *mfc_ctx);
+
+int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
+					enum s5p_mfc_decode_arg last_frame);
+int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *mfc_ctx);
+
+/* Memory allocation */
+int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
+
+int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx);
+
+int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx);
+
+void s5p_mfc_try_run(struct s5p_mfc_dev *dev);
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
+
+#define s5p_mfc_get_dspl_y_adr()	(readl(dev->regs_base + \
+					S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+					MFC_OFFSET_SHIFT)
+#define s5p_mfc_get_dec_y_adr()		(readl(dev->regs_base + \
+					S5P_FIMV_SI_DECODE_Y_ADR) << \
+					MFC_OFFSET_SHIFT)
+#define s5p_mfc_get_dspl_status()	readl(dev->regs_base + \
+						S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status()	readl(dev->regs_base + \
+						S5P_FIMV_SI_DECODE_STATUS)
+#define s5p_mfc_get_frame_type()	(readl(dev->regs_base + \
+						S5P_FIMV_DECODE_FRAME_TYPE) \
+					& S5P_FIMV_DECODE_FRAME_MASK)
+#define s5p_mfc_get_consumed_stream()	readl(dev->regs_base + \
+						S5P_FIMV_SI_CONSUMED_BYTES)
+#define s5p_mfc_get_int_reason()	(readl(dev->regs_base + \
+					S5P_FIMV_RISC2HOST_CMD) & \
+					S5P_FIMV_RISC2HOST_CMD_MASK)
+#define s5p_mfc_get_int_err()		readl(dev->regs_base + \
+						S5P_FIMV_RISC2HOST_ARG2)
+#define s5p_mfc_err_dec(x)		(((x) & S5P_FIMV_ERR_DEC_MASK) >> \
+							S5P_FIMV_ERR_DEC_SHIFT)
+#define s5p_mfc_err_dspl(x)		(((x) & S5P_FIMV_ERR_DSPL_MASK) >> \
+							S5P_FIMV_ERR_DSPL_SHIFT)
+#define s5p_mfc_get_img_width()		readl(dev->regs_base + \
+						S5P_FIMV_SI_HRESOL)
+#define s5p_mfc_get_img_height()	readl(dev->regs_base + \
+						S5P_FIMV_SI_VRESOL)
+#define s5p_mfc_get_dpb_count()		readl(dev->regs_base + \
+						S5P_FIMV_SI_BUF_NUMBER)
+#define s5p_mfc_get_inst_no()		readl(dev->regs_base + \
+						S5P_FIMV_RISC2HOST_ARG1)
+#define s5p_mfc_get_enc_strm_size()	readl(dev->regs_base + \
+						S5P_FIMV_ENC_SI_STRM_SIZE)
+#define s5p_mfc_get_enc_slice_type()	readl(dev->regs_base + \
+						S5P_FIMV_ENC_SI_SLICE_TYPE)
+
+#endif /* S5P_MFC_OPR_H_ */
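
The readl() wrappers above expand in place, so they rely on a local variable named dev being in scope at the call site. A minimal sketch of how the interrupt path might combine them, assuming the error word really splits into the decode and display halves that the two masks suggest; the helper name is illustrative:

/* Sketch only: "dev" must exist locally because the macros use dev->regs_base. */
static void log_risc2host_reply(struct s5p_mfc_dev *dev)
{
	unsigned int reason = s5p_mfc_get_int_reason();
	unsigned int err = s5p_mfc_get_int_err();

	mfc_debug(2, "cmd: %u, dec err: %u, dspl err: %u\n",
		  reason, s5p_mfc_err_dec(err), s5p_mfc_err_dspl(err));
}
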
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
new file mode 100644
index 0000000..0503d14
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -0,0 +1,137 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_pm.h"
+
+#define MFC_CLKNAME		"sclk_mfc"
+#define MFC_GATE_CLK_NAME	"mfc"
+
+#define CLK_DEBUG
+
+static struct s5p_mfc_pm *pm;
+static struct s5p_mfc_dev *p_dev;
+
+#ifdef CLK_DEBUG
+atomic_t clk_ref;
+#endif
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
+{
+	int ret = 0;
+
+	pm = &dev->pm;
+	p_dev = dev;
+	pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
+	if (IS_ERR(pm->clock_gate)) {
+		mfc_err("Failed to get clock-gating control\n");
+		ret = PTR_ERR(pm->clock_gate);
+		goto err_g_ip_clk;
+	}
+
+	ret = clk_prepare(pm->clock_gate);
+	if (ret) {
+		mfc_err("Failed to prepare clock-gating control\n");
+		goto err_p_ip_clk;
+	}
+
+	pm->clock = clk_get(&dev->plat_dev->dev, MFC_CLKNAME);
+	if (IS_ERR(pm->clock)) {
+		mfc_err("Failed to get MFC clock\n");
+		ret = PTR_ERR(pm->clock);
+		goto err_g_ip_clk_2;
+	}
+
+	ret = clk_prepare(pm->clock);
+	if (ret) {
+		mfc_err("Failed to prepare MFC clock\n");
+		goto err_p_ip_clk_2;
+	}
+
+	atomic_set(&pm->power, 0);
+#ifdef CONFIG_PM_RUNTIME
+	pm->device = &dev->plat_dev->dev;
+	pm_runtime_enable(pm->device);
+#endif
+#ifdef CLK_DEBUG
+	atomic_set(&clk_ref, 0);
+#endif
+	return 0;
+err_p_ip_clk_2:
+	clk_put(pm->clock);
+err_g_ip_clk_2:
+	clk_unprepare(pm->clock_gate);
+err_p_ip_clk:
+	clk_put(pm->clock_gate);
+err_g_ip_clk:
+	return ret;
+}
+
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
+{
+	clk_unprepare(pm->clock_gate);
+	clk_put(pm->clock_gate);
+	clk_unprepare(pm->clock);
+	clk_put(pm->clock);
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_disable(pm->device);
+#endif
+}
+
+int s5p_mfc_clock_on(void)
+{
+	int ret;
+#ifdef CLK_DEBUG
+	atomic_inc(&clk_ref);
+	mfc_debug(3, "+ %d", atomic_read(&clk_ref));
+#endif
+	ret = clk_enable(pm->clock_gate);
+	return ret;
+}
+
+void s5p_mfc_clock_off(void)
+{
+#ifdef CLK_DEBUG
+	atomic_dec(&clk_ref);
+	mfc_debug(3, "- %d", atomic_read(&clk_ref));
+#endif
+	clk_disable(pm->clock_gate);
+}
+
+int s5p_mfc_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	return pm_runtime_get_sync(pm->device);
+#else
+	atomic_set(&pm->power, 1);
+	return 0;
+#endif
+}
+
+int s5p_mfc_power_off(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	return pm_runtime_put_sync(pm->device);
+#else
+	atomic_set(&pm->power, 0);
+	return 0;
+#endif
+}
+
+
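
Clock gating is driven manually here: every s5p_mfc_clock_on() is expected to be matched by exactly one s5p_mfc_clock_off() (the CLK_DEBUG counter only reports the balance), which is why s5p_mfc_try_run() switches the clock off on its own only when no job could be scheduled. A minimal sketch of the balanced pattern around a one-off register read; the helper name is illustrative and s5p_mfc_get_dspl_status() comes from s5p_mfc_opr.h:

/* Sketch only: keep clock_on()/clock_off() balanced around register access. */
static u32 read_display_status(struct s5p_mfc_dev *dev)
{
	u32 status;

	s5p_mfc_clock_on();
	status = s5p_mfc_get_dspl_status();	/* needs the gate clock enabled */
	s5p_mfc_clock_off();
	return status;
}
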
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
new file mode 100644
index 0000000..875c534
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
@@ -0,0 +1,24 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_pm.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_PM_H_
+#define S5P_MFC_PM_H_
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev);
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_clock_on(void);
+void s5p_mfc_clock_off(void);
+int s5p_mfc_power_on(void);
+int s5p_mfc_power_off(void);
+
+#endif /* S5P_MFC_PM_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c
new file mode 100644
index 0000000..b5933d2
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c
@@ -0,0 +1,47 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_shm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+#include <linux/dma-mapping.h>
+#endif
+#include <linux/io.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+
+int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx)
+{
+	struct s5p_mfc_dev *dev = ctx->dev;
+	void *shm_alloc_ctx = dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+
+	ctx->shm_alloc = vb2_dma_contig_memops.alloc(shm_alloc_ctx,
+							SHARED_BUF_SIZE);
+	if (IS_ERR(ctx->shm_alloc)) {
+		mfc_err("failed to allocate shared memory\n");
+		return PTR_ERR(ctx->shm_alloc);
+	}
+	/* shm_ofs only keeps the offset from base (port a) */
+	ctx->shm_ofs = s5p_mfc_mem_cookie(shm_alloc_ctx, ctx->shm_alloc)
+								- dev->bank1;
+	BUG_ON(ctx->shm_ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+	ctx->shm = vb2_dma_contig_memops.vaddr(ctx->shm_alloc);
+	if (!ctx->shm) {
+		vb2_dma_contig_memops.put(ctx->shm_alloc);
+		ctx->shm_ofs = 0;
+		ctx->shm_alloc = NULL;
+		mfc_err("failed to get virtual address of shared memory\n");
+		return -ENOMEM;
+	}
+	memset((void *)ctx->shm, 0, SHARED_BUF_SIZE);
+	wmb();
+	return 0;
+}
+
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h b/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h
new file mode 100644
index 0000000..416ebd7
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h
@@ -0,0 +1,90 @@
+/*
+ * linux/drivers/media/platform/s5p-mfc/s5p_mfc_shm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_SHM_H_
+#define S5P_MFC_SHM_H_
+
+enum MFC_SHM_OFS {
+	EXTENEDED_DECODE_STATUS	= 0x00,	/* D */
+	SET_FRAME_TAG		= 0x04, /* D */
+	GET_FRAME_TAG_TOP	= 0x08, /* D */
+	GET_FRAME_TAG_BOT	= 0x0C, /* D */
+	PIC_TIME_TOP		= 0x10, /* D */
+	PIC_TIME_BOT		= 0x14, /* D */
+	START_BYTE_NUM		= 0x18, /* D */
+
+	CROP_INFO_H		= 0x20, /* D */
+	CROP_INFO_V		= 0x24, /* D */
+	EXT_ENC_CONTROL		= 0x28,	/* E */
+	ENC_PARAM_CHANGE	= 0x2C,	/* E */
+	RC_VOP_TIMING		= 0x30,	/* E, MPEG4 */
+	HEC_PERIOD		= 0x34,	/* E, MPEG4 */
+	METADATA_ENABLE		= 0x38, /* C */
+	METADATA_STATUS		= 0x3C, /* C */
+	METADATA_DISPLAY_INDEX	= 0x40,	/* C */
+	EXT_METADATA_START_ADDR	= 0x44, /* C */
+	PUT_EXTRADATA		= 0x48, /* C */
+	EXTRADATA_ADDR		= 0x4C, /* C */
+
+	ALLOC_LUMA_DPB_SIZE	= 0x64,	/* D */
+	ALLOC_CHROMA_DPB_SIZE	= 0x68,	/* D */
+	ALLOC_MV_SIZE		= 0x6C,	/* D */
+	P_B_FRAME_QP		= 0x70,	/* E */
+	SAMPLE_ASPECT_RATIO_IDC	= 0x74, /* E, H.264, depend on
+				ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+	EXTENDED_SAR		= 0x78, /* E, H.264, depend on
+				ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+	DISP_PIC_PROFILE	= 0x7C, /* D */
+	FLUSH_CMD_TYPE		= 0x80, /* C */
+	FLUSH_CMD_INBUF1	= 0x84, /* C */
+	FLUSH_CMD_INBUF2	= 0x88, /* C */
+	FLUSH_CMD_OUTBUF	= 0x8C, /* E */
+	NEW_RC_BIT_RATE		= 0x90, /* E, format as RC_BIT_RATE(0xC5A8)
+			depend on RC_BIT_RATE_CHANGE in ENC_PARAM_CHANGE */
+	NEW_RC_FRAME_RATE	= 0x94, /* E, format as RC_FRAME_RATE(0xD0D0)
+			depend on RC_FRAME_RATE_CHANGE in ENC_PARAM_CHANGE */
+	NEW_I_PERIOD		= 0x98, /* E, format as I_FRM_CTRL(0xC504)
+			depend on I_PERIOD_CHANGE in ENC_PARAM_CHANGE */
+	H264_I_PERIOD		= 0x9C, /* E, H.264, open GOP */
+	RC_CONTROL_CONFIG	= 0xA0, /* E */
+	BATCH_INPUT_ADDR	= 0xA4, /* E */
+	BATCH_OUTPUT_ADDR	= 0xA8, /* E */
+	BATCH_OUTPUT_SIZE	= 0xAC, /* E */
+	MIN_LUMA_DPB_SIZE	= 0xB0, /* D */
+	DEVICE_FORMAT_ID	= 0xB4, /* C */
+	H264_POC_TYPE		= 0xB8, /* D */
+	MIN_CHROMA_DPB_SIZE	= 0xBC, /* D */
+	DISP_PIC_FRAME_TYPE	= 0xC0, /* D */
+	FREE_LUMA_DPB		= 0xC4, /* D, VC1 MPEG4 */
+	ASPECT_RATIO_INFO	= 0xC8, /* D, MPEG4 */
+	EXTENDED_PAR		= 0xCC, /* D, MPEG4 */
+	DBG_HISTORY_INPUT0	= 0xD0, /* C */
+	DBG_HISTORY_INPUT1	= 0xD4,	/* C */
+	DBG_HISTORY_OUTPUT	= 0xD8,	/* C */
+	HIERARCHICAL_P_QP	= 0xE0, /* E, H.264 */
+};
+
+int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx);
+
+#define s5p_mfc_write_shm(ctx, x, ofs)		\
+	do {					\
+		writel(x, (ctx->shm + ofs));	\
+		wmb();				\
+	} while (0)
+
+static inline u32 s5p_mfc_read_shm(struct s5p_mfc_ctx *ctx, unsigned int ofs)
+{
+	rmb();
+	return readl(ctx->shm + ofs);
+}
+
+#endif /* S5P_MFC_SHM_H_ */
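
The accessors above hide the memory barriers, so callers in s5p_mfc_opr.c update shared-memory words with plain read-modify-write sequences (see the EXT_ENC_CONTROL handling in s5p_mfc_set_enc_params()). A minimal sketch of the same pattern for the two frame-skip bits; the standalone helper is only illustrative:

/* Sketch only: read-modify-write of a shared memory field, mirroring the
 * frame-skip update in s5p_mfc_set_enc_params(). */
static void shm_set_frame_skip_mode(struct s5p_mfc_ctx *ctx, unsigned int mode)
{
	unsigned int shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);

	shm &= ~(0x3 << 1);		/* clear the frame skip mode bits */
	shm |= (mode & 0x3) << 1;	/* install the new mode */
	s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
}
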
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
new file mode 100644
index 0000000..ea11a51
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -0,0 +1,86 @@
+# drivers/media/platform/s5p-tv/Kconfig
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+#	http://www.samsung.com/
+# Tomasz Stanislawski <t.stanislaws@samsung.com>
+#
+# Licensed under GPL
+
+config VIDEO_SAMSUNG_S5P_TV
+	bool "Samsung TV driver for S5P platform (experimental)"
+	depends on PLAT_S5P && PM_RUNTIME
+	depends on EXPERIMENTAL
+	default n
+	---help---
+	  Say Y here to enable selecting the TV output devices for
+	  the Samsung S5P platform.
+
+if VIDEO_SAMSUNG_S5P_TV
+
+config VIDEO_SAMSUNG_S5P_HDMI
+	tristate "Samsung HDMI Driver"
+	depends on VIDEO_V4L2
+	depends on VIDEO_SAMSUNG_S5P_TV
+	select VIDEO_SAMSUNG_S5P_HDMIPHY
+	help
+	  Say Y here if you want support for the HDMI output
+	  interface in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an auxiliary driver that exposes a V4L2
+	  subdev for use by other drivers. This driver requires the
+	  hdmiphy driver to work correctly.
+
+config VIDEO_SAMSUNG_S5P_HDMI_DEBUG
+	bool "Enable debug for HDMI Driver"
+	depends on VIDEO_SAMSUNG_S5P_HDMI
+	default n
+	help
+	  Enables debugging for the HDMI driver.
+
+config VIDEO_SAMSUNG_S5P_HDMIPHY
+	tristate "Samsung HDMIPHY Driver"
+	depends on VIDEO_DEV && VIDEO_V4L2 && I2C
+	depends on VIDEO_SAMSUNG_S5P_TV
+	help
+	  Say Y here if you want support for the physical HDMI
+	  interface in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an I2C driver that exposes a V4L2
+	  subdev for use by other drivers.
+
+config VIDEO_SAMSUNG_S5P_SII9234
+	tristate "Samsung SII9234 Driver"
+	depends on VIDEO_DEV && VIDEO_V4L2 && I2C
+	depends on VIDEO_SAMSUNG_S5P_TV
+	help
+	  Say Y here if you want support for the MHL interface
+	  in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an I2C driver that exposes a V4L2
+	  subdev for use by other drivers.
+
+config VIDEO_SAMSUNG_S5P_SDO
+	tristate "Samsung Analog TV Driver"
+	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on VIDEO_SAMSUNG_S5P_TV
+	help
+	  Say Y here if you want support for the analog TV output
+	  interface in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an auxiliary driver that exposes a V4L2
+	  subdev for use by other drivers. This driver requires the
+	  hdmiphy driver to work correctly.
+
+config VIDEO_SAMSUNG_S5P_MIXER
+	tristate "Samsung Mixer and Video Processor Driver"
+	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on VIDEO_SAMSUNG_S5P_TV
+	select VIDEOBUF2_DMA_CONTIG
+	help
+	  Say Y here if you want support for the Mixer in Samsung S5P SoCs.
+	  This device produces image data for one of the output interfaces.
+
+config VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+	bool "Enable debug for Mixer Driver"
+	depends on VIDEO_SAMSUNG_S5P_MIXER
+	default n
+	help
+	  Enables debugging for the Mixer driver.
+
+endif # VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/media/platform/s5p-tv/Makefile b/drivers/media/platform/s5p-tv/Makefile
new file mode 100644
index 0000000..7cd4790
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/Makefile
@@ -0,0 +1,19 @@
+# drivers/media/platform/s5p-tv/Makefile
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+#	http://www.samsung.com/
+# Tomasz Stanislawski <t.stanislaws@samsung.com>
+#
+# Licensed under GPL
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMIPHY) += s5p-hdmiphy.o
+s5p-hdmiphy-y += hdmiphy_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SII9234) += s5p-sii9234.o
+s5p-sii9234-y += sii9234_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMI) += s5p-hdmi.o
+s5p-hdmi-y += hdmi_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SDO) += s5p-sdo.o
+s5p-sdo-y += sdo_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MIXER) += s5p-mixer.o
+s5p-mixer-y += mixer_drv.o mixer_video.o mixer_reg.o mixer_grp_layer.o mixer_vp_layer.o
+
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
new file mode 100644
index 0000000..20cb6ee
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -0,0 +1,1007 @@
+/*
+ * Samsung HDMI interface driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/bug.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/s5p_hdmi.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+
+#include "regs-hdmi.h"
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung HDMI");
+MODULE_LICENSE("GPL");
+
+/* default preset configured on probe */
+#define HDMI_DEFAULT_PRESET V4L2_DV_480P59_94
+
+struct hdmi_pulse {
+	u32 beg;
+	u32 end;
+};
+
+struct hdmi_timings {
+	struct hdmi_pulse hact;
+	u32 hsyn_pol; /* 0 - high, 1 - low */
+	struct hdmi_pulse hsyn;
+	u32 interlaced;
+	struct hdmi_pulse vact[2];
+	u32 vsyn_pol; /* 0 - high, 1 - low */
+	u32 vsyn_off;
+	struct hdmi_pulse vsyn[2];
+};
+
+struct hdmi_resources {
+	struct clk *hdmi;
+	struct clk *sclk_hdmi;
+	struct clk *sclk_pixel;
+	struct clk *sclk_hdmiphy;
+	struct clk *hdmiphy;
+	struct regulator_bulk_data *regul_bulk;
+	int regul_count;
+};
+
+struct hdmi_device {
+	/** base address of HDMI registers */
+	void __iomem *regs;
+	/** HDMI interrupt */
+	unsigned int irq;
+	/** pointer to device parent */
+	struct device *dev;
+	/** subdev generated by HDMI device */
+	struct v4l2_subdev sd;
+	/** V4L2 device structure */
+	struct v4l2_device v4l2_dev;
+	/** subdev of HDMIPHY interface */
+	struct v4l2_subdev *phy_sd;
+	/** subdev of MHL interface */
+	struct v4l2_subdev *mhl_sd;
+	/** configuration of current graphic mode */
+	const struct hdmi_timings *cur_conf;
+	/** flag indicating that timings are dirty */
+	int cur_conf_dirty;
+	/** current preset */
+	u32 cur_preset;
+	/** other resources */
+	struct hdmi_resources res;
+};
+
+static struct platform_device_id hdmi_driver_types[] = {
+	{
+		.name		= "s5pv210-hdmi",
+	}, {
+		.name		= "exynos4-hdmi",
+	}, {
+		/* end node */
+	}
+};
+
+static const struct v4l2_subdev_ops hdmi_sd_ops;
+
+static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct hdmi_device, sd);
+}
+
+static inline
+void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
+{
+	writel(value, hdev->regs + reg_id);
+}
+
+static inline
+void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
+{
+	u32 old = readl(hdev->regs + reg_id);
+	value = (value & mask) | (old & ~mask);
+	writel(value, hdev->regs + reg_id);
+}
+
+static inline
+void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
+{
+	writeb(value, hdev->regs + reg_id);
+}
+
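+/*
+ * Write an up-to-4-byte value into a group of byte-wide registers spaced
+ * 4 bytes apart, starting at reg_id. The switch below intentionally falls
+ * through, so e.g. hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3, v) stores
+ * bits 23..16, 15..8 and 7..0 of v into three consecutive registers.
+ */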
+static inline
+void hdmi_writebn(struct hdmi_device *hdev, u32 reg_id, int n, u32 value)
+{
+	switch (n) {
+	default:
+		writeb(value >> 24, hdev->regs + reg_id + 12);
+	case 3:
+		writeb(value >> 16, hdev->regs + reg_id + 8);
+	case 2:
+		writeb(value >>  8, hdev->regs + reg_id + 4);
+	case 1:
+		writeb(value >>  0, hdev->regs + reg_id + 0);
+	}
+}
+
+static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
+{
+	return readl(hdev->regs + reg_id);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
+{
+	struct hdmi_device *hdev = dev_data;
+	u32 intc_flag;
+
+	(void)irq;
+	intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
+	/* clearing flags for HPD plug/unplug */
+	if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+		printk(KERN_INFO "unplugged\n");
+		hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
+			HDMI_INTC_FLAG_HPD_UNPLUG);
+	}
+	if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+		printk(KERN_INFO "plugged\n");
+		hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
+			HDMI_INTC_FLAG_HPD_PLUG);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void hdmi_reg_init(struct hdmi_device *hdev)
+{
+	/* enable HPD interrupts */
+	hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+	/* choose DVI mode */
+	hdmi_write_mask(hdev, HDMI_MODE_SEL,
+		HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+	hdmi_write_mask(hdev, HDMI_CON_2, ~0,
+		HDMI_DVI_PERAMBLE_EN | HDMI_DVI_BAND_EN);
+	/* disable bluescreen */
+	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+	/* choose the bluescreen color */
+	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
+	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
+	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
+}
+
+static void hdmi_timing_apply(struct hdmi_device *hdev,
+	const struct hdmi_timings *t)
+{
+	/* setting core registers */
+	hdmi_writebn(hdev, HDMI_H_BLANK_0, 2, t->hact.beg);
+	hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3,
+		(t->hsyn_pol << 20) | (t->hsyn.end << 10) | t->hsyn.beg);
+	hdmi_writeb(hdev, HDMI_VSYNC_POL, t->vsyn_pol);
+	hdmi_writebn(hdev, HDMI_V_BLANK_0, 3,
+		(t->vact[0].beg << 11) | t->vact[0].end);
+	hdmi_writebn(hdev, HDMI_V_SYNC_GEN_1_0, 3,
+		(t->vsyn[0].beg << 12) | t->vsyn[0].end);
+	if (t->interlaced) {
+		u32 vsyn_trans = t->hsyn.beg + t->vsyn_off;
+
+		hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 1);
+		hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
+			(t->hact.end << 12) | t->vact[1].end);
+		hdmi_writebn(hdev, HDMI_V_BLANK_F_0, 3,
+			(t->vact[1].end << 11) | t->vact[1].beg);
+		hdmi_writebn(hdev, HDMI_V_SYNC_GEN_2_0, 3,
+			(t->vsyn[1].beg << 12) | t->vsyn[1].end);
+		hdmi_writebn(hdev, HDMI_V_SYNC_GEN_3_0, 3,
+			(vsyn_trans << 12) | vsyn_trans);
+	} else {
+		hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 0);
+		hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
+			(t->hact.end << 12) | t->vact[0].end);
+	}
+
+	/* Timing generator registers */
+	hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, t->hact.end);
+	hdmi_writebn(hdev, HDMI_TG_HACT_ST_L, 2, t->hact.beg);
+	hdmi_writebn(hdev, HDMI_TG_HACT_SZ_L, 2, t->hact.end - t->hact.beg);
+	hdmi_writebn(hdev, HDMI_TG_VSYNC_L, 2, t->vsyn[0].beg);
+	hdmi_writebn(hdev, HDMI_TG_VACT_ST_L, 2, t->vact[0].beg);
+	hdmi_writebn(hdev, HDMI_TG_VACT_SZ_L, 2,
+		t->vact[0].end - t->vact[0].beg);
+	hdmi_writebn(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, 2, t->vsyn[0].beg);
+	hdmi_writebn(hdev, HDMI_TG_FIELD_TOP_HDMI_L, 2, t->vsyn[0].beg);
+	if (t->interlaced) {
+		hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_FIELD_EN);
+		hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[1].end);
+		hdmi_writebn(hdev, HDMI_TG_VSYNC2_L, 2, t->vsyn[1].beg);
+		hdmi_writebn(hdev, HDMI_TG_FIELD_CHG_L, 2, t->vact[0].end);
+		hdmi_writebn(hdev, HDMI_TG_VACT_ST2_L, 2, t->vact[1].beg);
+		hdmi_writebn(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, 2, t->vsyn[1].beg);
+		hdmi_writebn(hdev, HDMI_TG_FIELD_BOT_HDMI_L, 2, t->vsyn[1].beg);
+	} else {
+		hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_FIELD_EN);
+		hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[0].end);
+	}
+}
+
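+/*
+ * Push the currently selected timings into the hardware. The ordering
+ * below matters: the PHY is reset and reprogrammed for the new preset
+ * first, then the HDMI core is reset, reinitialized and finally loaded
+ * with the timing configuration.
+ */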
+static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
+{
+	struct device *dev = hdmi_dev->dev;
+	const struct hdmi_timings *conf = hdmi_dev->cur_conf;
+	struct v4l2_dv_preset preset;
+	int ret;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/* skip if conf is already synchronized with HW */
+	if (!hdmi_dev->cur_conf_dirty)
+		return 0;
+
+	/* reset hdmiphy */
+	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+	mdelay(10);
+	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT,  0, HDMI_PHY_SW_RSTOUT);
+	mdelay(10);
+
+	/* configure presets */
+	preset.preset = hdmi_dev->cur_preset;
+	ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_preset, &preset);
+	if (ret) {
+		dev_err(dev, "failed to set preset (%u)\n", preset.preset);
+		return ret;
+	}
+
+	/* resetting HDMI core */
+	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT,  0, HDMI_CORE_SW_RSTOUT);
+	mdelay(10);
+	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+	mdelay(10);
+
+	hdmi_reg_init(hdmi_dev);
+
+	/* setting core registers */
+	hdmi_timing_apply(hdmi_dev, conf);
+
+	hdmi_dev->cur_conf_dirty = 0;
+
+	return 0;
+}
+
+static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
+{
+#define DUMPREG(reg_id) \
+	dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
+		readl(hdev->regs + reg_id))
+
+	dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_INTC_FLAG);
+	DUMPREG(HDMI_INTC_CON);
+	DUMPREG(HDMI_HPD_STATUS);
+	DUMPREG(HDMI_PHY_RSTOUT);
+	DUMPREG(HDMI_PHY_VPLL);
+	DUMPREG(HDMI_PHY_CMU);
+	DUMPREG(HDMI_CORE_RSTOUT);
+
+	dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_CON_0);
+	DUMPREG(HDMI_CON_1);
+	DUMPREG(HDMI_CON_2);
+	DUMPREG(HDMI_SYS_STATUS);
+	DUMPREG(HDMI_PHY_STATUS);
+	DUMPREG(HDMI_STATUS_EN);
+	DUMPREG(HDMI_HPD);
+	DUMPREG(HDMI_MODE_SEL);
+	DUMPREG(HDMI_HPD_GEN);
+	DUMPREG(HDMI_DC_CONTROL);
+	DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+	dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_H_BLANK_0);
+	DUMPREG(HDMI_H_BLANK_1);
+	DUMPREG(HDMI_V_BLANK_0);
+	DUMPREG(HDMI_V_BLANK_1);
+	DUMPREG(HDMI_V_BLANK_2);
+	DUMPREG(HDMI_H_V_LINE_0);
+	DUMPREG(HDMI_H_V_LINE_1);
+	DUMPREG(HDMI_H_V_LINE_2);
+	DUMPREG(HDMI_VSYNC_POL);
+	DUMPREG(HDMI_INT_PRO_MODE);
+	DUMPREG(HDMI_V_BLANK_F_0);
+	DUMPREG(HDMI_V_BLANK_F_1);
+	DUMPREG(HDMI_V_BLANK_F_2);
+	DUMPREG(HDMI_H_SYNC_GEN_0);
+	DUMPREG(HDMI_H_SYNC_GEN_1);
+	DUMPREG(HDMI_H_SYNC_GEN_2);
+	DUMPREG(HDMI_V_SYNC_GEN_1_0);
+	DUMPREG(HDMI_V_SYNC_GEN_1_1);
+	DUMPREG(HDMI_V_SYNC_GEN_1_2);
+	DUMPREG(HDMI_V_SYNC_GEN_2_0);
+	DUMPREG(HDMI_V_SYNC_GEN_2_1);
+	DUMPREG(HDMI_V_SYNC_GEN_2_2);
+	DUMPREG(HDMI_V_SYNC_GEN_3_0);
+	DUMPREG(HDMI_V_SYNC_GEN_3_1);
+	DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+	dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_TG_CMD);
+	DUMPREG(HDMI_TG_H_FSZ_L);
+	DUMPREG(HDMI_TG_H_FSZ_H);
+	DUMPREG(HDMI_TG_HACT_ST_L);
+	DUMPREG(HDMI_TG_HACT_ST_H);
+	DUMPREG(HDMI_TG_HACT_SZ_L);
+	DUMPREG(HDMI_TG_HACT_SZ_H);
+	DUMPREG(HDMI_TG_V_FSZ_L);
+	DUMPREG(HDMI_TG_V_FSZ_H);
+	DUMPREG(HDMI_TG_VSYNC_L);
+	DUMPREG(HDMI_TG_VSYNC_H);
+	DUMPREG(HDMI_TG_VSYNC2_L);
+	DUMPREG(HDMI_TG_VSYNC2_H);
+	DUMPREG(HDMI_TG_VACT_ST_L);
+	DUMPREG(HDMI_TG_VACT_ST_H);
+	DUMPREG(HDMI_TG_VACT_SZ_L);
+	DUMPREG(HDMI_TG_VACT_SZ_H);
+	DUMPREG(HDMI_TG_FIELD_CHG_L);
+	DUMPREG(HDMI_TG_FIELD_CHG_H);
+	DUMPREG(HDMI_TG_VACT_ST2_L);
+	DUMPREG(HDMI_TG_VACT_ST2_H);
+	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static const struct hdmi_timings hdmi_timings_480p = {
+	.hact = { .beg = 138, .end = 858 },
+	.hsyn_pol = 1,
+	.hsyn = { .beg = 16, .end = 16 + 62 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 42 + 3, .end = 522 + 3 },
+	.vsyn_pol = 1,
+	.vsyn[0] = { .beg = 6 + 3, .end = 12 + 3},
+};
+
+static const struct hdmi_timings hdmi_timings_576p50 = {
+	.hact = { .beg = 144, .end = 864 },
+	.hsyn_pol = 1,
+	.hsyn = { .beg = 12, .end = 12 + 64 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 44 + 5, .end = 620 + 5 },
+	.vsyn_pol = 1,
+	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
+};
+
+static const struct hdmi_timings hdmi_timings_720p60 = {
+	.hact = { .beg = 370, .end = 1650 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 110, .end = 110 + 40 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
+	.vsyn_pol = 0,
+	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
+};
+
+static const struct hdmi_timings hdmi_timings_720p50 = {
+	.hact = { .beg = 700, .end = 1980 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 440, .end = 440 + 40 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
+	.vsyn_pol = 0,
+	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p24 = {
+	.hact = { .beg = 830, .end = 2750 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 638, .end = 638 + 44 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+	.vsyn_pol = 0,
+	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p60 = {
+	.hact = { .beg = 280, .end = 2200 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 88, .end = 88 + 44 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+	.vsyn_pol = 0,
+	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct hdmi_timings hdmi_timings_1080i60 = {
+	.hact = { .beg = 280, .end = 2200 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 88, .end = 88 + 44 },
+	.interlaced = 1,
+	.vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
+	.vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
+	.vsyn_pol = 0,
+	.vsyn_off = 1100,
+	.vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
+	.vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
+};
+
+static const struct hdmi_timings hdmi_timings_1080i50 = {
+	.hact = { .beg = 720, .end = 2640 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 528, .end = 528 + 44 },
+	.interlaced = 1,
+	.vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
+	.vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
+	.vsyn_pol = 0,
+	.vsyn_off = 1320,
+	.vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
+	.vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p50 = {
+	.hact = { .beg = 720, .end = 2640 },
+	.hsyn_pol = 0,
+	.hsyn = { .beg = 528, .end = 528 + 44 },
+	.interlaced = 0,
+	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+	.vsyn_pol = 0,
+	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct {
+	u32 preset;
+	const struct hdmi_timings *timings;
+} hdmi_timings[] = {
+	{ V4L2_DV_480P59_94, &hdmi_timings_480p },
+	{ V4L2_DV_576P50, &hdmi_timings_576p50 },
+	{ V4L2_DV_720P50, &hdmi_timings_720p50 },
+	{ V4L2_DV_720P59_94, &hdmi_timings_720p60 },
+	{ V4L2_DV_720P60, &hdmi_timings_720p60 },
+	{ V4L2_DV_1080P24, &hdmi_timings_1080p24 },
+	{ V4L2_DV_1080P30, &hdmi_timings_1080p60 },
+	{ V4L2_DV_1080P50, &hdmi_timings_1080p50 },
+	{ V4L2_DV_1080I50, &hdmi_timings_1080i50 },
+	{ V4L2_DV_1080I60, &hdmi_timings_1080i60 },
+	{ V4L2_DV_1080P60, &hdmi_timings_1080p60 },
+};
+
+static const struct hdmi_timings *hdmi_preset2timings(u32 preset)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_timings); ++i)
+		if (hdmi_timings[i].preset == preset)
+			return  hdmi_timings[i].timings;
+	return NULL;
+}
+
+static int hdmi_streamon(struct hdmi_device *hdev)
+{
+	struct device *dev = hdev->dev;
+	struct hdmi_resources *res = &hdev->res;
+	int ret, tries;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	ret = hdmi_conf_apply(hdev);
+	if (ret)
+		return ret;
+
+	ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
+	if (ret)
+		return ret;
+
+	/* waiting for HDMIPHY's PLL to get to steady state */
+	for (tries = 100; tries; --tries) {
+		u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
+		if (val & HDMI_PHY_STATUS_READY)
+			break;
+		mdelay(1);
+	}
+	/* steady state not achieved */
+	if (tries == 0) {
+		dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
+		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+		hdmi_dumpregs(hdev, "hdmiphy - s_stream");
+		return -EIO;
+	}
+
+	/* starting MHL */
+	ret = v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 1);
+	if (hdev->mhl_sd && ret) {
+		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+		hdmi_dumpregs(hdev, "mhl - s_stream");
+		return -EIO;
+	}
+
+	/* hdmiphy clock is used for HDMI in streaming mode */
+	clk_disable(res->sclk_hdmi);
+	clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
+	clk_enable(res->sclk_hdmi);
+
+	/* enable HDMI and timing generator */
+	hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
+	hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+	hdmi_dumpregs(hdev, "streamon");
+	return 0;
+}
+
+static int hdmi_streamoff(struct hdmi_device *hdev)
+{
+	struct device *dev = hdev->dev;
+	struct hdmi_resources *res = &hdev->res;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
+	hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);
+
+	/* pixel(vpll) clock is used for HDMI in config mode */
+	clk_disable(res->sclk_hdmi);
+	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+	clk_enable(res->sclk_hdmi);
+
+	v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 0);
+	v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+
+	hdmi_dumpregs(hdev, "streamoff");
+	return 0;
+}
+
+static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+	struct device *dev = hdev->dev;
+
+	dev_dbg(dev, "%s(%d)\n", __func__, enable);
+	if (enable)
+		return hdmi_streamon(hdev);
+	return hdmi_streamoff(hdev);
+}
+
+static void hdmi_resource_poweron(struct hdmi_resources *res)
+{
+	/* turn HDMI power on */
+	regulator_bulk_enable(res->regul_count, res->regul_bulk);
+	/* power-on hdmi physical interface */
+	clk_enable(res->hdmiphy);
+	/* use VPP as parent clock; HDMIPHY is not working yet */
+	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+	/* turn clocks on */
+	clk_enable(res->sclk_hdmi);
+}
+
+static void hdmi_resource_poweroff(struct hdmi_resources *res)
+{
+	/* turn clocks off */
+	clk_disable(res->sclk_hdmi);
+	/* power-off hdmiphy */
+	clk_disable(res->hdmiphy);
+	/* turn HDMI power off */
+	regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_s_power(struct v4l2_subdev *sd, int on)
+{
+	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+	int ret;
+
+	if (on)
+		ret = pm_runtime_get_sync(hdev->dev);
+	else
+		ret = pm_runtime_put_sync(hdev->dev);
+	/* only values < 0 indicate errors */
+	return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
+	struct v4l2_dv_preset *preset)
+{
+	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+	struct device *dev = hdev->dev;
+	const struct hdmi_timings *conf;
+
+	conf = hdmi_preset2timings(preset->preset);
+	if (conf == NULL) {
+		dev_err(dev, "preset (%u) not supported\n", preset->preset);
+		return -EINVAL;
+	}
+	hdev->cur_conf = conf;
+	hdev->cur_conf_dirty = 1;
+	hdev->cur_preset = preset->preset;
+	return 0;
+}
+
+static int hdmi_g_dv_preset(struct v4l2_subdev *sd,
+	struct v4l2_dv_preset *preset)
+{
+	memset(preset, 0, sizeof(*preset));
+	preset->preset = sd_to_hdmi_dev(sd)->cur_preset;
+	return 0;
+}
+
+static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
+	  struct v4l2_mbus_framefmt *fmt)
+{
+	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+	const struct hdmi_timings *t = hdev->cur_conf;
+
+	dev_dbg(hdev->dev, "%s\n", __func__);
+	if (!hdev->cur_conf)
+		return -EINVAL;
+	memset(fmt, 0, sizeof *fmt);
+	fmt->width = t->hact.end - t->hact.beg;
+	fmt->height = t->vact[0].end - t->vact[0].beg;
+	fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	if (t->interlaced) {
+		fmt->field = V4L2_FIELD_INTERLACED;
+		fmt->height *= 2;
+	} else {
+		fmt->field = V4L2_FIELD_NONE;
+	}
+	return 0;
+}
+
+static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
+	struct v4l2_dv_enum_preset *preset)
+{
+	if (preset->index >= ARRAY_SIZE(hdmi_timings))
+		return -EINVAL;
+	return v4l_fill_dv_preset_info(hdmi_timings[preset->index].preset,
+		preset);
+}
+
+static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
+	.s_power = hdmi_s_power,
+};
+
+static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
+	.s_dv_preset = hdmi_s_dv_preset,
+	.g_dv_preset = hdmi_g_dv_preset,
+	.enum_dv_presets = hdmi_enum_dv_presets,
+	.g_mbus_fmt = hdmi_g_mbus_fmt,
+	.s_stream = hdmi_s_stream,
+};
+
+static const struct v4l2_subdev_ops hdmi_sd_ops = {
+	.core = &hdmi_sd_core_ops,
+	.video = &hdmi_sd_video_ops,
+};
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+
+	dev_dbg(dev, "%s\n", __func__);
+	v4l2_subdev_call(hdev->mhl_sd, core, s_power, 0);
+	hdmi_resource_poweroff(&hdev->res);
+	/* flag that device context is lost */
+	hdev->cur_conf_dirty = 1;
+	return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+	int ret = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	hdmi_resource_poweron(&hdev->res);
+
+	/* starting MHL */
+	ret = v4l2_subdev_call(hdev->mhl_sd, core, s_power, 1);
+	if (hdev->mhl_sd && ret)
+		goto fail;
+
+	dev_dbg(dev, "poweron succeed\n");
+
+	return 0;
+
+fail:
+	hdmi_resource_poweroff(&hdev->res);
+	dev_err(dev, "poweron failed\n");
+
+	return ret;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+	.runtime_suspend = hdmi_runtime_suspend,
+	.runtime_resume	 = hdmi_runtime_resume,
+};
+
+static void hdmi_resources_cleanup(struct hdmi_device *hdev)
+{
+	struct hdmi_resources *res = &hdev->res;
+
+	dev_dbg(hdev->dev, "HDMI resource cleanup\n");
+	/* put clocks, power */
+	if (res->regul_count)
+		regulator_bulk_free(res->regul_count, res->regul_bulk);
+	/* kfree is NULL-safe */
+	kfree(res->regul_bulk);
+	if (!IS_ERR_OR_NULL(res->hdmiphy))
+		clk_put(res->hdmiphy);
+	if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+		clk_put(res->sclk_hdmiphy);
+	if (!IS_ERR_OR_NULL(res->sclk_pixel))
+		clk_put(res->sclk_pixel);
+	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+		clk_put(res->sclk_hdmi);
+	if (!IS_ERR_OR_NULL(res->hdmi))
+		clk_put(res->hdmi);
+	memset(res, 0, sizeof *res);
+}
+
+static int hdmi_resources_init(struct hdmi_device *hdev)
+{
+	struct device *dev = hdev->dev;
+	struct hdmi_resources *res = &hdev->res;
+	static char *supply[] = {
+		"hdmi-en",
+		"vdd",
+		"vdd_osc",
+		"vdd_pll",
+	};
+	int i, ret;
+
+	dev_dbg(dev, "HDMI resource init\n");
+
+	memset(res, 0, sizeof *res);
+	/* get clocks, power */
+
+	res->hdmi = clk_get(dev, "hdmi");
+	if (IS_ERR_OR_NULL(res->hdmi)) {
+		dev_err(dev, "failed to get clock 'hdmi'\n");
+		goto fail;
+	}
+	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+		goto fail;
+	}
+	res->sclk_pixel = clk_get(dev, "sclk_pixel");
+	if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+		dev_err(dev, "failed to get clock 'sclk_pixel'\n");
+		goto fail;
+	}
+	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+	if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+		dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
+		goto fail;
+	}
+	res->hdmiphy = clk_get(dev, "hdmiphy");
+	if (IS_ERR_OR_NULL(res->hdmiphy)) {
+		dev_err(dev, "failed to get clock 'hdmiphy'\n");
+		goto fail;
+	}
+	res->regul_bulk = kcalloc(ARRAY_SIZE(supply),
+				  sizeof(res->regul_bulk[0]), GFP_KERNEL);
+	if (!res->regul_bulk) {
+		dev_err(dev, "failed to get memory for regulators\n");
+		goto fail;
+	}
+	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+		res->regul_bulk[i].supply = supply[i];
+		res->regul_bulk[i].consumer = NULL;
+	}
+
+	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+	if (ret) {
+		dev_err(dev, "failed to get regulators\n");
+		goto fail;
+	}
+	res->regul_count = ARRAY_SIZE(supply);
+
+	return 0;
+fail:
+	dev_err(dev, "HDMI resource init - failed\n");
+	hdmi_resources_cleanup(hdev);
+	return -ENODEV;
+}
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct i2c_adapter *adapter;
+	struct v4l2_subdev *sd;
+	struct hdmi_device *hdmi_dev = NULL;
+	struct s5p_hdmi_platform_data *pdata = dev->platform_data;
+	int ret;
+
+	dev_dbg(dev, "probe start\n");
+
+	if (!pdata) {
+		dev_err(dev, "platform data is missing\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	hdmi_dev = devm_kzalloc(&pdev->dev, sizeof(*hdmi_dev), GFP_KERNEL);
+	if (!hdmi_dev) {
+		dev_err(dev, "out of memory\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	hdmi_dev->dev = dev;
+
+	ret = hdmi_resources_init(hdmi_dev);
+	if (ret)
+		goto fail;
+
+	/* mapping HDMI registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(dev, "get memory resource failed.\n");
+		ret = -ENXIO;
+		goto fail_init;
+	}
+
+	hdmi_dev->regs = devm_ioremap(&pdev->dev, res->start,
+				      resource_size(res));
+	if (hdmi_dev->regs == NULL) {
+		dev_err(dev, "register mapping failed.\n");
+		ret = -ENXIO;
+		goto fail_init;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(dev, "get interrupt resource failed.\n");
+		ret = -ENXIO;
+		goto fail_init;
+	}
+
+	ret = devm_request_irq(&pdev->dev, res->start, hdmi_irq_handler, 0,
+			       "hdmi", hdmi_dev);
+	if (ret) {
+		dev_err(dev, "request interrupt failed.\n");
+		goto fail_init;
+	}
+	hdmi_dev->irq = res->start;
+
+	/* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
+	strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
+		sizeof(hdmi_dev->v4l2_dev.name));
+	/* passing NULL owner prevents driver from erasing drvdata */
+	ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
+	if (ret) {
+		dev_err(dev, "could not register v4l2 device.\n");
+		goto fail_init;
+	}
+
+	/* testing if hdmiphy info is present */
+	if (!pdata->hdmiphy_info) {
+		dev_err(dev, "hdmiphy info is missing in platform data\n");
+		ret = -ENXIO;
+		goto fail_vdev;
+	}
+
+	adapter = i2c_get_adapter(pdata->hdmiphy_bus);
+	if (adapter == NULL) {
+		dev_err(dev, "hdmiphy adapter request failed\n");
+		ret = -ENXIO;
+		goto fail_vdev;
+	}
+
+	hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
+		adapter, pdata->hdmiphy_info, NULL);
+	/* whether this succeeded or not, the adapter is no longer needed */
+	i2c_put_adapter(adapter);
+	if (hdmi_dev->phy_sd == NULL) {
+		dev_err(dev, "missing subdev for hdmiphy\n");
+		ret = -ENODEV;
+		goto fail_vdev;
+	}
+
+	/* initialization of MHL interface if present */
+	if (pdata->mhl_info) {
+		adapter = i2c_get_adapter(pdata->mhl_bus);
+		if (adapter == NULL) {
+			dev_err(dev, "MHL adapter request failed\n");
+			ret = -ENXIO;
+			goto fail_vdev;
+		}
+
+		hdmi_dev->mhl_sd = v4l2_i2c_new_subdev_board(
+			&hdmi_dev->v4l2_dev, adapter,
+			pdata->mhl_info, NULL);
+		/* whether this succeeded or not, the adapter is no longer needed */
+		i2c_put_adapter(adapter);
+		if (hdmi_dev->mhl_sd == NULL) {
+			dev_err(dev, "missing subdev for MHL\n");
+			ret = -ENODEV;
+			goto fail_vdev;
+		}
+	}
+
+	clk_enable(hdmi_dev->res.hdmi);
+
+	pm_runtime_enable(dev);
+
+	sd = &hdmi_dev->sd;
+	v4l2_subdev_init(sd, &hdmi_sd_ops);
+	sd->owner = THIS_MODULE;
+
+	strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
+	hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
+	/* FIXME: no failure handling if the default preset is not supported */
+	hdmi_dev->cur_conf = hdmi_preset2timings(hdmi_dev->cur_preset);
+	hdmi_dev->cur_conf_dirty = 1;
+
+	/* store the subdev for calls that only have access to struct device */
+	dev_set_drvdata(dev, sd);
+
+	dev_info(dev, "probe successful\n");
+
+	return 0;
+
+fail_vdev:
+	v4l2_device_unregister(&hdmi_dev->v4l2_dev);
+
+fail_init:
+	hdmi_resources_cleanup(hdmi_dev);
+
+fail:
+	dev_err(dev, "probe failed\n");
+	return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
+
+	pm_runtime_disable(dev);
+	clk_disable(hdmi_dev->res.hdmi);
+	v4l2_device_unregister(&hdmi_dev->v4l2_dev);
+	disable_irq(hdmi_dev->irq);
+	hdmi_resources_cleanup(hdmi_dev);
+	dev_info(dev, "remove successful\n");
+
+	return 0;
+}
+
+static struct platform_driver hdmi_driver __refdata = {
+	.probe = hdmi_probe,
+	.remove = __devexit_p(hdmi_remove),
+	.id_table = hdmi_driver_types,
+	.driver = {
+		.name = "s5p-hdmi",
+		.owner = THIS_MODULE,
+		.pm = &hdmi_pm_ops,
+	}
+};
+
+module_platform_driver(hdmi_driver);
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
new file mode 100644
index 0000000..f67b386
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -0,0 +1,329 @@
+/*
+ * Samsung HDMI Physical interface driver
+ *
+ * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
+ * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+
+#include <media/v4l2-subdev.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
+MODULE_LICENSE("GPL");
+
+struct hdmiphy_conf {
+	unsigned long pixclk;
+	const u8 *data;
+};
+
+struct hdmiphy_ctx {
+	struct v4l2_subdev sd;
+	const struct hdmiphy_conf *conf_tab;
+};
+
+static const struct hdmiphy_conf hdmiphy_conf_s5pv210[] = {
+	{ .pixclk = 27000000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+		0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 27027000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+		0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 74176000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+		0x6D, 0x10, 0x01, 0x52, 0xEF, 0xF3, 0x54, 0xB9,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 74250000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+		0x6A, 0x10, 0x01, 0x52, 0xFF, 0xF1, 0x54, 0xBA,
+		0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
+	},
+	{ /* end marker */ }
+};
+
+static const struct hdmiphy_conf hdmiphy_conf_exynos4210[] = {
+	{ .pixclk = 27000000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+		0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 27027000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+		0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 74176000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+		0x6D, 0x10, 0x01, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 74250000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+		0x6A, 0x10, 0x01, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
+		0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 148352000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+		0x6D, 0x18, 0x00, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
+		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x11, 0x40, 0xA5, 0x26, 0x02, 0x00, 0x00, 0x00, }
+	},
+	{ .pixclk = 148500000, .data = (u8 [32]) {
+		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+		0x6A, 0x18, 0x00, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
+		0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+		0x11, 0x40, 0xA4, 0x26, 0x02, 0x00, 0x00, 0x00, }
+	},
+	{ /* end marker */ }
+};
+
+static const struct hdmiphy_conf hdmiphy_conf_exynos4212[] = {
+	{ .pixclk = 27000000, .data = (u8 [32]) {
+		0x01, 0x11, 0x2D, 0x75, 0x00, 0x01, 0x00, 0x08,
+		0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
+		0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 27027000, .data = (u8 [32]) {
+		0x01, 0x91, 0x2D, 0x72, 0x00, 0x64, 0x12, 0x08,
+		0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
+		0x54, 0xE2, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 74176000, .data = (u8 [32]) {
+		0x01, 0x91, 0x3E, 0x35, 0x00, 0x5B, 0xDE, 0x08,
+		0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
+		0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 74250000, .data = (u8 [32]) {
+		0x01, 0x91, 0x3E, 0x35, 0x00, 0x40, 0xF0, 0x08,
+		0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
+		0x54, 0xA4, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 148500000, .data = (u8 [32]) {
+		0x01, 0x91, 0x3E, 0x15, 0x00, 0x40, 0xF0, 0x08,
+		0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0xA4,
+		0x54, 0x4A, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ /* end marker */ }
+};
+
+static const struct hdmiphy_conf hdmiphy_conf_exynos4412[] = {
+	{ .pixclk = 27000000, .data = (u8 [32]) {
+		0x01, 0x11, 0x2D, 0x75, 0x40, 0x01, 0x00, 0x08,
+		0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+		0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 27027000, .data = (u8 [32]) {
+		0x01, 0x91, 0x2D, 0x72, 0x40, 0x64, 0x12, 0x08,
+		0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+		0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 74176000, .data = (u8 [32]) {
+		0x01, 0x91, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0x08,
+		0x81, 0x20, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+		0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 74250000, .data = (u8 [32]) {
+		0x01, 0x91, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
+		0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+		0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ .pixclk = 148500000, .data = (u8 [32]) {
+		0x01, 0x91, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
+		0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+		0x54, 0x4B, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
+	},
+	{ /* end marker */ }
+};
+
+static inline struct hdmiphy_ctx *sd_to_ctx(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct hdmiphy_ctx, sd);
+}
+
+static unsigned long hdmiphy_preset_to_pixclk(u32 preset)
+{
+	static const unsigned long pixclk[] = {
+		[V4L2_DV_480P59_94] =  27000000,
+		[V4L2_DV_576P50]    =  27000000,
+		[V4L2_DV_720P59_94] =  74176000,
+		[V4L2_DV_720P50]    =  74250000,
+		[V4L2_DV_720P60]    =  74250000,
+		[V4L2_DV_1080P24]   =  74250000,
+		[V4L2_DV_1080P30]   =  74250000,
+		[V4L2_DV_1080I50]   =  74250000,
+		[V4L2_DV_1080I60]   =  74250000,
+		[V4L2_DV_1080P50]   = 148500000,
+		[V4L2_DV_1080P60]   = 148500000,
+	};
+	if (preset < ARRAY_SIZE(pixclk))
+		return pixclk[preset];
+	else
+		return 0;
+}
+
+static const u8 *hdmiphy_find_conf(u32 preset, const struct hdmiphy_conf *conf)
+{
+	unsigned long pixclk;
+
+	pixclk = hdmiphy_preset_to_pixclk(preset);
+	if (!pixclk)
+		return NULL;
+
+	for (; conf->pixclk; ++conf)
+		if (conf->pixclk == pixclk)
+			return conf->data;
+	return NULL;
+}
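+
+/*
+ * Example (illustrative): V4L2_DV_720P60 maps to a 74250000 Hz pixel
+ * clock, so hdmiphy_find_conf() returns the 32-byte PHY configuration
+ * with .pixclk == 74250000 from the SoC's table, or NULL when no entry
+ * matches.
+ */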
+
+static int hdmiphy_s_power(struct v4l2_subdev *sd, int on)
+{
+	/* to be implemented */
+	return 0;
+}
+
+static int hdmiphy_s_dv_preset(struct v4l2_subdev *sd,
+	struct v4l2_dv_preset *preset)
+{
+	const u8 *data;
+	u8 buffer[32];
+	int ret;
+	struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct device *dev = &client->dev;
+
+	dev_info(dev, "s_dv_preset(preset = %d)\n", preset->preset);
+	data = hdmiphy_find_conf(preset->preset, ctx->conf_tab);
+	if (!data) {
+		dev_err(dev, "format not supported\n");
+		return -EINVAL;
+	}
+
+	/* storing configuration to the device */
+	memcpy(buffer, data, 32);
+	ret = i2c_master_send(client, buffer, 32);
+	if (ret != 32) {
+		dev_err(dev, "failed to configure HDMIPHY via I2C\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int hdmiphy_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct device *dev = &client->dev;
+	u8 buffer[2];
+	int ret;
+
+	dev_info(dev, "s_stream(%d)\n", enable);
+	/* switch between configuration and operation mode */
+	buffer[0] = 0x1f;
+	buffer[1] = enable ? 0x80 : 0x00;
+
+	ret = i2c_master_send(client, buffer, 2);
+	if (ret != 2) {
+		dev_err(dev, "stream (%d) failed\n", enable);
+		return -EIO;
+	}
+	return 0;
+}
+
+static const struct v4l2_subdev_core_ops hdmiphy_core_ops = {
+	.s_power =  hdmiphy_s_power,
+};
+
+static const struct v4l2_subdev_video_ops hdmiphy_video_ops = {
+	.s_dv_preset = hdmiphy_s_dv_preset,
+	.s_stream =  hdmiphy_s_stream,
+};
+
+static const struct v4l2_subdev_ops hdmiphy_ops = {
+	.core = &hdmiphy_core_ops,
+	.video = &hdmiphy_video_ops,
+};
+
+static int __devinit hdmiphy_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct hdmiphy_ctx *ctx;
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->conf_tab = (struct hdmiphy_conf *)id->driver_data;
+	v4l2_i2c_subdev_init(&ctx->sd, client, &hdmiphy_ops);
+
+	dev_info(&client->dev, "probe successful\n");
+	return 0;
+}
+
+static int __devexit hdmiphy_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
+
+	kfree(ctx);
+	dev_info(&client->dev, "remove successful\n");
+
+	return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+	{ "hdmiphy", (unsigned long)hdmiphy_conf_exynos4210 },
+	{ "hdmiphy-s5pv210", (unsigned long)hdmiphy_conf_s5pv210 },
+	{ "hdmiphy-exynos4210", (unsigned long)hdmiphy_conf_exynos4210 },
+	{ "hdmiphy-exynos4212", (unsigned long)hdmiphy_conf_exynos4212 },
+	{ "hdmiphy-exynos4412", (unsigned long)hdmiphy_conf_exynos4412 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
+
+static struct i2c_driver hdmiphy_driver = {
+	.driver = {
+		.name	= "s5p-hdmiphy",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= hdmiphy_probe,
+	.remove		= __devexit_p(hdmiphy_remove),
+	.id_table = hdmiphy_id,
+};
+
+module_i2c_driver(hdmiphy_driver);
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
new file mode 100644
index 0000000..ddb422e
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -0,0 +1,365 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#ifndef SAMSUNG_MIXER_H
+#define SAMSUNG_MIXER_H
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+	#define DEBUG
+#endif
+
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+
+#include "regs-mixer.h"
+
+/** maximum number of output interfaces */
+#define MXR_MAX_OUTPUTS 2
+/** maximum number of input interfaces (layers) */
+#define MXR_MAX_LAYERS 3
+#define MXR_DRIVER_NAME "s5p-mixer"
+/** maximal number of planes for every layer */
+#define MXR_MAX_PLANES	2
+
+#define MXR_ENABLE 1
+#define MXR_DISABLE 0
+
+/** description of a macroblock for packed formats */
+struct mxr_block {
+	/** horizontal number of pixels in macroblock */
+	unsigned int width;
+	/** vertical number of pixels in macroblock */
+	unsigned int height;
+	/** size of block in bytes */
+	unsigned int size;
+};
+
+/** description of supported format */
+struct mxr_format {
+	/** format name/mnemonic */
+	const char *name;
+	/** fourcc identifier */
+	u32 fourcc;
+	/** colorspace identifier */
+	enum v4l2_colorspace colorspace;
+	/** number of planes in image data */
+	int num_planes;
+	/** description of block for each plane */
+	struct mxr_block plane[MXR_MAX_PLANES];
+	/** number of subframes in image data */
+	int num_subframes;
+	/** specifies to which subframe a given plane belongs */
+	int plane2subframe[MXR_MAX_PLANES];
+	/** internal code, driver dependent */
+	unsigned long cookie;
+};
+
+/** description of crop configuration for image */
+struct mxr_crop {
+	/** width of layer in pixels */
+	unsigned int full_width;
+	/** height of layer in pixels */
+	unsigned int full_height;
+	/** horizontal offset of first pixel to be displayed */
+	unsigned int x_offset;
+	/** vertical offset of first pixel to be displayed */
+	unsigned int y_offset;
+	/** width of displayed data in pixels */
+	unsigned int width;
+	/** height of displayed data in pixels */
+	unsigned int height;
+	/** indicates which fields are present in the buffer */
+	unsigned int field;
+};
+
+/** stages of geometry operations */
+enum mxr_geometry_stage {
+	MXR_GEOMETRY_SINK,
+	MXR_GEOMETRY_COMPOSE,
+	MXR_GEOMETRY_CROP,
+	MXR_GEOMETRY_SOURCE,
+};
+
+/* flag indicating that offset should be 0 */
+#define MXR_NO_OFFSET	0x80000000
+
+/** description of transformation from source to destination image */
+struct mxr_geometry {
+	/** cropping for source image */
+	struct mxr_crop src;
+	/** cropping for destination image */
+	struct mxr_crop dst;
+	/** layer-dependent description of horizontal scaling */
+	unsigned int x_ratio;
+	/** layer-dependent description of vertical scaling */
+	unsigned int y_ratio;
+};
+
+/** instance of a buffer */
+struct mxr_buffer {
+	/** common v4l buffer stuff -- must be first */
+	struct vb2_buffer	vb;
+	/** node for layer's lists */
+	struct list_head	list;
+};
+
+
+/** internal states of layer */
+enum mxr_layer_state {
+	/** layer is not shown */
+	MXR_LAYER_IDLE = 0,
+	/** layer is shown */
+	MXR_LAYER_STREAMING,
+	/** state before STREAMOFF is finished */
+	MXR_LAYER_STREAMING_FINISH,
+};
+
+/** forward declarations */
+struct mxr_device;
+struct mxr_layer;
+
+/** callbacks for layer operations */
+struct mxr_layer_ops {
+	/* TODO: try to port it to subdev API */
+	/** handler for resource release function */
+	void (*release)(struct mxr_layer *);
+	/** setting buffer to HW */
+	void (*buffer_set)(struct mxr_layer *, struct mxr_buffer *);
+	/** setting format and geometry in HW */
+	void (*format_set)(struct mxr_layer *);
+	/** streaming stop/start */
+	void (*stream_set)(struct mxr_layer *, int);
+	/** adjusting geometry */
+	void (*fix_geometry)(struct mxr_layer *,
+		enum mxr_geometry_stage, unsigned long);
+};
+
+/** layer instance, a single window and content displayed on output */
+struct mxr_layer {
+	/** parent mixer device */
+	struct mxr_device *mdev;
+	/** layer index (unique identifier) */
+	int idx;
+	/** callbacks for layer methods */
+	struct mxr_layer_ops ops;
+	/** format array */
+	const struct mxr_format **fmt_array;
+	/** size of format array */
+	unsigned long fmt_array_size;
+
+	/** lock for protection of list and state fields */
+	spinlock_t enq_slock;
+	/** list for enqueued buffers */
+	struct list_head enq_list;
+	/** buffer currently owned by hardware in temporary registers */
+	struct mxr_buffer *update_buf;
+	/** buffer currently owned by hardware in shadow registers */
+	struct mxr_buffer *shadow_buf;
+	/** state of layer IDLE/STREAMING */
+	enum mxr_layer_state state;
+
+	/** mutex for protection of fields below */
+	struct mutex mutex;
+	/** handler for video node */
+	struct video_device vfd;
+	/** queue for output buffers */
+	struct vb2_queue vb_queue;
+	/** current image format */
+	const struct mxr_format *fmt;
+	/** current geometry of image */
+	struct mxr_geometry geo;
+};
+
+/** description of the mixer's output interface */
+struct mxr_output {
+	/** name of output */
+	char name[32];
+	/** output subdev */
+	struct v4l2_subdev *sd;
+	/** cookie used for configuration of registers */
+	int cookie;
+};
+
+/** specify source of output subdevs */
+struct mxr_output_conf {
+	/** name of output (connector) */
+	char *output_name;
+	/** name of module that generates output subdev */
+	char *module_name;
+	/** cookie needed by the mixer HW */
+	int cookie;
+};
+
+struct clk;
+struct regulator;
+
+/** auxiliary resources used by the mixer */
+struct mxr_resources {
+	/** interrupt index */
+	int irq;
+	/** pointer to Mixer registers */
+	void __iomem *mxr_regs;
+	/** pointer to Video Processor registers */
+	void __iomem *vp_regs;
+	/** other resources, should be used under mxr_device.mutex */
+	struct clk *mixer;
+	struct clk *vp;
+	struct clk *sclk_mixer;
+	struct clk *sclk_hdmi;
+	struct clk *sclk_dac;
+};
+
+/* event flags used  */
+enum mxr_devide_flags {
+	MXR_EVENT_VSYNC = 0,
+	MXR_EVENT_TOP = 1,
+};
+
+/** driver instance */
+struct mxr_device {
+	/** master device */
+	struct device *dev;
+	/** state of each layer */
+	struct mxr_layer *layer[MXR_MAX_LAYERS];
+	/** state of each output */
+	struct mxr_output *output[MXR_MAX_OUTPUTS];
+	/** number of registered outputs */
+	int output_cnt;
+
+	/* video resources */
+
+	/** V4L2 device */
+	struct v4l2_device v4l2_dev;
+	/** context of allocator */
+	void *alloc_ctx;
+	/** event wait queue */
+	wait_queue_head_t event_queue;
+	/** state flags */
+	unsigned long event_flags;
+
+	/** spinlock for protection of registers */
+	spinlock_t reg_slock;
+
+	/** mutex for protection of fields below */
+	struct mutex mutex;
+	/** number of entities dependent on the output configuration */
+	int n_output;
+	/** number of users that do streaming */
+	int n_streamer;
+	/** index of current output */
+	int current_output;
+	/** auxiliary resources used by the mixer */
+	struct mxr_resources res;
+};
+
+/** transform device structure into mixer device */
+static inline struct mxr_device *to_mdev(struct device *dev)
+{
+	struct v4l2_device *vdev = dev_get_drvdata(dev);
+	return container_of(vdev, struct mxr_device, v4l2_dev);
+}
+
+/** get current output data, should be called under mdev's mutex */
+static inline struct mxr_output *to_output(struct mxr_device *mdev)
+{
+	return mdev->output[mdev->current_output];
+}
+
+/** get current output subdev, should be called under mdev's mutex */
+static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
+{
+	struct mxr_output *out = to_output(mdev);
+	return out ? out->sd : NULL;
+}
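+
+/*
+ * Usage sketch (illustrative only): code holding mdev->mutex can reach the
+ * currently selected output and its subdev through the helpers above, e.g.
+ *
+ *	mutex_lock(&mdev->mutex);
+ *	sd = to_outsd(mdev);
+ *	if (sd)
+ *		v4l2_subdev_call(sd, video, s_stream, 1);
+ *	mutex_unlock(&mdev->mutex);
+ */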
+
+/** forward declaration for mixer platform data */
+struct mxr_platform_data;
+
+/** acquiring common video resources */
+int __devinit mxr_acquire_video(struct mxr_device *mdev,
+	struct mxr_output_conf *output_cont, int output_count);
+
+/** releasing common video resources */
+void mxr_release_video(struct mxr_device *mdev);
+
+struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
+struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
+struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+	int idx, char *name, struct mxr_layer_ops *ops);
+
+void mxr_base_layer_release(struct mxr_layer *layer);
+void mxr_layer_release(struct mxr_layer *layer);
+
+int mxr_base_layer_register(struct mxr_layer *layer);
+void mxr_base_layer_unregister(struct mxr_layer *layer);
+
+unsigned long mxr_get_plane_size(const struct mxr_block *blk,
+	unsigned int width, unsigned int height);
+
+/** adds a new consumer of the mixer's power */
+int __must_check mxr_power_get(struct mxr_device *mdev);
+/** removes a consumer of the mixer's power */
+void mxr_power_put(struct mxr_device *mdev);
+/** adds a new client of the output configuration */
+void mxr_output_get(struct mxr_device *mdev);
+/** removes a client of the output configuration */
+void mxr_output_put(struct mxr_device *mdev);
+/** adds a new streaming client */
+void mxr_streamer_get(struct mxr_device *mdev);
+/** removes a streaming client */
+void mxr_streamer_put(struct mxr_device *mdev);
+/** returns the format of data delivered to the current output */
+void mxr_get_mbus_fmt(struct mxr_device *mdev,
+	struct v4l2_mbus_framefmt *mbus_fmt);
+
+/* Debug */
+
+#define mxr_err(mdev, fmt, ...)  dev_err(mdev->dev, fmt, ##__VA_ARGS__)
+#define mxr_warn(mdev, fmt, ...) dev_warn(mdev->dev, fmt, ##__VA_ARGS__)
+#define mxr_info(mdev, fmt, ...) dev_info(mdev->dev, fmt, ##__VA_ARGS__)
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+	#define mxr_dbg(mdev, fmt, ...)  dev_dbg(mdev->dev, fmt, ##__VA_ARGS__)
+#else
+	#define mxr_dbg(mdev, fmt, ...)  do { (void) mdev; } while (0)
+#endif
+
+/* accessing Mixer's and Video Processor's registers */
+
+void mxr_vsync_set_update(struct mxr_device *mdev, int en);
+void mxr_reg_reset(struct mxr_device *mdev);
+irqreturn_t mxr_irq_handler(int irq, void *dev_data);
+void mxr_reg_s_output(struct mxr_device *mdev, int cookie);
+void mxr_reg_streamon(struct mxr_device *mdev);
+void mxr_reg_streamoff(struct mxr_device *mdev);
+int mxr_reg_wait4vsync(struct mxr_device *mdev);
+void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
+	struct v4l2_mbus_framefmt *fmt);
+void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en);
+void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr);
+void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
+	const struct mxr_format *fmt, const struct mxr_geometry *geo);
+
+void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en);
+void mxr_reg_vp_buffer(struct mxr_device *mdev,
+	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2]);
+void mxr_reg_vp_format(struct mxr_device *mdev,
+	const struct mxr_format *fmt, const struct mxr_geometry *geo);
+void mxr_reg_dump(struct mxr_device *mdev);
+
+#endif /* SAMSUNG_MIXER_H */
+
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
new file mode 100644
index 0000000..edca065
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -0,0 +1,487 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "mixer.h"
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung MIXER");
+MODULE_LICENSE("GPL");
+
+/* --------- DRIVER PARAMETERS ---------- */
+
+static struct mxr_output_conf mxr_output_conf[] = {
+	{
+		.output_name = "S5P HDMI connector",
+		.module_name = "s5p-hdmi",
+		.cookie = 1,
+	},
+	{
+		.output_name = "S5P SDO connector",
+		.module_name = "s5p-sdo",
+		.cookie = 0,
+	},
+};
+
+void mxr_get_mbus_fmt(struct mxr_device *mdev,
+	struct v4l2_mbus_framefmt *mbus_fmt)
+{
+	struct v4l2_subdev *sd;
+	int ret;
+
+	mutex_lock(&mdev->mutex);
+	sd = to_outsd(mdev);
+	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, mbus_fmt);
+	WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
+	mutex_unlock(&mdev->mutex);
+}
+
+void mxr_streamer_get(struct mxr_device *mdev)
+{
+	mutex_lock(&mdev->mutex);
+	++mdev->n_streamer;
+	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
+	if (mdev->n_streamer == 1) {
+		struct v4l2_subdev *sd = to_outsd(mdev);
+		struct v4l2_mbus_framefmt mbus_fmt;
+		struct mxr_resources *res = &mdev->res;
+		int ret;
+
+		if (to_output(mdev)->cookie == 0)
+			clk_set_parent(res->sclk_mixer, res->sclk_dac);
+		else
+			clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
+		mxr_reg_s_output(mdev, to_output(mdev)->cookie);
+
+		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mbus_fmt);
+		WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
+		ret = v4l2_subdev_call(sd, video, s_stream, 1);
+		WARN(ret, "starting stream failed for output %s\n", sd->name);
+
+		mxr_reg_set_mbus_fmt(mdev, &mbus_fmt);
+		mxr_reg_streamon(mdev);
+		ret = mxr_reg_wait4vsync(mdev);
+		WARN(ret, "failed to get vsync (%d) from output\n", ret);
+	}
+	mutex_unlock(&mdev->mutex);
+	mxr_reg_dump(mdev);
+	/* FIXME: what to do when streaming fails? */
+}
+
+void mxr_streamer_put(struct mxr_device *mdev)
+{
+	mutex_lock(&mdev->mutex);
+	--mdev->n_streamer;
+	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
+	if (mdev->n_streamer == 0) {
+		int ret;
+		struct v4l2_subdev *sd = to_outsd(mdev);
+
+		mxr_reg_streamoff(mdev);
+		/* vsync applies Mixer setup */
+		ret = mxr_reg_wait4vsync(mdev);
+		WARN(ret, "failed to get vsync (%d) from output\n", ret);
+		ret = v4l2_subdev_call(sd, video, s_stream, 0);
+		WARN(ret, "stopping stream failed for output %s\n", sd->name);
+	}
+	WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
+		mdev->n_streamer);
+	mutex_unlock(&mdev->mutex);
+	mxr_reg_dump(mdev);
+}
+
+void mxr_output_get(struct mxr_device *mdev)
+{
+	mutex_lock(&mdev->mutex);
+	++mdev->n_output;
+	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
+	/* turn on auxiliary driver */
+	if (mdev->n_output == 1)
+		v4l2_subdev_call(to_outsd(mdev), core, s_power, 1);
+	mutex_unlock(&mdev->mutex);
+}
+
+void mxr_output_put(struct mxr_device *mdev)
+{
+	mutex_lock(&mdev->mutex);
+	--mdev->n_output;
+	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
+	/* turn off auxiliary driver */
+	if (mdev->n_output == 0)
+		v4l2_subdev_call(to_outsd(mdev), core, s_power, 0);
+	WARN(mdev->n_output < 0, "negative number of output users (%d)\n",
+		mdev->n_output);
+	mutex_unlock(&mdev->mutex);
+}
+
+int mxr_power_get(struct mxr_device *mdev)
+{
+	int ret = pm_runtime_get_sync(mdev->dev);
+
+	/* returning 1 means that power was already enabled,
+	 * so zero should be returned to report success */
+	if (IS_ERR_VALUE(ret))
+		return ret;
+	return 0;
+}
+
+void mxr_power_put(struct mxr_device *mdev)
+{
+	pm_runtime_put_sync(mdev->dev);
+}
+
+/* --------- RESOURCE MANAGEMENT -------------*/
+
+static int __devinit mxr_acquire_plat_resources(struct mxr_device *mdev,
+	struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
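+	/* the device exposes two register regions ("mxr" and "vp") and a
+	 * single interrupt line; acquire them in order and unwind on failure */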
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+	if (res == NULL) {
+		mxr_err(mdev, "get memory resource failed.\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	mdev->res.mxr_regs = ioremap(res->start, resource_size(res));
+	if (mdev->res.mxr_regs == NULL) {
+		mxr_err(mdev, "register mapping failed.\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+	if (res == NULL) {
+		mxr_err(mdev, "get memory resource failed.\n");
+		ret = -ENXIO;
+		goto fail_mxr_regs;
+	}
+
+	mdev->res.vp_regs = ioremap(res->start, resource_size(res));
+	if (mdev->res.vp_regs == NULL) {
+		mxr_err(mdev, "register mapping failed.\n");
+		ret = -ENXIO;
+		goto fail_mxr_regs;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+	if (res == NULL) {
+		mxr_err(mdev, "get interrupt resource failed.\n");
+		ret = -ENXIO;
+		goto fail_vp_regs;
+	}
+
+	ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev);
+	if (ret) {
+		mxr_err(mdev, "request interrupt failed.\n");
+		goto fail_vp_regs;
+	}
+	mdev->res.irq = res->start;
+
+	return 0;
+
+fail_vp_regs:
+	iounmap(mdev->res.vp_regs);
+
+fail_mxr_regs:
+	iounmap(mdev->res.mxr_regs);
+
+fail:
+	return ret;
+}
+
+static void mxr_release_plat_resources(struct mxr_device *mdev)
+{
+	free_irq(mdev->res.irq, mdev);
+	iounmap(mdev->res.vp_regs);
+	iounmap(mdev->res.mxr_regs);
+}
+
+static void mxr_release_clocks(struct mxr_device *mdev)
+{
+	struct mxr_resources *res = &mdev->res;
+
+	if (!IS_ERR_OR_NULL(res->sclk_dac))
+		clk_put(res->sclk_dac);
+	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+		clk_put(res->sclk_hdmi);
+	if (!IS_ERR_OR_NULL(res->sclk_mixer))
+		clk_put(res->sclk_mixer);
+	if (!IS_ERR_OR_NULL(res->vp))
+		clk_put(res->vp);
+	if (!IS_ERR_OR_NULL(res->mixer))
+		clk_put(res->mixer);
+}
+
+static int mxr_acquire_clocks(struct mxr_device *mdev)
+{
+	struct mxr_resources *res = &mdev->res;
+	struct device *dev = mdev->dev;
+
+	res->mixer = clk_get(dev, "mixer");
+	if (IS_ERR_OR_NULL(res->mixer)) {
+		mxr_err(mdev, "failed to get clock 'mixer'\n");
+		goto fail;
+	}
+	res->vp = clk_get(dev, "vp");
+	if (IS_ERR_OR_NULL(res->vp)) {
+		mxr_err(mdev, "failed to get clock 'vp'\n");
+		goto fail;
+	}
+	res->sclk_mixer = clk_get(dev, "sclk_mixer");
+	if (IS_ERR_OR_NULL(res->sclk_mixer)) {
+		mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
+		goto fail;
+	}
+	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+		mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
+		goto fail;
+	}
+	res->sclk_dac = clk_get(dev, "sclk_dac");
+	if (IS_ERR_OR_NULL(res->sclk_dac)) {
+		mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	mxr_release_clocks(mdev);
+	return -ENODEV;
+}
+
+static int __devinit mxr_acquire_resources(struct mxr_device *mdev,
+	struct platform_device *pdev)
+{
+	int ret;
+	ret = mxr_acquire_plat_resources(mdev, pdev);
+
+	if (ret)
+		goto fail;
+
+	ret = mxr_acquire_clocks(mdev);
+	if (ret)
+		goto fail_plat;
+
+	mxr_info(mdev, "resources acquired\n");
+	return 0;
+
+fail_plat:
+	mxr_release_plat_resources(mdev);
+fail:
+	mxr_err(mdev, "resources acquire failed\n");
+	return ret;
+}
+
+static void mxr_release_resources(struct mxr_device *mdev)
+{
+	mxr_release_clocks(mdev);
+	mxr_release_plat_resources(mdev);
+	memset(&mdev->res, 0, sizeof mdev->res);
+}
+
+static void mxr_release_layers(struct mxr_device *mdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i)
+		if (mdev->layer[i])
+			mxr_layer_release(mdev->layer[i]);
+}
+
+static int __devinit mxr_acquire_layers(struct mxr_device *mdev,
+	struct mxr_platform_data *pdata)
+{
+	mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
+	mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
+	mdev->layer[2] = mxr_vp_layer_create(mdev, 0);
+
+	if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) {
+		mxr_err(mdev, "failed to acquire layers\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	mxr_release_layers(mdev);
+	return -ENODEV;
+}
+
+/* ---------- POWER MANAGEMENT ----------- */
+
+static int mxr_runtime_resume(struct device *dev)
+{
+	struct mxr_device *mdev = to_mdev(dev);
+	struct mxr_resources *res = &mdev->res;
+
+	mxr_dbg(mdev, "resume - start\n");
+	mutex_lock(&mdev->mutex);
+	/* turn clocks on */
+	clk_enable(res->mixer);
+	clk_enable(res->vp);
+	clk_enable(res->sclk_mixer);
+	/* apply default configuration */
+	mxr_reg_reset(mdev);
+	mxr_dbg(mdev, "resume - finished\n");
+
+	mutex_unlock(&mdev->mutex);
+	return 0;
+}
+
+static int mxr_runtime_suspend(struct device *dev)
+{
+	struct mxr_device *mdev = to_mdev(dev);
+	struct mxr_resources *res = &mdev->res;
+	mxr_dbg(mdev, "suspend - start\n");
+	mutex_lock(&mdev->mutex);
+	/* turn clocks off */
+	clk_disable(res->sclk_mixer);
+	clk_disable(res->vp);
+	clk_disable(res->mixer);
+	mutex_unlock(&mdev->mutex);
+	mxr_dbg(mdev, "suspend - finished\n");
+	return 0;
+}
+
+static const struct dev_pm_ops mxr_pm_ops = {
+	.runtime_suspend = mxr_runtime_suspend,
+	.runtime_resume	 = mxr_runtime_resume,
+};
+
+/* --------- DRIVER INITIALIZATION ---------- */
+
+static int __devinit mxr_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mxr_platform_data *pdata = dev->platform_data;
+	struct mxr_device *mdev;
+	int ret;
+
+	/* mdev does not exist yet so no mxr_dbg is used */
+	dev_info(dev, "probe start\n");
+
+	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+	if (!mdev) {
+		dev_err(dev, "not enough memory.\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* setup pointer to master device */
+	mdev->dev = dev;
+
+	mutex_init(&mdev->mutex);
+	spin_lock_init(&mdev->reg_slock);
+	init_waitqueue_head(&mdev->event_queue);
+
+	/* acquire resources: regs, irqs, clocks, regulators */
+	ret = mxr_acquire_resources(mdev, pdev);
+	if (ret)
+		goto fail_mem;
+
+	/* configure resources for video output */
+	ret = mxr_acquire_video(mdev, mxr_output_conf,
+		ARRAY_SIZE(mxr_output_conf));
+	if (ret)
+		goto fail_resources;
+
+	/* configure layers */
+	ret = mxr_acquire_layers(mdev, pdata);
+	if (ret)
+		goto fail_video;
+
+	pm_runtime_enable(dev);
+
+	mxr_info(mdev, "probe successful\n");
+	return 0;
+
+fail_video:
+	mxr_release_video(mdev);
+
+fail_resources:
+	mxr_release_resources(mdev);
+
+fail_mem:
+	kfree(mdev);
+
+fail:
+	dev_info(dev, "probe failed\n");
+	return ret;
+}
+
+static int __devexit mxr_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mxr_device *mdev = to_mdev(dev);
+
+	pm_runtime_disable(dev);
+
+	mxr_release_layers(mdev);
+	mxr_release_video(mdev);
+	mxr_release_resources(mdev);
+
+	kfree(mdev);
+
+	dev_info(dev, "remove successful\n");
+	return 0;
+}
+
+static struct platform_driver mxr_driver __refdata = {
+	.probe = mxr_probe,
+	.remove = __devexit_p(mxr_remove),
+	.driver = {
+		.name = MXR_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &mxr_pm_ops,
+	}
+};
+
+static int __init mxr_init(void)
+{
+	int i, ret;
+	static const char banner[] __initconst = KERN_INFO
+		"Samsung TV Mixer driver, "
+		"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+	printk(banner);
+
+	/* Loading auxiliary modules */
+	for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i)
+		request_module(mxr_output_conf[i].module_name);
+
+	ret = platform_driver_register(&mxr_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "registration of MIXER driver failed\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+module_init(mxr_init);
+
+static void __exit mxr_exit(void)
+{
+	platform_driver_unregister(&mxr_driver);
+}
+module_exit(mxr_exit);
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
new file mode 100644
index 0000000..b93a21f
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -0,0 +1,270 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <media/videobuf2-dma-contig.h>
+
+/* FORMAT DEFINITIONS */
+
+static const struct mxr_format mxr_fb_fmt_rgb565 = {
+	.name = "RGB565",
+	.fourcc = V4L2_PIX_FMT_RGB565,
+	.colorspace = V4L2_COLORSPACE_SRGB,
+	.num_planes = 1,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 2 },
+	},
+	.num_subframes = 1,
+	.cookie = 4,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb1555 = {
+	.name = "ARGB1555",
+	.num_planes = 1,
+	.fourcc = V4L2_PIX_FMT_RGB555,
+	.colorspace = V4L2_COLORSPACE_SRGB,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 2 },
+	},
+	.num_subframes = 1,
+	.cookie = 5,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb4444 = {
+	.name = "ARGB4444",
+	.num_planes = 1,
+	.fourcc = V4L2_PIX_FMT_RGB444,
+	.colorspace = V4L2_COLORSPACE_SRGB,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 2 },
+	},
+	.num_subframes = 1,
+	.cookie = 6,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb8888 = {
+	.name = "ARGB8888",
+	.fourcc = V4L2_PIX_FMT_BGR32,
+	.colorspace = V4L2_COLORSPACE_SRGB,
+	.num_planes = 1,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 4 },
+	},
+	.num_subframes = 1,
+	.cookie = 7,
+};
+
+static const struct mxr_format *mxr_graph_format[] = {
+	&mxr_fb_fmt_rgb565,
+	&mxr_fb_fmt_argb1555,
+	&mxr_fb_fmt_argb4444,
+	&mxr_fb_fmt_argb8888,
+};
+
+/* AUXILIARY CALLBACKS */
+
+static void mxr_graph_layer_release(struct mxr_layer *layer)
+{
+	mxr_base_layer_unregister(layer);
+	mxr_base_layer_release(layer);
+}
+
+static void mxr_graph_buffer_set(struct mxr_layer *layer,
+	struct mxr_buffer *buf)
+{
+	dma_addr_t addr = 0;
+
+	if (buf)
+		addr = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
+	mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
+}
+
+static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
+{
+	mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
+}
+
+static void mxr_graph_format_set(struct mxr_layer *layer)
+{
+	mxr_reg_graph_format(layer->mdev, layer->idx,
+		layer->fmt, &layer->geo);
+}
+
+static inline unsigned int closest(unsigned int x, unsigned int a,
+	unsigned int b, unsigned long flags)
+{
+	unsigned int mid = (a + b) / 2;
+
+	/* choosing closest value with constraints according to table:
+	 * -------------+-----+-----+-----+-------+
+	 * flags	|  0  |  LE |  GE | LE|GE |
+	 * -------------+-----+-----+-----+-------+
+	 * x <= a	|  a  |  a  |  a  |   a   |
+	 * a < x <= mid	|  a  |  a  |  b  |   a   |
+	 * mid < x < b	|  b  |  a  |  b  |   b   |
+	 * b <= x	|  b  |  b  |  b  |   b   |
+	 * -------------+-----+-----+-----+-------+
+	 */
+
+	/* remove all non-constraint flags */
+	flags &= V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE;
+
+	if (x <= a)
+		return  a;
+	if (x >= b)
+		return b;
+	if (flags == V4L2_SEL_FLAG_LE)
+		return a;
+	if (flags == V4L2_SEL_FLAG_GE)
+		return b;
+	if (x <= mid)
+		return a;
+	return b;
+}
+
+static inline unsigned int do_center(unsigned int center,
+	unsigned int size, unsigned int upper, unsigned int flags)
+{
+	unsigned int lower;
+
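+	/* compute an offset that keeps the area centered at 'center'
+	 * while still fitting into the [0, upper] range */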
+	if (flags & MXR_NO_OFFSET)
+		return 0;
+
+	lower = center - min(center, size / 2);
+	return min(lower, upper - size);
+}
+
+static void mxr_graph_fix_geometry(struct mxr_layer *layer,
+	enum mxr_geometry_stage stage, unsigned long flags)
+{
+	struct mxr_geometry *geo = &layer->geo;
+	struct mxr_crop *src = &geo->src;
+	struct mxr_crop *dst = &geo->dst;
+	unsigned int x_center, y_center;
+
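+	/* the stages cascade: each case deliberately falls through so that
+	 * a constraint applied at an earlier stage is propagated to the
+	 * following ones with neutral flags */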
+	switch (stage) {
+
+	case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
+		flags = 0;
+		/* fall through */
+
+	case MXR_GEOMETRY_COMPOSE:
+		/* remember center of the area */
+		x_center = dst->x_offset + dst->width / 2;
+		y_center = dst->y_offset + dst->height / 2;
+		/* round up/down to a multiple of 2 depending on flags */
+		if (flags & V4L2_SEL_FLAG_LE) {
+			dst->width = round_down(dst->width, 2);
+			dst->height = round_down(dst->height, 2);
+		} else {
+			dst->width = round_up(dst->width, 2);
+			dst->height = round_up(dst->height, 2);
+		}
+		/* assure that compose rect is inside display area */
+		dst->width = min(dst->width, dst->full_width);
+		dst->height = min(dst->height, dst->full_height);
+
+		/* ensure that compose is reachable using 2x scaling */
+		dst->width = min(dst->width, 2 * src->full_width);
+		dst->height = min(dst->height, 2 * src->full_height);
+
+		/* setup offsets */
+		dst->x_offset = do_center(x_center, dst->width,
+			dst->full_width, flags);
+		dst->y_offset = do_center(y_center, dst->height,
+			dst->full_height, flags);
+		flags = 0;
+		/* fall through */
+
+	case MXR_GEOMETRY_CROP:
+		/* remember center of the area */
+		x_center = src->x_offset + src->width / 2;
+		y_center = src->y_offset + src->height / 2;
+		/* ensure that cropping area lies inside the buffer */
+		if (src->full_width < dst->width)
+			src->width = dst->width / 2;
+		else
+			src->width = closest(src->width, dst->width / 2,
+				dst->width, flags);
+
+		if (src->width == dst->width)
+			geo->x_ratio = 0;
+		else
+			geo->x_ratio = 1;
+
+		if (src->full_height < dst->height)
+			src->height = dst->height / 2;
+		else
+			src->height = closest(src->height, dst->height / 2,
+				dst->height, flags);
+
+		if (src->height == dst->height)
+			geo->y_ratio = 0;
+		else
+			geo->y_ratio = 1;
+
+		/* setup offsets */
+		src->x_offset = do_center(x_center, src->width,
+			src->full_width, flags);
+		src->y_offset = do_center(y_center, src->height,
+			src->full_height, flags);
+		flags = 0;
+		/* fall through */
+	case MXR_GEOMETRY_SOURCE:
+		src->full_width = clamp_val(src->full_width,
+			src->width + src->x_offset, 32767);
+		src->full_height = clamp_val(src->full_height,
+			src->height + src->y_offset, 2047);
+	};
+}
+
+/* PUBLIC API */
+
+struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
+{
+	struct mxr_layer *layer;
+	int ret;
+	struct mxr_layer_ops ops = {
+		.release = mxr_graph_layer_release,
+		.buffer_set = mxr_graph_buffer_set,
+		.stream_set = mxr_graph_stream_set,
+		.format_set = mxr_graph_format_set,
+		.fix_geometry = mxr_graph_fix_geometry,
+	};
+	char name[32];
+
+	sprintf(name, "graph%d", idx);
+
+	layer = mxr_base_layer_create(mdev, idx, name, &ops);
+	if (layer == NULL) {
+		mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
+		goto fail;
+	}
+
+	layer->fmt_array = mxr_graph_format;
+	layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
+
+	ret = mxr_base_layer_register(layer);
+	if (ret)
+		goto fail_layer;
+
+	return layer;
+
+fail_layer:
+	mxr_base_layer_release(layer);
+
+fail:
+	return NULL;
+}
+
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
new file mode 100644
index 0000000..3b1670a
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -0,0 +1,553 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/delay.h>
+
+/* Register access subroutines */
+
+static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
+{
+	return readl(mdev->res.vp_regs + reg_id);
+}
+
+static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
+{
+	writel(val, mdev->res.vp_regs + reg_id);
+}
+
+static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
+	u32 val, u32 mask)
+{
+	u32 old = vp_read(mdev, reg_id);
+
+	val = (val & mask) | (old & ~mask);
+	writel(val, mdev->res.vp_regs + reg_id);
+}
+
+static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
+{
+	return readl(mdev->res.mxr_regs + reg_id);
+}
+
+static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
+{
+	writel(val, mdev->res.mxr_regs + reg_id);
+}
+
+static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
+	u32 val, u32 mask)
+{
+	u32 old = mxr_read(mdev, reg_id);
+
+	val = (val & mask) | (old & ~mask);
+	writel(val, mdev->res.mxr_regs + reg_id);
+}
+
+void mxr_vsync_set_update(struct mxr_device *mdev, int en)
+{
+	/* block update on vsync */
+	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
+		MXR_STATUS_SYNC_ENABLE);
+	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void __mxr_reg_vp_reset(struct mxr_device *mdev)
+{
+	int tries = 100;
+
+	vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
+	for (tries = 100; tries; --tries) {
+		/* waiting until VP_SRESET_PROCESSING is 0 */
+		if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
+			break;
+		mdelay(10);
+	}
+	WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
+
+void mxr_reg_reset(struct mxr_device *mdev)
+{
+	unsigned long flags;
+	u32 val; /* value stored to register */
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+	/* set output in RGB888 mode */
+	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);
+
+	/* 16 beat burst in DMA */
+	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
+		MXR_STATUS_BURST_MASK);
+
+	/* setting default layer priority: layer1 > video > layer0
+	 * because typical usage scenario would be
+	 * layer0 - framebuffer
+	 * video - video overlay
+	 * layer1 - OSD
+	 */
+	val  = MXR_LAYER_CFG_GRP0_VAL(1);
+	val |= MXR_LAYER_CFG_VP_VAL(2);
+	val |= MXR_LAYER_CFG_GRP1_VAL(3);
+	mxr_write(mdev, MXR_LAYER_CFG, val);
+
+	/* use dark gray background color */
+	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
+	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
+	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);
+
+	/* setting graphical layers */
+
+	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
+	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+	/* the same configuration for both layers */
+	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
+	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);
+
+	/* configuration of Video Processor Registers */
+	__mxr_reg_vp_reset(mdev);
+	mxr_reg_vp_default_filter(mdev);
+
+	/* enable all interrupts */
+	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);
+
+	mxr_vsync_set_update(mdev, MXR_ENABLE);
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
+	const struct mxr_format *fmt, const struct mxr_geometry *geo)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+	/* setup format */
+	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
+		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);
+
+	/* setup geometry */
+	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
+	val  = MXR_GRP_WH_WIDTH(geo->src.width);
+	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
+	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
+	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
+	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);
+
+	/* setup offsets in source image */
+	val  = MXR_GRP_SXY_SX(geo->src.x_offset);
+	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
+	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);
+
+	/* setup offsets in display image */
+	val  = MXR_GRP_DXY_DX(geo->dst.x_offset);
+	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
+	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);
+
+	mxr_vsync_set_update(mdev, MXR_ENABLE);
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_vp_format(struct mxr_device *mdev,
+	const struct mxr_format *fmt, const struct mxr_geometry *geo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);
+
+	/* setting size of input image */
+	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
+		VP_IMG_VSIZE(geo->src.full_height));
+	/* chroma height has to be divided by 2 to avoid chroma distortions */
+	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
+		VP_IMG_VSIZE(geo->src.full_height / 2));
+
+	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
+	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
+	vp_write(mdev, VP_SRC_H_POSITION,
+		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
+	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);
+
+	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
+	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
+	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
+		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
+		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
+	} else {
+		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
+		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
+	}
+
+	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
+	vp_write(mdev, VP_V_RATIO, geo->y_ratio);
+
+	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+	mxr_vsync_set_update(mdev, MXR_ENABLE);
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+
+}
+
+void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
+{
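+	/* a zero DMA address disables the graphic layer,
+	 * any other address enables it */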
+	u32 val = addr ? ~0 : 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+	if (idx == 0)
+		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+	else
+		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);
+
+	mxr_vsync_set_update(mdev, MXR_ENABLE);
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_vp_buffer(struct mxr_device *mdev,
+	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
+{
+	u32 val = luma_addr[0] ? ~0 : 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
+	/* TODO: fix tiled mode */
+	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
+	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
+	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
+	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);
+
+	mxr_vsync_set_update(mdev, MXR_ENABLE);
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+static void mxr_irq_layer_handle(struct mxr_layer *layer)
+{
+	struct list_head *head;
+	struct mxr_buffer *done;
+
+	/* skip non-existing layer */
+	if (layer == NULL)
+		return;
+
+	head = &layer->enq_list;
+
+	spin_lock(&layer->enq_slock);
+	if (layer->state == MXR_LAYER_IDLE)
+		goto done;
+
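+	/* the shadow buffer has been consumed by the hardware; promote the
+	 * update buffer, refill it from the queue if more buffers are
+	 * pending and return the old shadow buffer to videobuf2 */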
+	done = layer->shadow_buf;
+	layer->shadow_buf = layer->update_buf;
+
+	if (list_empty(head)) {
+		if (layer->state != MXR_LAYER_STREAMING)
+			layer->update_buf = NULL;
+	} else {
+		struct mxr_buffer *next;
+		next = list_first_entry(head, struct mxr_buffer, list);
+		list_del(&next->list);
+		layer->update_buf = next;
+	}
+
+	layer->ops.buffer_set(layer, layer->update_buf);
+
+	if (done && done != layer->shadow_buf)
+		vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
+
+done:
+	spin_unlock(&layer->enq_slock);
+}
+
+irqreturn_t mxr_irq_handler(int irq, void *dev_data)
+{
+	struct mxr_device *mdev = dev_data;
+	u32 i, val;
+
+	spin_lock(&mdev->reg_slock);
+	val = mxr_read(mdev, MXR_INT_STATUS);
+
+	/* wake up process waiting for VSYNC */
+	if (val & MXR_INT_STATUS_VSYNC) {
+		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+		/* toggle TOP field event if working in interlaced mode */
+		if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
+			change_bit(MXR_EVENT_TOP, &mdev->event_flags);
+		wake_up(&mdev->event_queue);
+		/* the vsync interrupt uses different bits for status and clear */
+		val &= ~MXR_INT_STATUS_VSYNC;
+		val |= MXR_INT_CLEAR_VSYNC;
+	}
+
+	/* clear interrupts */
+	mxr_write(mdev, MXR_INT_STATUS, val);
+
+	spin_unlock(&mdev->reg_slock);
+	/* leave on non-vsync event */
+	if (~val & MXR_INT_CLEAR_VSYNC)
+		return IRQ_HANDLED;
+	/* skip layer update on bottom field */
+	if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
+		return IRQ_HANDLED;
+	for (i = 0; i < MXR_MAX_LAYERS; ++i)
+		mxr_irq_layer_handle(mdev->layer[i]);
+	return IRQ_HANDLED;
+}
+
+void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
+{
+	u32 val;
+
+	val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
+	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
+}
+
+void mxr_reg_streamon(struct mxr_device *mdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	/* single write -> no need to block vsync update */
+
+	/* start MIXER */
+	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+	set_bit(MXR_EVENT_TOP, &mdev->event_flags);
+
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_streamoff(struct mxr_device *mdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	/* single write -> no need to block vsync update */
+
+	/* stop MIXER */
+	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+int mxr_reg_wait4vsync(struct mxr_device *mdev)
+{
+	int ret;
+
+	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+	/* TODO: consider adding interruptible */
+	ret = wait_event_timeout(mdev->event_queue,
+		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
+		msecs_to_jiffies(1000));
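+	/* wait_event_timeout() returns the remaining time in jiffies when
+	 * the condition became true, or 0 on timeout */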
+	if (ret > 0)
+		return 0;
+	if (ret < 0)
+		return ret;
+	mxr_warn(mdev, "no vsync detected - timeout\n");
+	return -ETIME;
+}
+
+void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
+	struct v4l2_mbus_framefmt *fmt)
+{
+	u32 val = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->reg_slock, flags);
+	mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+	/* selecting colorspace accepted by output */
+	if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
+		val |= MXR_CFG_OUT_YUV444;
+	else
+		val |= MXR_CFG_OUT_RGB888;
+
+	/* choosing between interlace and progressive mode */
+	if (fmt->field == V4L2_FIELD_INTERLACED)
+		val |= MXR_CFG_SCAN_INTERLACE;
+	else
+		val |= MXR_CFG_SCAN_PROGRASSIVE;
+
+	/* choosing the proper HD or SD mode */
+	if (fmt->height == 480)
+		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+	else if (fmt->height == 576)
+		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+	else if (fmt->height == 720)
+		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+	else if (fmt->height == 1080)
+		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+	else
+		WARN(1, "unrecognized mbus height %u!\n", fmt->height);
+
+	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
+		MXR_CFG_OUT_MASK);
+
+	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
+	vp_write_mask(mdev, VP_MODE, val,
+		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);
+
+	mxr_vsync_set_update(mdev, MXR_ENABLE);
+	spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
+{
+	/* no extra actions need to be done */
+}
+
+void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
+{
+	/* no extra actions need to be done */
+}
+
+static const u8 filter_y_horiz_tap8[] = {
+	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
+	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
+	0,	2,	4,	5,	6,	6,	6,	6,
+	6,	5,	5,	4,	3,	2,	1,	1,
+	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
+	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
+	127,	126,	125,	121,	114,	107,	99,	89,
+	79,	68,	57,	46,	35,	25,	16,	8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
+	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
+	127,	126,	124,	118,	111,	102,	92,	81,
+	70,	59,	48,	37,	27,	19,	11,	5,
+	0,	5,	11,	19,	27,	37,	48,	59,
+	70,	81,	92,	102,	111,	118,	124,	126,
+	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
+	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
+	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
+	127,	126,	124,	118,	111,	102,	92,	81,
+	70,	59,	48,	37,	27,	19,	11,	5,
+};
+
+static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
+	int reg_id, const u8 *data, unsigned int size)
+{
+	/* assure 4-byte alignment */
+	BUG_ON(size & 3);
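+	/* pack the coefficients four at a time, most significant byte
+	 * first, into consecutive 32-bit filter registers */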
+	for (; size; size -= 4, reg_id += 4, data += 4) {
+		u32 val = (data[0] << 24) |  (data[1] << 16) |
+			(data[2] << 8) | data[3];
+		vp_write(mdev, reg_id, val);
+	}
+}
+
+static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
+{
+	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
+		filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
+		filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
+		filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
+
+static void mxr_reg_mxr_dump(struct mxr_device *mdev)
+{
+#define DUMPREG(reg_id) \
+do { \
+	mxr_dbg(mdev, #reg_id " = %08x\n", \
+		(u32)readl(mdev->res.mxr_regs + reg_id)); \
+} while (0)
+
+	DUMPREG(MXR_STATUS);
+	DUMPREG(MXR_CFG);
+	DUMPREG(MXR_INT_EN);
+	DUMPREG(MXR_INT_STATUS);
+
+	DUMPREG(MXR_LAYER_CFG);
+	DUMPREG(MXR_VIDEO_CFG);
+
+	DUMPREG(MXR_GRAPHIC0_CFG);
+	DUMPREG(MXR_GRAPHIC0_BASE);
+	DUMPREG(MXR_GRAPHIC0_SPAN);
+	DUMPREG(MXR_GRAPHIC0_WH);
+	DUMPREG(MXR_GRAPHIC0_SXY);
+	DUMPREG(MXR_GRAPHIC0_DXY);
+
+	DUMPREG(MXR_GRAPHIC1_CFG);
+	DUMPREG(MXR_GRAPHIC1_BASE);
+	DUMPREG(MXR_GRAPHIC1_SPAN);
+	DUMPREG(MXR_GRAPHIC1_WH);
+	DUMPREG(MXR_GRAPHIC1_SXY);
+	DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void mxr_reg_vp_dump(struct mxr_device *mdev)
+{
+#define DUMPREG(reg_id) \
+do { \
+	mxr_dbg(mdev, #reg_id " = %08x\n", \
+		(u32) readl(mdev->res.vp_regs + reg_id)); \
+} while (0)
+
+
+	DUMPREG(VP_ENABLE);
+	DUMPREG(VP_SRESET);
+	DUMPREG(VP_SHADOW_UPDATE);
+	DUMPREG(VP_FIELD_ID);
+	DUMPREG(VP_MODE);
+	DUMPREG(VP_IMG_SIZE_Y);
+	DUMPREG(VP_IMG_SIZE_C);
+	DUMPREG(VP_PER_RATE_CTRL);
+	DUMPREG(VP_TOP_Y_PTR);
+	DUMPREG(VP_BOT_Y_PTR);
+	DUMPREG(VP_TOP_C_PTR);
+	DUMPREG(VP_BOT_C_PTR);
+	DUMPREG(VP_ENDIAN_MODE);
+	DUMPREG(VP_SRC_H_POSITION);
+	DUMPREG(VP_SRC_V_POSITION);
+	DUMPREG(VP_SRC_WIDTH);
+	DUMPREG(VP_SRC_HEIGHT);
+	DUMPREG(VP_DST_H_POSITION);
+	DUMPREG(VP_DST_V_POSITION);
+	DUMPREG(VP_DST_WIDTH);
+	DUMPREG(VP_DST_HEIGHT);
+	DUMPREG(VP_H_RATIO);
+	DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+void mxr_reg_dump(struct mxr_device *mdev)
+{
+	mxr_reg_mxr_dump(mdev);
+	mxr_reg_vp_dump(mdev);
+}
+
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
new file mode 100644
index 0000000..e0e02cc
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -0,0 +1,1125 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <media/v4l2-ioctl.h>
+#include <linux/videodev2.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <media/videobuf2-dma-contig.h>
+
+static int find_reg_callback(struct device *dev, void *p)
+{
+	struct v4l2_subdev **sd = p;
+
+	*sd = dev_get_drvdata(dev);
+	/* non-zero value stops iteration */
+	return 1;
+}
+
+static struct v4l2_subdev *find_and_register_subdev(
+	struct mxr_device *mdev, char *module_name)
+{
+	struct device_driver *drv;
+	struct v4l2_subdev *sd = NULL;
+	int ret;
+
+	/* TODO: add waiting until probe is finished */
+	drv = driver_find(module_name, &platform_bus_type);
+	if (!drv) {
+		mxr_warn(mdev, "module %s is missing\n", module_name);
+		return NULL;
+	}
+	/* driver refcnt is increased, it is safe to iterate over devices */
+	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
+	/* ret == 0 means that find_reg_callback was never executed */
+	if (sd == NULL) {
+		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
+		goto done;
+	}
+	/* v4l2_device_register_subdev detects if sd is NULL */
+	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
+	if (ret) {
+		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
+		sd = NULL;
+	}
+
+done:
+	return sd;
+}
+
+int __devinit mxr_acquire_video(struct mxr_device *mdev,
+	struct mxr_output_conf *output_conf, int output_count)
+{
+	struct device *dev = mdev->dev;
+	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
+	int i;
+	int ret = 0;
+	struct v4l2_subdev *sd;
+
+	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
+	/* prepare context for V4L2 device */
+	ret = v4l2_device_register(dev, v4l2_dev);
+	if (ret) {
+		mxr_err(mdev, "could not register v4l2 device.\n");
+		goto fail;
+	}
+
+	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
+	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
+		mxr_err(mdev, "could not acquire vb2 allocator\n");
+		goto fail_v4l2_dev;
+	}
+
+	/* registering outputs */
+	mdev->output_cnt = 0;
+	for (i = 0; i < output_count; ++i) {
+		struct mxr_output_conf *conf = &output_conf[i];
+		struct mxr_output *out;
+
+		sd = find_and_register_subdev(mdev, conf->module_name);
+		/* trying to register next output */
+		if (sd == NULL)
+			continue;
+		out = kzalloc(sizeof *out, GFP_KERNEL);
+		if (out == NULL) {
+			mxr_err(mdev, "no memory for '%s'\n",
+				conf->output_name);
+			ret = -ENOMEM;
+			/* registered subdevs are removed in fail_v4l2_dev */
+			goto fail_output;
+		}
+		strlcpy(out->name, conf->output_name, sizeof(out->name));
+		out->sd = sd;
+		out->cookie = conf->cookie;
+		mdev->output[mdev->output_cnt++] = out;
+		mxr_info(mdev, "added output '%s' from module '%s'\n",
+			conf->output_name, conf->module_name);
+		/* checking if maximal number of outputs is reached */
+		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
+			break;
+	}
+
+	if (mdev->output_cnt == 0) {
+		mxr_err(mdev, "failed to register any output\n");
+		ret = -ENODEV;
+		/* skipping fail_output because there is nothing to free */
+		goto fail_vb2_allocator;
+	}
+
+	return 0;
+
+fail_output:
+	/* kfree is NULL-safe */
+	for (i = 0; i < mdev->output_cnt; ++i)
+		kfree(mdev->output[i]);
+	memset(mdev->output, 0, sizeof mdev->output);
+
+fail_vb2_allocator:
+	/* freeing allocator context */
+	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+
+fail_v4l2_dev:
+	/* NOTE: automatically unregister all subdevs */
+	v4l2_device_unregister(v4l2_dev);
+
+fail:
+	return ret;
+}
+
+void mxr_release_video(struct mxr_device *mdev)
+{
+	int i;
+
+	/* kfree is NULL-safe */
+	for (i = 0; i < mdev->output_cnt; ++i)
+		kfree(mdev->output[i]);
+
+	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+	v4l2_device_unregister(&mdev->v4l2_dev);
+}
+
+static int mxr_querycap(struct file *file, void *priv,
+	struct v4l2_capability *cap)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
+	strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
+	sprintf(cap->bus_info, "%d", layer->idx);
+	cap->version = KERNEL_VERSION(0, 1, 0);
+	cap->capabilities = V4L2_CAP_STREAMING |
+		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+	return 0;
+}
+
+static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
+{
+	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
+		geo->src.full_width, geo->src.full_height);
+	mxr_dbg(mdev, "src.size = (%u, %u)\n",
+		geo->src.width, geo->src.height);
+	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
+		geo->src.x_offset, geo->src.y_offset);
+	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
+		geo->dst.full_width, geo->dst.full_height);
+	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
+		geo->dst.width, geo->dst.height);
+	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
+		geo->dst.x_offset, geo->dst.y_offset);
+	mxr_dbg(mdev, "ratio = (%u, %u)\n",
+		geo->x_ratio, geo->y_ratio);
+}
+
+static void mxr_layer_default_geo(struct mxr_layer *layer)
+{
+	struct mxr_device *mdev = layer->mdev;
+	struct v4l2_mbus_framefmt mbus_fmt;
+
+	memset(&layer->geo, 0, sizeof layer->geo);
+
+	mxr_get_mbus_fmt(mdev, &mbus_fmt);
+
+	layer->geo.dst.full_width = mbus_fmt.width;
+	layer->geo.dst.full_height = mbus_fmt.height;
+	layer->geo.dst.width = layer->geo.dst.full_width;
+	layer->geo.dst.height = layer->geo.dst.full_height;
+	layer->geo.dst.field = mbus_fmt.field;
+
+	layer->geo.src.full_width = mbus_fmt.width;
+	layer->geo.src.full_height = mbus_fmt.height;
+	layer->geo.src.width = layer->geo.src.full_width;
+	layer->geo.src.height = layer->geo.src.full_height;
+
+	mxr_geometry_dump(mdev, &layer->geo);
+	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+	mxr_geometry_dump(mdev, &layer->geo);
+}
+
+static void mxr_layer_update_output(struct mxr_layer *layer)
+{
+	struct mxr_device *mdev = layer->mdev;
+	struct v4l2_mbus_framefmt mbus_fmt;
+
+	mxr_get_mbus_fmt(mdev, &mbus_fmt);
+	/* checking if update is needed */
+	if (layer->geo.dst.full_width == mbus_fmt.width &&
+		layer->geo.dst.full_height == mbus_fmt.height)
+		return;
+
+	layer->geo.dst.full_width = mbus_fmt.width;
+	layer->geo.dst.full_height = mbus_fmt.height;
+	layer->geo.dst.field = mbus_fmt.field;
+	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+
+	mxr_geometry_dump(mdev, &layer->geo);
+}
+
+static const struct mxr_format *find_format_by_fourcc(
+	struct mxr_layer *layer, unsigned long fourcc);
+static const struct mxr_format *find_format_by_index(
+	struct mxr_layer *layer, unsigned long index);
+
+static int mxr_enum_fmt(struct file *file, void  *priv,
+	struct v4l2_fmtdesc *f)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	const struct mxr_format *fmt;
+
+	mxr_dbg(mdev, "%s\n", __func__);
+	fmt = find_format_by_index(layer, f->index);
+	if (fmt == NULL)
+		return -EINVAL;
+
+	strlcpy(f->description, fmt->name, sizeof(f->description));
+	f->pixelformat = fmt->fourcc;
+
+	return 0;
+}
+
+static unsigned int divup(unsigned int dividend, unsigned int divisor)
+{
+	return (dividend + divisor - 1) / divisor;
+}
+
+unsigned long mxr_get_plane_size(const struct mxr_block *blk,
+	unsigned int width, unsigned int height)
+{
+	unsigned int bl_width = divup(width, blk->width);
+	unsigned int bl_height = divup(height, blk->height);
+
+	return bl_width * bl_height * blk->size;
+}
+
+static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
+	const struct mxr_format *fmt, u32 width, u32 height)
+{
+	int i;
+
+	/* checking if nothing to fill */
+	if (!planes)
+		return;
+
+	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
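+	/* accumulate block sizes per subframe; planes that share a
+	 * subframe contribute to the same sizeimage */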
+	for (i = 0; i < fmt->num_planes; ++i) {
+		struct v4l2_plane_pix_format *plane = planes
+			+ fmt->plane2subframe[i];
+		const struct mxr_block *blk = &fmt->plane[i];
+		u32 bl_width = divup(width, blk->width);
+		u32 bl_height = divup(height, blk->height);
+		u32 sizeimage = bl_width * bl_height * blk->size;
+		u16 bytesperline = bl_width * blk->size / blk->height;
+
+		plane->sizeimage += sizeimage;
+		plane->bytesperline = max(plane->bytesperline, bytesperline);
+	}
+}
+
+static int mxr_g_fmt(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+	pix->width = layer->geo.src.full_width;
+	pix->height = layer->geo.src.full_height;
+	pix->field = V4L2_FIELD_NONE;
+	pix->pixelformat = layer->fmt->fourcc;
+	pix->colorspace = layer->fmt->colorspace;
+	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
+
+	return 0;
+}
+
+static int mxr_s_fmt(struct file *file, void *priv,
+	struct v4l2_format *f)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	const struct mxr_format *fmt;
+	struct v4l2_pix_format_mplane *pix;
+	struct mxr_device *mdev = layer->mdev;
+	struct mxr_geometry *geo = &layer->geo;
+
+	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+
+	pix = &f->fmt.pix_mp;
+	fmt = find_format_by_fourcc(layer, pix->pixelformat);
+	if (fmt == NULL) {
+		mxr_warn(mdev, "not recognized fourcc: %08x\n",
+			pix->pixelformat);
+		return -EINVAL;
+	}
+	layer->fmt = fmt;
+	/* set source size to highest accepted value */
+	geo->src.full_width = max(geo->dst.full_width, pix->width);
+	geo->src.full_height = max(geo->dst.full_height, pix->height);
+	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+	mxr_geometry_dump(mdev, &layer->geo);
+	/* set cropping to total visible screen */
+	geo->src.width = pix->width;
+	geo->src.height = pix->height;
+	geo->src.x_offset = 0;
+	geo->src.y_offset = 0;
+	/* assure consistency of geometry */
+	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
+	mxr_geometry_dump(mdev, &layer->geo);
+	/* set full size to lowest possible value */
+	geo->src.full_width = 0;
+	geo->src.full_height = 0;
+	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+	mxr_geometry_dump(mdev, &layer->geo);
+
+	/* returning results */
+	mxr_g_fmt(file, priv, f);
+
+	return 0;
+}
+
+static int mxr_g_selection(struct file *file, void *fh,
+	struct v4l2_selection *s)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_geometry *geo = &layer->geo;
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+		s->r.left = geo->src.x_offset;
+		s->r.top = geo->src.y_offset;
+		s->r.width = geo->src.width;
+		s->r.height = geo->src.height;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = geo->src.full_width;
+		s->r.height = geo->src.full_height;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_COMPOSE_PADDED:
+		s->r.left = geo->dst.x_offset;
+		s->r.top = geo->dst.y_offset;
+		s->r.width = geo->dst.width;
+		s->r.height = geo->dst.height;
+		break;
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = geo->dst.full_width;
+		s->r.height = geo->dst.full_height;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* returns 1 if rectangle 'a' is inside 'b' */
+static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+	if (a->left < b->left)
+		return 0;
+	if (a->top < b->top)
+		return 0;
+	if (a->left + a->width > b->left + b->width)
+		return 0;
+	if (a->top + a->height > b->top + b->height)
+		return 0;
+	return 1;
+}
+
+static int mxr_s_selection(struct file *file, void *fh,
+	struct v4l2_selection *s)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_geometry *geo = &layer->geo;
+	struct mxr_crop *target = NULL;
+	enum mxr_geometry_stage stage;
+	struct mxr_geometry tmp;
+	struct v4l2_rect res;
+
+	memset(&res, 0, sizeof res);
+
+	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
+		s->r.width, s->r.height, s->r.left, s->r.top);
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return -EINVAL;
+
+	switch (s->target) {
+	/* ignore read-only targets */
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		res.width = geo->src.full_width;
+		res.height = geo->src.full_height;
+		break;
+
+	/* ignore read-only targets */
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		res.width = geo->dst.full_width;
+		res.height = geo->dst.full_height;
+		break;
+
+	case V4L2_SEL_TGT_CROP:
+		target = &geo->src;
+		stage = MXR_GEOMETRY_CROP;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_COMPOSE_PADDED:
+		target = &geo->dst;
+		stage = MXR_GEOMETRY_COMPOSE;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* apply change and update geometry if needed */
+	if (target) {
+		/* backup current geometry if setup fails */
+		memcpy(&tmp, geo, sizeof tmp);
+
+		/* apply requested selection */
+		target->x_offset = s->r.left;
+		target->y_offset = s->r.top;
+		target->width = s->r.width;
+		target->height = s->r.height;
+
+		layer->ops.fix_geometry(layer, stage, s->flags);
+
+		/* retrieve the updated selection rectangle */
+		res.left = target->x_offset;
+		res.top = target->y_offset;
+		res.width = target->width;
+		res.height = target->height;
+
+		mxr_geometry_dump(layer->mdev, &layer->geo);
+	}
+
+	/* checking if the rectangle satisfies constraints */
+	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
+		goto fail;
+	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
+		goto fail;
+
+	/* return result rectangle */
+	s->r = res;
+
+	return 0;
+fail:
+	/* restore old geometry, which is not touched if target is NULL */
+	if (target)
+		memcpy(geo, &tmp, sizeof tmp);
+	return -ERANGE;
+}
+
+static int mxr_enum_dv_presets(struct file *file, void *fh,
+	struct v4l2_dv_enum_preset *preset)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	int ret;
+
+	/* lock protects from changing sd_out */
+	mutex_lock(&mdev->mutex);
+	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
+	mutex_unlock(&mdev->mutex);
+
+	return ret ? -EINVAL : 0;
+}
+
+static int mxr_s_dv_preset(struct file *file, void *fh,
+	struct v4l2_dv_preset *preset)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	int ret;
+
+	/* lock protects from changing sd_out */
+	mutex_lock(&mdev->mutex);
+
+	/* preset change cannot be done while there is an entity
+	 * dependent on output configuration
+	 */
+	if (mdev->n_output > 0) {
+		mutex_unlock(&mdev->mutex);
+		return -EBUSY;
+	}
+
+	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);
+
+	mutex_unlock(&mdev->mutex);
+
+	mxr_layer_update_output(layer);
+
+	/* any failure should return EINVAL according to V4L2 doc */
+	return ret ? -EINVAL : 0;
+}
+
+static int mxr_g_dv_preset(struct file *file, void *fh,
+	struct v4l2_dv_preset *preset)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	int ret;
+
+	/* lock protects from changing sd_out */
+	mutex_lock(&mdev->mutex);
+	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
+	mutex_unlock(&mdev->mutex);
+
+	return ret ? -EINVAL : 0;
+}
+
+static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	int ret;
+
+	/* lock protects from changing sd_out */
+	mutex_lock(&mdev->mutex);
+
+	/* standard change cannot be done while there is an entity
+	 * dependent on output configuration
+	 */
+	if (mdev->n_output > 0) {
+		mutex_unlock(&mdev->mutex);
+		return -EBUSY;
+	}
+
+	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);
+
+	mutex_unlock(&mdev->mutex);
+
+	mxr_layer_update_output(layer);
+
+	return ret ? -EINVAL : 0;
+}
+
+static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	int ret;
+
+	/* lock protects from changing sd_out */
+	mutex_lock(&mdev->mutex);
+	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
+	mutex_unlock(&mdev->mutex);
+
+	return ret ? -EINVAL : 0;
+}
+
+static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	struct mxr_output *out;
+	struct v4l2_subdev *sd;
+
+	if (a->index >= mdev->output_cnt)
+		return -EINVAL;
+	out = mdev->output[a->index];
+	BUG_ON(out == NULL);
+	sd = out->sd;
+	strlcpy(a->name, out->name, sizeof(a->name));
+
+	/* try to obtain supported tv norms */
+	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
+	a->capabilities = 0;
+	if (sd->ops->video && sd->ops->video->s_dv_preset)
+		a->capabilities |= V4L2_OUT_CAP_PRESETS;
+	if (sd->ops->video && sd->ops->video->s_std_output)
+		a->capabilities |= V4L2_OUT_CAP_STD;
+	a->type = V4L2_OUTPUT_TYPE_ANALOG;
+
+	return 0;
+}
+
+static int mxr_s_output(struct file *file, void *fh, unsigned int i)
+{
+	struct video_device *vfd = video_devdata(file);
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+
+	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
+		return -EINVAL;
+
+	mutex_lock(&mdev->mutex);
+	if (mdev->n_output > 0) {
+		mutex_unlock(&mdev->mutex);
+		return -EBUSY;
+	}
+	mdev->current_output = i;
+	vfd->tvnorms = 0;
+	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
+		&vfd->tvnorms);
+	mutex_unlock(&mdev->mutex);
+
+	/* update layers geometry */
+	mxr_layer_update_output(layer);
+
+	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
+
+	return 0;
+}
+
+static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+
+	mutex_lock(&mdev->mutex);
+	*p = mdev->current_output;
+	mutex_unlock(&mdev->mutex);
+
+	return 0;
+}
+
+static int mxr_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *p)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+	return vb2_reqbufs(&layer->vb_queue, p);
+}
+
+static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+	return vb2_querybuf(&layer->vb_queue, p);
+}
+
+static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
+	return vb2_qbuf(&layer->vb_queue, p);
+}
+
+static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
+}
+
+static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+	return vb2_streamon(&layer->vb_queue, i);
+}
+
+static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+	return vb2_streamoff(&layer->vb_queue, i);
+}
+
+static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
+	.vidioc_querycap = mxr_querycap,
+	/* format handling */
+	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
+	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
+	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
+	/* buffer control */
+	.vidioc_reqbufs = mxr_reqbufs,
+	.vidioc_querybuf = mxr_querybuf,
+	.vidioc_qbuf = mxr_qbuf,
+	.vidioc_dqbuf = mxr_dqbuf,
+	/* Streaming control */
+	.vidioc_streamon = mxr_streamon,
+	.vidioc_streamoff = mxr_streamoff,
+	/* Preset functions */
+	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
+	.vidioc_s_dv_preset = mxr_s_dv_preset,
+	.vidioc_g_dv_preset = mxr_g_dv_preset,
+	/* analog TV standard functions */
+	.vidioc_s_std = mxr_s_std,
+	.vidioc_g_std = mxr_g_std,
+	/* Output handling */
+	.vidioc_enum_output = mxr_enum_output,
+	.vidioc_s_output = mxr_s_output,
+	.vidioc_g_output = mxr_g_output,
+	/* selection ioctls */
+	.vidioc_g_selection = mxr_g_selection,
+	.vidioc_s_selection = mxr_s_selection,
+};
+
+static int mxr_video_open(struct file *file)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	struct mxr_device *mdev = layer->mdev;
+	int ret = 0;
+
+	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+	if (mutex_lock_interruptible(&layer->mutex))
+		return -ERESTARTSYS;
+	/* assure device probe is finished */
+	wait_for_device_probe();
+	/* creating context for file descriptor */
+	ret = v4l2_fh_open(file);
+	if (ret) {
+		mxr_err(mdev, "v4l2_fh_open failed\n");
+		goto unlock;
+	}
+
+	/* leaving if layer is already initialized */
+	if (!v4l2_fh_is_singular_file(file))
+		goto unlock;
+
+	/* FIXME: should power be enabled on open? */
+	ret = mxr_power_get(mdev);
+	if (ret) {
+		mxr_err(mdev, "power on failed\n");
+		goto fail_fh_open;
+	}
+
+	ret = vb2_queue_init(&layer->vb_queue);
+	if (ret != 0) {
+		mxr_err(mdev, "failed to initialize vb2 queue\n");
+		goto fail_power;
+	}
+	/* set default format, first on the list */
+	layer->fmt = layer->fmt_array[0];
+	/* setup default geometry */
+	mxr_layer_default_geo(layer);
+	mutex_unlock(&layer->mutex);
+
+	return 0;
+
+fail_power:
+	mxr_power_put(mdev);
+
+fail_fh_open:
+	v4l2_fh_release(file);
+
+unlock:
+	mutex_unlock(&layer->mutex);
+
+	return ret;
+}
+
+static unsigned int
+mxr_video_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	unsigned int res;
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+	mutex_lock(&layer->mutex);
+	res = vb2_poll(&layer->vb_queue, file, wait);
+	mutex_unlock(&layer->mutex);
+	return res;
+}
+
+static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+	int ret;
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+	if (mutex_lock_interruptible(&layer->mutex))
+		return -ERESTARTSYS;
+	ret = vb2_mmap(&layer->vb_queue, vma);
+	mutex_unlock(&layer->mutex);
+	return ret;
+}
+
+static int mxr_video_release(struct file *file)
+{
+	struct mxr_layer *layer = video_drvdata(file);
+
+	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+	mutex_lock(&layer->mutex);
+	if (v4l2_fh_is_singular_file(file)) {
+		vb2_queue_release(&layer->vb_queue);
+		mxr_power_put(layer->mdev);
+	}
+	v4l2_fh_release(file);
+	mutex_unlock(&layer->mutex);
+	return 0;
+}
+
+static const struct v4l2_file_operations mxr_fops = {
+	.owner = THIS_MODULE,
+	.open = mxr_video_open,
+	.poll = mxr_video_poll,
+	.mmap = mxr_video_mmap,
+	.release = mxr_video_release,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
+	void *alloc_ctxs[])
+{
+	struct mxr_layer *layer = vb2_get_drv_priv(vq);
+	const struct mxr_format *fmt = layer->fmt;
+	int i;
+	struct mxr_device *mdev = layer->mdev;
+	struct v4l2_plane_pix_format planes[3];
+
+	mxr_dbg(mdev, "%s\n", __func__);
+	/* checking if format was configured */
+	if (fmt == NULL)
+		return -EINVAL;
+	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
+	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
+		layer->geo.src.full_height);
+
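+	/* expose one vb2 plane per hardware subframe, each backed by the
+	 * DMA-contig allocator */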
+	*nplanes = fmt->num_subframes;
+	for (i = 0; i < fmt->num_subframes; ++i) {
+		alloc_ctxs[i] = layer->mdev->alloc_ctx;
+		sizes[i] = planes[i].sizeimage;
+		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
+	}
+
+	if (*nbuffers == 0)
+		*nbuffers = 1;
+
+	return 0;
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
+	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
+	struct mxr_device *mdev = layer->mdev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&layer->enq_slock, flags);
+	list_add_tail(&buffer->list, &layer->enq_list);
+	spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+	mxr_dbg(mdev, "queuing buffer\n");
+}
+
+static void wait_lock(struct vb2_queue *vq)
+{
+	struct mxr_layer *layer = vb2_get_drv_priv(vq);
+
+	mxr_dbg(layer->mdev, "%s\n", __func__);
+	mutex_lock(&layer->mutex);
+}
+
+static void wait_unlock(struct vb2_queue *vq)
+{
+	struct mxr_layer *layer = vb2_get_drv_priv(vq);
+
+	mxr_dbg(layer->mdev, "%s\n", __func__);
+	mutex_unlock(&layer->mutex);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct mxr_layer *layer = vb2_get_drv_priv(vq);
+	struct mxr_device *mdev = layer->mdev;
+	unsigned long flags;
+
+	mxr_dbg(mdev, "%s\n", __func__);
+
+	if (count == 0) {
+		mxr_dbg(mdev, "no output buffers queued\n");
+		return -EINVAL;
+	}
+
+	/* block any changes in output configuration */
+	mxr_output_get(mdev);
+
+	mxr_layer_update_output(layer);
+	layer->ops.format_set(layer);
+	/* enabling layer in hardware */
+	spin_lock_irqsave(&layer->enq_slock, flags);
+	layer->state = MXR_LAYER_STREAMING;
+	spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+	layer->ops.stream_set(layer, MXR_ENABLE);
+	mxr_streamer_get(mdev);
+
+	return 0;
+}
+
+static void mxr_watchdog(unsigned long arg)
+{
+	struct mxr_layer *layer = (struct mxr_layer *) arg;
+	struct mxr_device *mdev = layer->mdev;
+	unsigned long flags;
+
+	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
+
+	spin_lock_irqsave(&layer->enq_slock, flags);
+
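+	/* if both pointers refer to the same buffer, complete it only once */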
+	if (layer->update_buf == layer->shadow_buf)
+		layer->update_buf = NULL;
+	if (layer->update_buf) {
+		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
+		layer->update_buf = NULL;
+	}
+	if (layer->shadow_buf) {
+		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
+		layer->shadow_buf = NULL;
+	}
+	spin_unlock_irqrestore(&layer->enq_slock, flags);
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+	struct mxr_layer *layer = vb2_get_drv_priv(vq);
+	struct mxr_device *mdev = layer->mdev;
+	unsigned long flags;
+	struct timer_list watchdog;
+	struct mxr_buffer *buf, *buf_tmp;
+
+	mxr_dbg(mdev, "%s\n", __func__);
+
+	spin_lock_irqsave(&layer->enq_slock, flags);
+
+	/* mark the layer as finishing streaming */
+	layer->state = MXR_LAYER_STREAMING_FINISH;
+
+	/* return all queued buffers with the error state */
+	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+	/* give the last buffers 1 second to complete */
+	setup_timer_on_stack(&watchdog, mxr_watchdog,
+		(unsigned long)layer);
+	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
+
+	/* wait until all buffers reach the done state */
+	vb2_wait_for_all_buffers(vq);
+
+	/* stop the timer once all buffers are synchronized */
+	del_timer_sync(&watchdog);
+	destroy_timer_on_stack(&watchdog);
+
+	/* stopping hardware */
+	spin_lock_irqsave(&layer->enq_slock, flags);
+	layer->state = MXR_LAYER_IDLE;
+	spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+	/* disabling layer in hardware */
+	layer->ops.stream_set(layer, MXR_DISABLE);
+	/* remove one streamer */
+	mxr_streamer_put(mdev);
+	/* allow changes in output configuration */
+	mxr_output_put(mdev);
+	return 0;
+}
+
+static struct vb2_ops mxr_video_qops = {
+	.queue_setup = queue_setup,
+	.buf_queue = buf_queue,
+	.wait_prepare = wait_unlock,
+	.wait_finish = wait_lock,
+	.start_streaming = start_streaming,
+	.stop_streaming = stop_streaming,
+};
+
+/* FIXME: try to move this function into mxr_base_layer_create */
+int mxr_base_layer_register(struct mxr_layer *layer)
+{
+	struct mxr_device *mdev = layer->mdev;
+	int ret;
+
+	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		mxr_err(mdev, "failed to register video device\n");
+	else
+		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
+			layer->vfd.name, layer->vfd.num);
+	return ret;
+}
+
+void mxr_base_layer_unregister(struct mxr_layer *layer)
+{
+	video_unregister_device(&layer->vfd);
+}
+
+void mxr_layer_release(struct mxr_layer *layer)
+{
+	if (layer->ops.release)
+		layer->ops.release(layer);
+}
+
+void mxr_base_layer_release(struct mxr_layer *layer)
+{
+	kfree(layer);
+}
+
+static void mxr_vfd_release(struct video_device *vdev)
+{
+	printk(KERN_INFO "video device release\n");
+}
+
+struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+	int idx, char *name, struct mxr_layer_ops *ops)
+{
+	struct mxr_layer *layer;
+
+	layer = kzalloc(sizeof *layer, GFP_KERNEL);
+	if (layer == NULL) {
+		mxr_err(mdev, "not enough memory for layer.\n");
+		goto fail;
+	}
+
+	layer->mdev = mdev;
+	layer->idx = idx;
+	layer->ops = *ops;
+
+	spin_lock_init(&layer->enq_slock);
+	INIT_LIST_HEAD(&layer->enq_list);
+	mutex_init(&layer->mutex);
+
+	layer->vfd = (struct video_device) {
+		.minor = -1,
+		.release = mxr_vfd_release,
+		.fops = &mxr_fops,
+		.ioctl_ops = &mxr_ioctl_ops,
+	};
+	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
+	/* let framework control PRIORITY */
+	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
+
+	video_set_drvdata(&layer->vfd, layer);
+	layer->vfd.lock = &layer->mutex;
+	layer->vfd.v4l2_dev = &mdev->v4l2_dev;
+
+	layer->vb_queue = (struct vb2_queue) {
+		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+		.io_modes = VB2_MMAP | VB2_USERPTR,
+		.drv_priv = layer,
+		.buf_struct_size = sizeof(struct mxr_buffer),
+		.ops = &mxr_video_qops,
+		.mem_ops = &vb2_dma_contig_memops,
+	};
+
+	return layer;
+
+fail:
+	return NULL;
+}
+
+static const struct mxr_format *find_format_by_fourcc(
+	struct mxr_layer *layer, unsigned long fourcc)
+{
+	int i;
+
+	for (i = 0; i < layer->fmt_array_size; ++i)
+		if (layer->fmt_array[i]->fourcc == fourcc)
+			return layer->fmt_array[i];
+	return NULL;
+}
+
+static const struct mxr_format *find_format_by_index(
+	struct mxr_layer *layer, unsigned long index)
+{
+	if (index >= layer->fmt_array_size)
+		return NULL;
+	return layer->fmt_array[index];
+}
+
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
new file mode 100644
index 0000000..3d13a63
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -0,0 +1,241 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include "regs-vp.h"
+
+#include <media/videobuf2-dma-contig.h>
+
+/* FORMAT DEFINITIONS */
+static const struct mxr_format mxr_fmt_nv12 = {
+	.name = "NV12",
+	.fourcc = V4L2_PIX_FMT_NV12,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.num_planes = 2,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 1 },
+		{ .width = 2, .height = 2, .size = 2 },
+	},
+	.num_subframes = 1,
+	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv21 = {
+	.name = "NV21",
+	.fourcc = V4L2_PIX_FMT_NV21,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.num_planes = 2,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 1 },
+		{ .width = 2, .height = 2, .size = 2 },
+	},
+	.num_subframes = 1,
+	.cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv12m = {
+	.name = "NV12 (mplane)",
+	.fourcc = V4L2_PIX_FMT_NV12M,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.num_planes = 2,
+	.plane = {
+		{ .width = 1, .height = 1, .size = 1 },
+		{ .width = 2, .height = 2, .size = 2 },
+	},
+	.num_subframes = 2,
+	.plane2subframe = {0, 1},
+	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv12mt = {
+	.name = "NV12 tiled (mplane)",
+	.fourcc = V4L2_PIX_FMT_NV12MT,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.num_planes = 2,
+	.plane = {
+		{ .width = 128, .height = 32, .size = 4096 },
+		{ .width = 128, .height = 32, .size = 2048 },
+	},
+	.num_subframes = 2,
+	.plane2subframe = {0, 1},
+	.cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
+};
+
+static const struct mxr_format *mxr_video_format[] = {
+	&mxr_fmt_nv12,
+	&mxr_fmt_nv21,
+	&mxr_fmt_nv12m,
+	&mxr_fmt_nv12mt,
+};
+
+/* AUXILIARY CALLBACKS */
+
+static void mxr_vp_layer_release(struct mxr_layer *layer)
+{
+	mxr_base_layer_unregister(layer);
+	mxr_base_layer_release(layer);
+}
+
+static void mxr_vp_buffer_set(struct mxr_layer *layer,
+	struct mxr_buffer *buf)
+{
+	dma_addr_t luma_addr[2] = {0, 0};
+	dma_addr_t chroma_addr[2] = {0, 0};
+
+	if (buf == NULL) {
+		mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
+		return;
+	}
+	luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
+	if (layer->fmt->num_subframes == 2) {
+		chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1);
+	} else {
+		/* FIXME: mxr_get_plane_size computes an integer division,
+		 * which is slow and should not be done in interrupt context */
+		chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
+			&layer->fmt->plane[0], layer->geo.src.full_width,
+			layer->geo.src.full_height);
+	}
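+	/*
+	 * The second address of each pair is used for the bottom field:
+	 * the next line in linear mode, a fixed in-tile offset when tiled.
+	 */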
+	if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
+		luma_addr[1] = luma_addr[0] + 0x40;
+		chroma_addr[1] = chroma_addr[0] + 0x40;
+	} else {
+		luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
+		chroma_addr[1] = chroma_addr[0];
+	}
+	mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
+}
+
+static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
+{
+	mxr_reg_vp_layer_stream(layer->mdev, en);
+}
+
+static void mxr_vp_format_set(struct mxr_layer *layer)
+{
+	mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
+}
+
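+/*
+ * Compute an offset that keeps an area of 'size' centered on 'center'
+ * while fitting inside [0, upper - size]; MXR_NO_OFFSET forces offset 0.
+ */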
+static inline unsigned int do_center(unsigned int center,
+	unsigned int size, unsigned int upper, unsigned int flags)
+{
+	unsigned int lower;
+
+	if (flags & MXR_NO_OFFSET)
+		return 0;
+
+	lower = center - min(center, size / 2);
+	return min(lower, upper - size);
+}
+
+static void mxr_vp_fix_geometry(struct mxr_layer *layer,
+	enum mxr_geometry_stage stage, unsigned long flags)
+{
+	struct mxr_geometry *geo = &layer->geo;
+	struct mxr_crop *src = &geo->src;
+	struct mxr_crop *dst = &geo->dst;
+	unsigned long x_center, y_center;
+
+	switch (stage) {
+
+	case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
+	case MXR_GEOMETRY_COMPOSE:
+		/* remember center of the area */
+		x_center = dst->x_offset + dst->width / 2;
+		y_center = dst->y_offset + dst->height / 2;
+
+		/* ensure that compose is reachable using 16x scaling */
+		dst->width = clamp(dst->width, 8U, 16 * src->full_width);
+		dst->height = clamp(dst->height, 1U, 16 * src->full_height);
+
+		/* setup offsets */
+		dst->x_offset = do_center(x_center, dst->width,
+			dst->full_width, flags);
+		dst->y_offset = do_center(y_center, dst->height,
+			dst->full_height, flags);
+		flags = 0; /* remove possible MXR_NO_OFFSET flag */
+		/* fall through */
+	case MXR_GEOMETRY_CROP:
+		/* remember center of the area */
+		x_center = src->x_offset + src->width / 2;
+		y_center = src->y_offset + src->height / 2;
+
+		/* ensure scaling is between 0.25x .. 16x */
+		src->width = clamp(src->width, round_up(dst->width / 16, 4),
+			dst->width * 4);
+		src->height = clamp(src->height, round_up(dst->height / 16, 4),
+			dst->height * 4);
+
+		/* hardware limits */
+		src->width = clamp(src->width, 32U, 2047U);
+		src->height = clamp(src->height, 4U, 2047U);
+
+		/* setup offsets */
+		src->x_offset = do_center(x_center, src->width,
+			src->full_width, flags);
+		src->y_offset = do_center(y_center, src->height,
+			src->full_height, flags);
+
+		/* setting scaling ratio */
+		geo->x_ratio = (src->width << 16) / dst->width;
+		geo->y_ratio = (src->height << 16) / dst->height;
+		/* fall through */
+
+	case MXR_GEOMETRY_SOURCE:
+		src->full_width = clamp(src->full_width,
+			ALIGN(src->width + src->x_offset, 8), 8192U);
+		src->full_height = clamp(src->full_height,
+			src->height + src->y_offset, 8192U);
+	}
+}
+
+/* PUBLIC API */
+
+struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
+{
+	struct mxr_layer *layer;
+	int ret;
+	struct mxr_layer_ops ops = {
+		.release = mxr_vp_layer_release,
+		.buffer_set = mxr_vp_buffer_set,
+		.stream_set = mxr_vp_stream_set,
+		.format_set = mxr_vp_format_set,
+		.fix_geometry = mxr_vp_fix_geometry,
+	};
+	char name[32];
+
+	sprintf(name, "video%d", idx);
+
+	layer = mxr_base_layer_create(mdev, idx, name, &ops);
+	if (layer == NULL) {
+		mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
+		goto fail;
+	}
+
+	layer->fmt_array = mxr_video_format;
+	layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);
+
+	ret = mxr_base_layer_register(layer);
+	if (ret)
+		goto fail_layer;
+
+	return layer;
+
+fail_layer:
+	mxr_base_layer_release(layer);
+
+fail:
+	return NULL;
+}
+
diff --git a/drivers/media/platform/s5p-tv/regs-hdmi.h b/drivers/media/platform/s5p-tv/regs-hdmi.h
new file mode 100644
index 0000000..a889d1f
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/regs-hdmi.h
@@ -0,0 +1,146 @@
+/* drivers/media/platform/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x)		((x) + 0x00000000)
+#define HDMI_CORE_BASE(x)		((x) + 0x00010000)
+#define HDMI_TG_BASE(x)			((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON			HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG			HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS			HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT			HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL			HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0			HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1			HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2			HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS			HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS			HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN			HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD			HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL			HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0		HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1		HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2		HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0			HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1			HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0			HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1			HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2			HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0			HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1			HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2			HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL			HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE		HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0		HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1		HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2		HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0		HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1		HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2		HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0		HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1		HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2		HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0		HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1		HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2		HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0		HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1		HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2		HDMI_CORE_BASE(0x0158)
+#define HDMI_AVI_CON			HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define	HDMI_DC_CONTROL			HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN		HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN			HDMI_CORE_BASE(0x05C8)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD			HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L			HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H			HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L		HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H		HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L		HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H		HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L			HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H			HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L			HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H			HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L		HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H		HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L		HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H		HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L		HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H		HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L		HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H		HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L		HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H		HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L	HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H	HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L	HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H	HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L	HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H	HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L	HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H	HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL		(1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG		(1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG		(1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG		(1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG	(1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT		(1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT		(1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN		(1 << 5)
+#define HDMI_EN				(1 << 0)
+
+/* HDMI_CON_2 */
+#define HDMI_DVI_PERAMBLE_EN		(1 << 5)
+#define HDMI_DVI_BAND_EN		(1 << 1)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY		(1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN		(1 << 1)
+#define HDMI_MODE_DVI_EN		(1 << 0)
+#define HDMI_MODE_MASK			(3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_FIELD_EN		(1 << 1)
+#define HDMI_TG_EN			(1 << 0)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/platform/s5p-tv/regs-mixer.h b/drivers/media/platform/s5p-tv/regs-mixer.h
new file mode 100644
index 0000000..158abb4
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/regs-mixer.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS			0x0000
+#define MXR_CFG				0x0004
+#define MXR_INT_EN			0x0008
+#define MXR_INT_STATUS			0x000C
+#define MXR_LAYER_CFG			0x0010
+#define MXR_VIDEO_CFG			0x0014
+#define MXR_GRAPHIC0_CFG		0x0020
+#define MXR_GRAPHIC0_BASE		0x0024
+#define MXR_GRAPHIC0_SPAN		0x0028
+#define MXR_GRAPHIC0_SXY		0x002C
+#define MXR_GRAPHIC0_WH			0x0030
+#define MXR_GRAPHIC0_DXY		0x0034
+#define MXR_GRAPHIC0_BLANK		0x0038
+#define MXR_GRAPHIC1_CFG		0x0040
+#define MXR_GRAPHIC1_BASE		0x0044
+#define MXR_GRAPHIC1_SPAN		0x0048
+#define MXR_GRAPHIC1_SXY		0x004C
+#define MXR_GRAPHIC1_WH			0x0050
+#define MXR_GRAPHIC1_DXY		0x0054
+#define MXR_GRAPHIC1_BLANK		0x0058
+#define MXR_BG_CFG			0x0060
+#define MXR_BG_COLOR0			0x0064
+#define MXR_BG_COLOR1			0x0068
+#define MXR_BG_COLOR2			0x006C
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i)		(0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i)		(0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i)		(0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i)		(0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i)		(0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i)		(0x0034 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+	(((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
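+/* e.g. MXR_MASK(7, 4) evaluates to 0xf0 and MXR_MASK_VAL(0x3, 7, 4) to 0x30 */
+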
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST		(1 << 7)
+#define MXR_STATUS_BURST_MASK		(1 << 7)
+#define MXR_STATUS_SYNC_ENABLE		(1 << 2)
+#define MXR_STATUS_REG_RUN		(1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_OUT_YUV444		(0 << 8)
+#define MXR_CFG_OUT_RGB888		(1 << 8)
+#define MXR_CFG_OUT_MASK		(1 << 8)
+#define MXR_CFG_DST_SDO			(0 << 7)
+#define MXR_CFG_DST_HDMI		(1 << 7)
+#define MXR_CFG_DST_MASK		(1 << 7)
+#define MXR_CFG_SCAN_HD_720		(0 << 6)
+#define MXR_CFG_SCAN_HD_1080		(1 << 6)
+#define MXR_CFG_GRP1_ENABLE		(1 << 5)
+#define MXR_CFG_GRP0_ENABLE		(1 << 4)
+#define MXR_CFG_VP_ENABLE		(1 << 3)
+#define MXR_CFG_SCAN_INTERLACE		(0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE	(1 << 2)
+#define MXR_CFG_SCAN_NTSC		(0 << 1)
+#define MXR_CFG_SCAN_PAL		(1 << 1)
+#define MXR_CFG_SCAN_SD			(0 << 0)
+#define MXR_CFG_SCAN_HD			(1 << 0)
+#define MXR_CFG_SCAN_MASK		0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE	(1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL	(1 << 20)
+#define MXR_GRP_CFG_FORMAT_VAL(x)	MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK		MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x)	MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x)		MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x)		MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x)		MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x)		MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x)		MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x)		MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x)		MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x)		MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC		(1 << 11)
+#define MXR_INT_EN_ALL			(0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC		(1 << 11)
+#define MXR_INT_STATUS_VSYNC		(1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x)	MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x)	MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x)		MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
diff --git a/drivers/media/platform/s5p-tv/regs-sdo.h b/drivers/media/platform/s5p-tv/regs-sdo.h
new file mode 100644
index 0000000..6f22fbf
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/regs-sdo.h
@@ -0,0 +1,63 @@
+/* drivers/media/platform/s5p-tv/regs-sdo.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * SDO register description file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_SDO_H
+#define SAMSUNG_REGS_SDO_H
+
+/*
+ * Register part
+ */
+
+#define SDO_CLKCON			0x0000
+#define SDO_CONFIG			0x0008
+#define SDO_VBI				0x0014
+#define SDO_DAC				0x003C
+#define SDO_CCCON			0x0180
+#define SDO_IRQ				0x0280
+#define SDO_IRQMASK			0x0284
+#define SDO_VERSION			0x03D8
+
+/*
+ * Bit definition part
+ */
+
+/* SDO Clock Control Register (SDO_CLKCON) */
+#define SDO_TVOUT_SW_RESET		(1 << 4)
+#define SDO_TVOUT_CLOCK_READY		(1 << 1)
+#define SDO_TVOUT_CLOCK_ON		(1 << 0)
+
+/* SDO Video Standard Configuration Register (SDO_CONFIG) */
+#define SDO_PROGRESSIVE			(1 << 4)
+#define SDO_NTSC_M			0
+#define SDO_PAL_M			1
+#define SDO_PAL_BGHID			2
+#define SDO_PAL_N			3
+#define SDO_PAL_NC			4
+#define SDO_NTSC_443			8
+#define SDO_PAL_60			9
+#define SDO_STANDARD_MASK		0xf
+
+/* SDO VBI Configuration Register (SDO_VBI) */
+#define SDO_CVBS_WSS_INS		(1 << 14)
+#define SDO_CVBS_CLOSED_CAPTION_MASK	(3 << 12)
+
+/* SDO DAC Configuration Register (SDO_DAC) */
+#define SDO_POWER_ON_DAC		(1 << 0)
+
+/* SDO Color Compensation On/Off Control (SDO_CCCON) */
+#define SDO_COMPENSATION_BHS_ADJ_OFF	(1 << 4)
+#define SDO_COMPENSATION_CVBS_COMP_OFF	(1 << 0)
+
+/* SDO Interrupt Request Register (SDO_IRQ) */
+#define SDO_VSYNC_IRQ_PEND		(1 << 0)
+
+#endif /* SAMSUNG_REGS_SDO_H */
diff --git a/drivers/media/platform/s5p-tv/regs-vp.h b/drivers/media/platform/s5p-tv/regs-vp.h
new file mode 100644
index 0000000..6c63984
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/regs-vp.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE			0x0000
+#define VP_SRESET			0x0004
+#define VP_SHADOW_UPDATE		0x0008
+#define VP_FIELD_ID			0x000C
+#define VP_MODE				0x0010
+#define VP_IMG_SIZE_Y			0x0014
+#define VP_IMG_SIZE_C			0x0018
+#define VP_PER_RATE_CTRL		0x001C
+#define VP_TOP_Y_PTR			0x0028
+#define VP_BOT_Y_PTR			0x002C
+#define VP_TOP_C_PTR			0x0030
+#define VP_BOT_C_PTR			0x0034
+#define VP_ENDIAN_MODE			0x03CC
+#define VP_SRC_H_POSITION		0x0044
+#define VP_SRC_V_POSITION		0x0048
+#define VP_SRC_WIDTH			0x004C
+#define VP_SRC_HEIGHT			0x0050
+#define VP_DST_H_POSITION		0x0054
+#define VP_DST_V_POSITION		0x0058
+#define VP_DST_WIDTH			0x005C
+#define VP_DST_HEIGHT			0x0060
+#define VP_H_RATIO			0x0064
+#define VP_V_RATIO			0x0068
+#define VP_POLY8_Y0_LL			0x006C
+#define VP_POLY4_Y0_LL			0x00EC
+#define VP_POLY4_C0_LL			0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+	(((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
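+/* e.g. VP_MASK(13, 0) evaluates to 0x3fff, so VP_IMG_VSIZE(x) keeps only bits 13..0 of x */
+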
+/* VP_ENABLE */
+#define VP_ENABLE_ON			(1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING		(1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE		(1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12			(0 << 6)
+#define VP_MODE_NV21			(1 << 6)
+#define VP_MODE_LINE_SKIP		(1 << 5)
+#define VP_MODE_MEM_LINEAR		(0 << 4)
+#define VP_MODE_MEM_TILED		(1 << 4)
+#define VP_MODE_FMT_MASK		(5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING	(1 << 2)
+#define VP_MODE_2D_IPC			(1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x)			VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x)			VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x)	VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE		(1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
new file mode 100644
index 0000000..f6bca2c
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -0,0 +1,452 @@
+/*
+ * Samsung Standard Definition Output (SDO) driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "regs-sdo.h"
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)");
+MODULE_LICENSE("GPL");
+
+#define SDO_DEFAULT_STD	V4L2_STD_PAL
+
+struct sdo_format {
+	v4l2_std_id id;
+	/* all modes are 720 pixels wide */
+	unsigned int height;
+	unsigned int cookie;
+};
+
+struct sdo_device {
+	/** pointer to device parent */
+	struct device *dev;
+	/** base address of SDO registers */
+	void __iomem *regs;
+	/** SDO interrupt */
+	unsigned int irq;
+	/** DAC source clock */
+	struct clk *sclk_dac;
+	/** DAC clock */
+	struct clk *dac;
+	/** DAC physical interface */
+	struct clk *dacphy;
+	/** clock for control of VPLL */
+	struct clk *fout_vpll;
+	/** regulator for SDO IP power */
+	struct regulator *vdac;
+	/** regulator for SDO plug detection */
+	struct regulator *vdet;
+	/** subdev used as device interface */
+	struct v4l2_subdev sd;
+	/** current format */
+	const struct sdo_format *fmt;
+};
+
+static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct sdo_device, sd);
+}
+
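+/* read-modify-write helper: update only the bits selected by 'mask' */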
+static inline
+void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask)
+{
+	u32 old = readl(sdev->regs + reg_id);
+	value = (value & mask) | (old & ~mask);
+	writel(value, sdev->regs + reg_id);
+}
+
+static inline
+void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value)
+{
+	writel(value, sdev->regs + reg_id);
+}
+
+static inline
+u32 sdo_read(struct sdo_device *sdev, u32 reg_id)
+{
+	return readl(sdev->regs + reg_id);
+}
+
+static irqreturn_t sdo_irq_handler(int irq, void *dev_data)
+{
+	struct sdo_device *sdev = dev_data;
+
+	/* clear interrupt */
+	sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND);
+	return IRQ_HANDLED;
+}
+
+static void sdo_reg_debug(struct sdo_device *sdev)
+{
+#define DBGREG(reg_id) \
+	dev_info(sdev->dev, #reg_id " = %08x\n", \
+		sdo_read(sdev, reg_id))
+
+	DBGREG(SDO_CLKCON);
+	DBGREG(SDO_CONFIG);
+	DBGREG(SDO_VBI);
+	DBGREG(SDO_DAC);
+	DBGREG(SDO_IRQ);
+	DBGREG(SDO_IRQMASK);
+	DBGREG(SDO_VERSION);
+}
+
+static const struct sdo_format sdo_format[] = {
+	{ V4L2_STD_PAL_N,	.height = 576, .cookie = SDO_PAL_N },
+	{ V4L2_STD_PAL_Nc,	.height = 576, .cookie = SDO_PAL_NC },
+	{ V4L2_STD_PAL_M,	.height = 480, .cookie = SDO_PAL_M },
+	{ V4L2_STD_PAL_60,	.height = 480, .cookie = SDO_PAL_60 },
+	{ V4L2_STD_NTSC_443,	.height = 480, .cookie = SDO_NTSC_443 },
+	{ V4L2_STD_PAL,		.height = 576, .cookie = SDO_PAL_BGHID },
+	{ V4L2_STD_NTSC_M,	.height = 480, .cookie = SDO_NTSC_M },
+};
+
+static const struct sdo_format *sdo_find_format(v4l2_std_id id)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(sdo_format); ++i)
+		if (sdo_format[i].id & id)
+			return &sdo_format[i];
+	return NULL;
+}
+
+static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+	*std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL |
+		V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
+		V4L2_STD_NTSC_443 | V4L2_STD_PAL_60;
+	return 0;
+}
+
+static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+	struct sdo_device *sdev = sd_to_sdev(sd);
+	const struct sdo_format *fmt;
+	fmt = sdo_find_format(std);
+	if (fmt == NULL)
+		return -EINVAL;
+	sdev->fmt = fmt;
+	return 0;
+}
+
+static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+	*std = sd_to_sdev(sd)->fmt->id;
+	return 0;
+}
+
+static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
+	struct v4l2_mbus_framefmt *fmt)
+{
+	struct sdo_device *sdev = sd_to_sdev(sd);
+
+	if (!sdev->fmt)
+		return -ENXIO;
+	/* all modes are 720 pixels wide */
+	fmt->width = 720;
+	fmt->height = sdev->fmt->height;
+	fmt->code = V4L2_MBUS_FMT_FIXED;
+	fmt->field = V4L2_FIELD_INTERLACED;
+	fmt->colorspace = V4L2_COLORSPACE_JPEG;
+	return 0;
+}
+
+static int sdo_s_power(struct v4l2_subdev *sd, int on)
+{
+	struct sdo_device *sdev = sd_to_sdev(sd);
+	struct device *dev = sdev->dev;
+	int ret;
+
+	dev_info(dev, "sdo_s_power(%d)\n", on);
+
+	if (on)
+		ret = pm_runtime_get_sync(dev);
+	else
+		ret = pm_runtime_put_sync(dev);
+
+	/* only values < 0 indicate errors */
+	return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int sdo_streamon(struct sdo_device *sdev)
+{
+	/* set proper clock for Timing Generator */
+	clk_set_rate(sdev->fout_vpll, 54000000);
+	dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
+		clk_get_rate(sdev->fout_vpll));
+	/* enable clock in SDO */
+	sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
+	clk_enable(sdev->dacphy);
+	/* enable DAC */
+	sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
+	sdo_reg_debug(sdev);
+	return 0;
+}
+
+static int sdo_streamoff(struct sdo_device *sdev)
+{
+	int tries;
+
+	sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
+	clk_disable(sdev->dacphy);
+	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
+	for (tries = 100; tries; --tries) {
+		if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
+			break;
+		mdelay(1);
+	}
+	if (tries == 0)
+		dev_err(sdev->dev, "failed to stop streaming\n");
+	return tries ? 0 : -EIO;
+}
+
+static int sdo_s_stream(struct v4l2_subdev *sd, int on)
+{
+	struct sdo_device *sdev = sd_to_sdev(sd);
+	return on ? sdo_streamon(sdev) : sdo_streamoff(sdev);
+}
+
+static const struct v4l2_subdev_core_ops sdo_sd_core_ops = {
+	.s_power = sdo_s_power,
+};
+
+static const struct v4l2_subdev_video_ops sdo_sd_video_ops = {
+	.s_std_output = sdo_s_std_output,
+	.g_std_output = sdo_g_std_output,
+	.g_tvnorms_output = sdo_g_tvnorms_output,
+	.g_mbus_fmt = sdo_g_mbus_fmt,
+	.s_stream = sdo_s_stream,
+};
+
+static const struct v4l2_subdev_ops sdo_sd_ops = {
+	.core = &sdo_sd_core_ops,
+	.video = &sdo_sd_video_ops,
+};
+
+static int sdo_runtime_suspend(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct sdo_device *sdev = sd_to_sdev(sd);
+
+	dev_info(dev, "suspend\n");
+	regulator_disable(sdev->vdet);
+	regulator_disable(sdev->vdac);
+	clk_disable(sdev->sclk_dac);
+	return 0;
+}
+
+static int sdo_runtime_resume(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct sdo_device *sdev = sd_to_sdev(sd);
+
+	dev_info(dev, "resume\n");
+	clk_enable(sdev->sclk_dac);
+	regulator_enable(sdev->vdac);
+	regulator_enable(sdev->vdet);
+
+	/* software reset */
+	sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET);
+	mdelay(10);
+	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET);
+
+	/* setting TV mode */
+	sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK);
+	/* XXX: forcing interlaced mode using undocumented bit */
+	sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE);
+	/* turn all VBI off */
+	sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS |
+		SDO_CVBS_CLOSED_CAPTION_MASK);
+	/* turn all post processing off */
+	sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF |
+		SDO_COMPENSATION_CVBS_COMP_OFF);
+	sdo_reg_debug(sdev);
+	return 0;
+}
+
+static const struct dev_pm_ops sdo_pm_ops = {
+	.runtime_suspend = sdo_runtime_suspend,
+	.runtime_resume	 = sdo_runtime_resume,
+};
+
+static int __devinit sdo_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sdo_device *sdev;
+	struct resource *res;
+	int ret = 0;
+	struct clk *sclk_vpll;
+
+	dev_info(dev, "probe start\n");
+	sdev = devm_kzalloc(&pdev->dev, sizeof *sdev, GFP_KERNEL);
+	if (!sdev) {
+		dev_err(dev, "not enough memory.\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	sdev->dev = dev;
+
+	/* mapping registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(dev, "get memory resource failed.\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	sdev->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sdev->regs == NULL) {
+		dev_err(dev, "register mapping failed.\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	/* acquiring interrupt */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(dev, "get interrupt resource failed.\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+	ret = devm_request_irq(&pdev->dev, res->start, sdo_irq_handler, 0,
+			       "s5p-sdo", sdev);
+	if (ret) {
+		dev_err(dev, "request interrupt failed.\n");
+		goto fail;
+	}
+	sdev->irq = res->start;
+
+	/* acquire clocks */
+	sdev->sclk_dac = clk_get(dev, "sclk_dac");
+	if (IS_ERR_OR_NULL(sdev->sclk_dac)) {
+		dev_err(dev, "failed to get clock 'sclk_dac'\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+	sdev->dac = clk_get(dev, "dac");
+	if (IS_ERR_OR_NULL(sdev->dac)) {
+		dev_err(dev, "failed to get clock 'dac'\n");
+		ret = -ENXIO;
+		goto fail_sclk_dac;
+	}
+	sdev->dacphy = clk_get(dev, "dacphy");
+	if (IS_ERR_OR_NULL(sdev->dacphy)) {
+		dev_err(dev, "failed to get clock 'dacphy'\n");
+		ret = -ENXIO;
+		goto fail_dac;
+	}
+	sclk_vpll = clk_get(dev, "sclk_vpll");
+	if (IS_ERR_OR_NULL(sclk_vpll)) {
+		dev_err(dev, "failed to get clock 'sclk_vpll'\n");
+		ret = -ENXIO;
+		goto fail_dacphy;
+	}
+	clk_set_parent(sdev->sclk_dac, sclk_vpll);
+	clk_put(sclk_vpll);
+	sdev->fout_vpll = clk_get(dev, "fout_vpll");
+	if (IS_ERR_OR_NULL(sdev->fout_vpll)) {
+		dev_err(dev, "failed to get clock 'fout_vpll'\n");
+		ret = -ENXIO;
+		goto fail_dacphy;
+	}
+	dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sdev->fout_vpll));
+
+	/* acquire regulator */
+	sdev->vdac = regulator_get(dev, "vdd33a_dac");
+	if (IS_ERR_OR_NULL(sdev->vdac)) {
+		dev_err(dev, "failed to get regulator 'vdac'\n");
+		ret = -ENXIO;
+		goto fail_fout_vpll;
+	}
+	sdev->vdet = regulator_get(dev, "vdet");
+	if (IS_ERR_OR_NULL(sdev->vdet)) {
+		dev_err(dev, "failed to get regulator 'vdet'\n");
+		ret = -ENXIO;
+		goto fail_vdac;
+	}
+
+	/* enable gate for dac clock, because mixer uses it */
+	clk_enable(sdev->dac);
+
+	/* configure power management */
+	pm_runtime_enable(dev);
+
+	/* configuration of interface subdevice */
+	v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
+	sdev->sd.owner = THIS_MODULE;
+	strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name);
+
+	/* set default format */
+	sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
+	BUG_ON(sdev->fmt == NULL);
+
+	/* keeping subdev in device's private for use by other drivers */
+	dev_set_drvdata(dev, &sdev->sd);
+
+	dev_info(dev, "probe succeeded\n");
+	return 0;
+
+fail_vdac:
+	regulator_put(sdev->vdac);
+fail_fout_vpll:
+	clk_put(sdev->fout_vpll);
+fail_dacphy:
+	clk_put(sdev->dacphy);
+fail_dac:
+	clk_put(sdev->dac);
+fail_sclk_dac:
+	clk_put(sdev->sclk_dac);
+fail:
+	dev_info(dev, "probe failed\n");
+	return ret;
+}
+
+static int __devexit sdo_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+	struct sdo_device *sdev = sd_to_sdev(sd);
+
+	pm_runtime_disable(&pdev->dev);
+	clk_disable(sdev->dac);
+	regulator_put(sdev->vdet);
+	regulator_put(sdev->vdac);
+	clk_put(sdev->fout_vpll);
+	clk_put(sdev->dacphy);
+	clk_put(sdev->dac);
+	clk_put(sdev->sclk_dac);
+
+	dev_info(&pdev->dev, "remove successful\n");
+	return 0;
+}
+
+static struct platform_driver sdo_driver __refdata = {
+	.probe = sdo_probe,
+	.remove = __devexit_p(sdo_remove),
+	.driver = {
+		.name = "s5p-sdo",
+		.owner = THIS_MODULE,
+		.pm = &sdo_pm_ops,
+	}
+};
+
+module_platform_driver(sdo_driver);
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
new file mode 100644
index 0000000..6d348f9
--- /dev/null
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -0,0 +1,422 @@
+/*
+ * Samsung MHL interface driver
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/machine.h>
+#include <linux/slab.h>
+
+#include <mach/gpio.h>
+#include <plat/gpio-cfg.h>
+
+#include <media/sii9234.h>
+#include <media/v4l2-subdev.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung MHL interface driver");
+MODULE_LICENSE("GPL");
+
+struct sii9234_context {
+	struct i2c_client *client;
+	struct regulator *power;
+	int gpio_n_reset;
+	struct v4l2_subdev sd;
+};
+
+static inline struct sii9234_context *sd_to_context(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct sii9234_context, sd);
+}
+
+static inline int sii9234_readb(struct i2c_client *client, int addr)
+{
+	return i2c_smbus_read_byte_data(client, addr);
+}
+
+static inline int sii9234_writeb(struct i2c_client *client, int addr, int value)
+{
+	return i2c_smbus_write_byte_data(client, addr, value);
+}
+
+static inline int sii9234_writeb_mask(struct i2c_client *client, int addr,
+	int value, int mask)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, addr);
+	if (ret < 0)
+		return ret;
+	ret = (ret & ~mask) | (value & mask);
+	return i2c_smbus_write_byte_data(client, addr, ret);
+}
+
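+/*
+ * Indirect (indexed) register access: the 16-bit address is latched through
+ * registers 0xbc/0xbd and the data byte is transferred via register 0xbe.
+ */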
+static inline int sii9234_readb_idx(struct i2c_client *client, int addr)
+{
+	int ret;
+	ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
+	if (ret < 0)
+		return ret;
+	ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
+	if (ret < 0)
+		return ret;
+	return i2c_smbus_read_byte_data(client, 0xbe);
+}
+
+static inline int sii9234_writeb_idx(struct i2c_client *client, int addr,
+	int value)
+{
+	int ret;
+	ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
+	if (ret < 0)
+		return ret;
+	ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
+	if (ret < 0)
+		return ret;
+	ret = i2c_smbus_write_byte_data(client, 0xbe, value);
+	return ret;
+}
+
+static inline int sii9234_writeb_idx_mask(struct i2c_client *client, int addr,
+	int value, int mask)
+{
+	int ret;
+
+	ret = sii9234_readb_idx(client, addr);
+	if (ret < 0)
+		return ret;
+	ret = (ret & ~mask) | (value & mask);
+	return sii9234_writeb_idx(client, addr, ret);
+}
+
+static int sii9234_reset(struct sii9234_context *ctx)
+{
+	struct i2c_client *client = ctx->client;
+	struct device *dev = &client->dev;
+	int ret, tries;
+
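+	/* pulse the reset GPIO: high -> low -> high with 1 ms steps */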
+	gpio_direction_output(ctx->gpio_n_reset, 1);
+	mdelay(1);
+	gpio_direction_output(ctx->gpio_n_reset, 0);
+	mdelay(1);
+	gpio_direction_output(ctx->gpio_n_reset, 1);
+	mdelay(1);
+
+	/* switch to TPI mode */
+	ret = sii9234_writeb(client, 0xc7, 0);
+	if (ret < 0) {
+		dev_err(dev, "failed to set TTPI mode\n");
+		return ret;
+	}
+	for (tries = 0; tries < 100 ; ++tries) {
+		ret = sii9234_readb(client, 0x1b);
+		if (ret > 0)
+			break;
+		if (ret < 0) {
+			dev_err(dev, "failed to reset device\n");
+			return -EIO;
+		}
+		mdelay(1);
+	}
+	if (tries == 100) {
+		dev_err(dev, "maximal number of tries reached\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int sii9234_verify_version(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	int family, rev, tpi_rev, dev_id, sub_id, hdcp, id;
+
+	family = sii9234_readb(client, 0x1b);
+	rev = sii9234_readb(client, 0x1c) & 0x0f;
+	tpi_rev = sii9234_readb(client, 0x1d) & 0x7f;
+	dev_id = sii9234_readb_idx(client, 0x0103);
+	sub_id = sii9234_readb_idx(client, 0x0102);
+	hdcp = sii9234_readb(client, 0x30);
+
+	if (family < 0 || rev < 0 || tpi_rev < 0 || dev_id < 0 ||
+		sub_id < 0 || hdcp < 0) {
+		dev_err(dev, "failed to read chip's version\n");
+		return -EIO;
+	}
+
+	id = (dev_id << 8) | sub_id;
+
+	dev_info(dev, "chip: SiL%02x family: %02x, rev: %02x\n",
+		id, family, rev);
+	dev_info(dev, "tpi_rev:%02x, hdcp: %02x\n", tpi_rev, hdcp);
+	if (id != 0x9234) {
+		dev_err(dev, "not supported chip\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static u8 data[][3] = {
+/* setup from driver created by doonsoo45.kim */
+	{ 0x01, 0x05, 0x04 }, /* Enable Auto soft reset on SCDT = 0 */
+	{ 0x01, 0x08, 0x35 }, /* Power Up TMDS Tx Core */
+	{ 0x01, 0x0d, 0x1c }, /* HDMI Transcode mode enable */
+	{ 0x01, 0x2b, 0x01 }, /* Enable HDCP Compliance workaround */
+	{ 0x01, 0x79, 0x40 }, /* daniel test...MHL_INT */
+	{ 0x01, 0x80, 0x34 }, /* Enable Rx PLL Clock Value */
+	{ 0x01, 0x90, 0x27 }, /* Enable CBUS discovery */
+	{ 0x01, 0x91, 0xe5 }, /* Skip RGND detection */
+	{ 0x01, 0x92, 0x46 }, /* Force MHD mode */
+	{ 0x01, 0x93, 0xdc }, /* Disable CBUS pull-up during RGND measurement */
+	{ 0x01, 0x94, 0x66 }, /* 1.8V CBUS VTH & GND threshold */
+	{ 0x01, 0x95, 0x31 }, /* RGND block & single discovery attempt */
+	{ 0x01, 0x96, 0x22 }, /* use 1K and 2K setting */
+	{ 0x01, 0xa0, 0x10 }, /* SIMG: Term mode */
+	{ 0x01, 0xa1, 0xfc }, /* Disable internal Mobile HD driver */
+	{ 0x01, 0xa3, 0xfa }, /* SIMG: Output Swing  default EB, 3x Clk Mult */
+	{ 0x01, 0xa5, 0x80 }, /* SIMG: RGND Hysterisis, 3x mode for Beast */
+	{ 0x01, 0xa6, 0x0c }, /* SIMG: Swing Offset */
+	{ 0x02, 0x3d, 0x3f }, /* Power up CVCC 1.2V core */
+	{ 0x03, 0x00, 0x00 }, /* SIMG: correcting HW default */
+	{ 0x03, 0x11, 0x01 }, /* Enable TxPLL Clock */
+	{ 0x03, 0x12, 0x15 }, /* Enable Tx Clock Path & Equalizer */
+	{ 0x03, 0x13, 0x60 }, /* SIMG: Set termination value */
+	{ 0x03, 0x14, 0xf0 }, /* SIMG: Change CKDT level */
+	{ 0x03, 0x17, 0x07 }, /* SIMG: PLL Calrefsel */
+	{ 0x03, 0x1a, 0x20 }, /* VCO Cal */
+	{ 0x03, 0x22, 0xe0 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x23, 0xc0 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x24, 0xa0 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x25, 0x80 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x26, 0x60 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x27, 0x40 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x28, 0x20 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x29, 0x00 }, /* SIMG: Auto EQ */
+	{ 0x03, 0x31, 0x0b }, /* SIMG: Rx PLL BW value from I2C BW ~ 4MHz */
+	{ 0x03, 0x45, 0x06 }, /* SIMG: DPLL Mode */
+	{ 0x03, 0x4b, 0x06 }, /* SIMG: Correcting HW default */
+	{ 0x03, 0x4c, 0xa0 }, /* Manual zone control */
+	{ 0x03, 0x4d, 0x02 }, /* SIMG: PLL Mode Value (order is important) */
+};
+
+static int sii9234_set_internal(struct sii9234_context *ctx)
+{
+	struct i2c_client *client = ctx->client;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(data); ++i) {
+		int addr = (data[i][0] << 8) | data[i][1];
+		ret = sii9234_writeb_idx(client, addr, data[i][2]);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+static int sii9234_runtime_suspend(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct sii9234_context *ctx = sd_to_context(sd);
+	struct i2c_client *client = ctx->client;
+
+	dev_info(dev, "suspend start\n");
+
+	sii9234_writeb_mask(client, 0x1e, 3, 3);
+	regulator_disable(ctx->power);
+
+	return 0;
+}
+
+static int sii9234_runtime_resume(struct device *dev)
+{
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+	struct sii9234_context *ctx = sd_to_context(sd);
+	struct i2c_client *client = ctx->client;
+	int ret;
+
+	dev_info(dev, "resume start\n");
+	regulator_enable(ctx->power);
+
+	ret = sii9234_reset(ctx);
+	if (ret)
+		goto fail;
+
+	/* enable tpi */
+	ret = sii9234_writeb_mask(client, 0x1e, 1, 0);
+	if (ret < 0)
+		goto fail;
+	ret = sii9234_set_internal(ctx);
+	if (ret < 0)
+		goto fail;
+
+	return 0;
+
+fail:
+	dev_err(dev, "failed to resume\n");
+	regulator_disable(ctx->power);
+
+	return ret;
+}
+
+static const struct dev_pm_ops sii9234_pm_ops = {
+	.runtime_suspend = sii9234_runtime_suspend,
+	.runtime_resume	 = sii9234_runtime_resume,
+};
+
+static int sii9234_s_power(struct v4l2_subdev *sd, int on)
+{
+	struct sii9234_context *ctx = sd_to_context(sd);
+	int ret;
+
+	if (on)
+		ret = pm_runtime_get_sync(&ctx->client->dev);
+	else
+		ret = pm_runtime_put(&ctx->client->dev);
+	/* only values < 0 indicate errors */
+	return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int sii9234_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct sii9234_context *ctx = sd_to_context(sd);
+
+	/* (dis/en)able TMDS output */
+	sii9234_writeb_mask(ctx->client, 0x1a, enable ? 0 : ~0, 1 << 4);
+	return 0;
+}
+
+static const struct v4l2_subdev_core_ops sii9234_core_ops = {
+	.s_power =  sii9234_s_power,
+};
+
+static const struct v4l2_subdev_video_ops sii9234_video_ops = {
+	.s_stream =  sii9234_s_stream,
+};
+
+static const struct v4l2_subdev_ops sii9234_ops = {
+	.core = &sii9234_core_ops,
+	.video = &sii9234_video_ops,
+};
+
+static int __devinit sii9234_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct sii9234_platform_data *pdata = dev->platform_data;
+	struct sii9234_context *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		dev_err(dev, "out of memory\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	ctx->client = client;
+
+	ctx->power = regulator_get(dev, "hdmi-en");
+	if (IS_ERR(ctx->power)) {
+		dev_err(dev, "failed to acquire regulator hdmi-en\n");
+		ret = PTR_ERR(ctx->power);
+		goto fail_ctx;
+	}
+
+	ctx->gpio_n_reset = pdata->gpio_n_reset;
+	ret = gpio_request(ctx->gpio_n_reset, "MHL_RST");
+	if (ret) {
+		dev_err(dev, "failed to acquire MHL_RST gpio\n");
+		goto fail_power;
+	}
+
+	v4l2_i2c_subdev_init(&ctx->sd, client, &sii9234_ops);
+
+	pm_runtime_enable(dev);
+
+	/* enable device */
+	ret = pm_runtime_get_sync(dev);
+	if (ret)
+		goto fail_pm;
+
+	/* verify chip version */
+	ret = sii9234_verify_version(client);
+	if (ret)
+		goto fail_pm_get;
+
+	/* stop processing */
+	pm_runtime_put(dev);
+
+	dev_info(dev, "probe successful\n");
+
+	return 0;
+
+fail_pm_get:
+	pm_runtime_put_sync(dev);
+
+fail_pm:
+	pm_runtime_disable(dev);
+	gpio_free(ctx->gpio_n_reset);
+
+fail_power:
+	regulator_put(ctx->power);
+
+fail_ctx:
+	kfree(ctx);
+
+fail:
+	dev_err(dev, "probe failed\n");
+
+	return ret;
+}
+
+static int __devexit sii9234_remove(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct sii9234_context *ctx = sd_to_context(sd);
+
+	pm_runtime_disable(dev);
+	gpio_free(ctx->gpio_n_reset);
+	regulator_put(ctx->power);
+	kfree(ctx);
+
+	dev_info(dev, "remove successful\n");
+
+	return 0;
+}
+
+
+static const struct i2c_device_id sii9234_id[] = {
+	{ "SII9234", 0 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(i2c, sii9234_id);
+static struct i2c_driver sii9234_driver = {
+	.driver = {
+		.name	= "sii9234",
+		.owner	= THIS_MODULE,
+		.pm = &sii9234_pm_ops,
+	},
+	.probe		= sii9234_probe,
+	.remove		= __devexit_p(sii9234_remove),
+	.id_table = sii9234_id,
+};
+
+module_i2c_driver(sii9234_driver);
diff --git a/drivers/media/platform/sh_mobile_ceu_camera.c b/drivers/media/platform/sh_mobile_ceu_camera.c
new file mode 100644
index 0000000..0baaf94
--- /dev/null
+++ b/drivers/media/platform/sh_mobile_ceu_camera.c
@@ -0,0 +1,2331 @@
+/*
+ * V4L2 Driver for SuperH Mobile CEU interface
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/soc_camera.h>
+#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-mediabus.h>
+#include <media/soc_mediabus.h>
+
+/* register offsets for sh7722 / sh7723 */
+
+#define CAPSR  0x00 /* Capture start register */
+#define CAPCR  0x04 /* Capture control register */
+#define CAMCR  0x08 /* Capture interface control register */
+#define CMCYR  0x0c /* Capture interface cycle register */
+#define CAMOR  0x10 /* Capture interface offset register */
+#define CAPWR  0x14 /* Capture interface width register */
+#define CAIFR  0x18 /* Capture interface input format register */
+#define CSTCR  0x20 /* Camera strobe control register (<= sh7722) */
+#define CSECR  0x24 /* Camera strobe emission count register (<= sh7722) */
+#define CRCNTR 0x28 /* CEU register control register */
+#define CRCMPR 0x2c /* CEU register forcible control register */
+#define CFLCR  0x30 /* Capture filter control register */
+#define CFSZR  0x34 /* Capture filter size clip register */
+#define CDWDR  0x38 /* Capture destination width register */
+#define CDAYR  0x3c /* Capture data address Y register */
+#define CDACR  0x40 /* Capture data address C register */
+#define CDBYR  0x44 /* Capture data bottom-field address Y register */
+#define CDBCR  0x48 /* Capture data bottom-field address C register */
+#define CBDSR  0x4c /* Capture bundle destination size register */
+#define CFWCR  0x5c /* Firewall operation control register */
+#define CLFCR  0x60 /* Capture low-pass filter control register */
+#define CDOCR  0x64 /* Capture data output control register */
+#define CDDCR  0x68 /* Capture data complexity level register */
+#define CDDAR  0x6c /* Capture data complexity level address register */
+#define CEIER  0x70 /* Capture event interrupt enable register */
+#define CETCR  0x74 /* Capture event flag clear register */
+#define CSTSR  0x7c /* Capture status register */
+#define CSRTR  0x80 /* Capture software reset register */
+#define CDSSR  0x84 /* Capture data size register */
+#define CDAYR2 0x90 /* Capture data address Y register 2 */
+#define CDACR2 0x94 /* Capture data address C register 2 */
+#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
+#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
+
+#undef DEBUG_GEOMETRY
+#ifdef DEBUG_GEOMETRY
+#define dev_geo	dev_info
+#else
+#define dev_geo	dev_dbg
+#endif
+
+/* per video frame buffer */
+struct sh_mobile_ceu_buffer {
+	struct vb2_buffer vb; /* v4l buffer must be first */
+	struct list_head queue;
+};
+
+struct sh_mobile_ceu_dev {
+	struct soc_camera_host ici;
+	struct soc_camera_device *icd;
+	struct platform_device *csi2_pdev;
+
+	unsigned int irq;
+	void __iomem *base;
+	size_t video_limit;
+	size_t buf_total;
+
+	spinlock_t lock;		/* Protects video buffer lists */
+	struct list_head capture;
+	struct vb2_buffer *active;
+	struct vb2_alloc_ctx *alloc_ctx;
+
+	struct sh_mobile_ceu_info *pdata;
+	struct completion complete;
+
+	u32 cflcr;
+
+	/* static max sizes either from platform data or default */
+	int max_width;
+	int max_height;
+
+	enum v4l2_field field;
+	int sequence;
+
+	unsigned int image_mode:1;
+	unsigned int is_16bit:1;
+	unsigned int frozen:1;
+};
+
+struct sh_mobile_ceu_cam {
+	/* CEU offsets within the camera output, before the CEU scaler */
+	unsigned int ceu_left;
+	unsigned int ceu_top;
+	/* Client output, as seen by the CEU */
+	unsigned int width;
+	unsigned int height;
+	/*
+	 * User window from S_CROP / G_CROP, produced by client cropping and
+	 * scaling, CEU scaling and CEU cropping, mapped back onto the client
+	 * input window
+	 */
+	struct v4l2_rect subrect;
+	/* Camera cropping rectangle */
+	struct v4l2_rect rect;
+	const struct soc_mbus_pixelfmt *extra_fmt;
+	enum v4l2_mbus_pixelcode code;
+};
+
+static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
+{
+	return container_of(vb, struct sh_mobile_ceu_buffer, vb);
+}
+
+static void ceu_write(struct sh_mobile_ceu_dev *priv,
+		      unsigned long reg_offs, u32 data)
+{
+	iowrite32(data, priv->base + reg_offs);
+}
+
+static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
+{
+	return ioread32(priv->base + reg_offs);
+}
+
+static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
+{
+	int i, success = 0;
+	struct soc_camera_device *icd = pcdev->icd;
+
+	ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
+
+	/* wait CSTSR.CPTON bit */
+	for (i = 0; i < 1000; i++) {
+		if (!(ceu_read(pcdev, CSTSR) & 1)) {
+			success++;
+			break;
+		}
+		udelay(1);
+	}
+
+	/* wait CAPSR.CPKIL bit */
+	for (i = 0; i < 1000; i++) {
+		if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
+			success++;
+			break;
+		}
+		udelay(1);
+	}
+
+
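+	/* both CSTSR.CPTON and CAPSR.CPKIL must have cleared */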
+	if (2 != success) {
+		dev_warn(icd->pdev, "soft reset time out\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ *  Videobuf operations
+ */
+
+/*
+ * .queue_setup() is called to check, whether the driver can accept the
+ *		  requested number of buffers and to fill in plane sizes
+ *		  for the current frame format if required
+ */
+static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
+			const struct v4l2_format *fmt,
+			unsigned int *count, unsigned int *num_planes,
+			unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+	if (fmt) {
+		const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
+								fmt->fmt.pix.pixelformat);
+		unsigned int bytes_per_line;
+		int ret;
+
+		if (!xlate)
+			return -EINVAL;
+
+		ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+					      xlate->host_fmt);
+		if (ret < 0)
+			return ret;
+
+		bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+		ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+					  fmt->fmt.pix.height);
+		if (ret < 0)
+			return ret;
+
+		sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
+	} else {
+		/* Called from VIDIOC_REQBUFS or in compatibility mode */
+		sizes[0] = icd->sizeimage;
+	}
+
+	alloc_ctxs[0] = pcdev->alloc_ctx;
+
+	if (!vq->num_buffers)
+		pcdev->sequence = 0;
+
+	if (!*count)
+		*count = 2;
+
+	/* If *num_planes != 0, we have already verified *count. */
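+	/*
+	 * Illustrative example only (assuming 4 KiB pages): with a 4 MiB
+	 * dedicated memory resource and 640x480 NV12 buffers (460800 bytes,
+	 * 462848 page-aligned) at most 9 buffers fit, so *count is clamped
+	 * accordingly.
+	 */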
+	if (pcdev->video_limit && !*num_planes) {
+		size_t size = PAGE_ALIGN(sizes[0]) * *count;
+
+		if (size + pcdev->buf_total > pcdev->video_limit)
+			*count = (pcdev->video_limit - pcdev->buf_total) /
+				PAGE_ALIGN(sizes[0]);
+	}
+
+	*num_planes = 1;
+
+	dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
+
+	return 0;
+}
+
+#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
+#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
+#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
+#define CEU_CEIER_VBP   (1 << 20) /* vbp error */
+#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
+#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
+
+/*
+ * The return value doesn't reflect the success or failure of queuing the new
+ * buffer, but rather the status of the previous buffer.
+ */
+static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
+{
+	struct soc_camera_device *icd = pcdev->icd;
+	dma_addr_t phys_addr_top, phys_addr_bottom;
+	unsigned long top1, top2;
+	unsigned long bottom1, bottom2;
+	u32 status;
+	bool planar;
+	int ret = 0;
+
+	/*
+	 * The hardware is _very_ picky about this sequence. Especially
+	 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
+	 * several not-so-well documented interrupt sources in CETCR.
+	 */
+	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
+	status = ceu_read(pcdev, CETCR);
+	ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
+	if (!pcdev->frozen)
+		ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
+	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
+	ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
+
+	/*
+	 * When a VBP interrupt occurs, a capture end interrupt does not occur
+	 * and the image of that frame is not captured correctly. So, soft reset
+	 * is needed here.
+	 */
+	if (status & CEU_CEIER_VBP) {
+		sh_mobile_ceu_soft_reset(pcdev);
+		ret = -EIO;
+	}
+
+	if (pcdev->frozen) {
+		complete(&pcdev->complete);
+		return ret;
+	}
+
+	if (!pcdev->active)
+		return ret;
+
+	if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
+		top1	= CDBYR;
+		top2	= CDBCR;
+		bottom1	= CDAYR;
+		bottom2	= CDACR;
+	} else {
+		top1	= CDAYR;
+		top2	= CDACR;
+		bottom1	= CDBYR;
+		bottom2	= CDBCR;
+	}
+
+	phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);
+
+	switch (icd->current_fmt->host_fmt->fourcc) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		planar = true;
+		break;
+	default:
+		planar = false;
+	}
+
+	ceu_write(pcdev, top1, phys_addr_top);
+	if (V4L2_FIELD_NONE != pcdev->field) {
+		phys_addr_bottom = phys_addr_top + icd->bytesperline;
+		ceu_write(pcdev, bottom1, phys_addr_bottom);
+	}
+
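+	/* For the planar NV formats the C plane follows the Y plane */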
+	if (planar) {
+		phys_addr_top += icd->bytesperline * icd->user_height;
+		ceu_write(pcdev, top2, phys_addr_top);
+		if (V4L2_FIELD_NONE != pcdev->field) {
+			phys_addr_bottom = phys_addr_top + icd->bytesperline;
+			ceu_write(pcdev, bottom2, phys_addr_bottom);
+		}
+	}
+
+	ceu_write(pcdev, CAPSR, 0x1); /* start capture */
+
+	return ret;
+}
+
+static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
+{
+	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+
+	/* Added list head initialization on alloc */
+	WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
+
+	return 0;
+}
+
+static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+	unsigned long size;
+
+	size = icd->sizeimage;
+
+	if (vb2_plane_size(vb, 0) < size) {
+		dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
+			vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+		goto error;
+	}
+
+	vb2_set_plane_payload(vb, 0, size);
+
+	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+#ifdef DEBUG
+	/*
+	 * This can be useful if you want to see if we actually fill
+	 * the buffer with something
+	 */
+	if (vb2_plane_vaddr(vb, 0))
+		memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+	spin_lock_irq(&pcdev->lock);
+	list_add_tail(&buf->queue, &pcdev->capture);
+
+	if (!pcdev->active) {
+		/*
+		 * Because there was no active buffer at this moment,
+		 * we are not interested in the return value of
+		 * sh_mobile_ceu_capture here.
+		 */
+		pcdev->active = vb;
+		sh_mobile_ceu_capture(pcdev);
+	}
+	spin_unlock_irq(&pcdev->lock);
+
+	return;
+
+error:
+	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+	spin_lock_irq(&pcdev->lock);
+
+	if (pcdev->active == vb) {
+		/* disable capture (release DMA buffer), reset */
+		ceu_write(pcdev, CAPSR, 1 << 16);
+		pcdev->active = NULL;
+	}
+
+	/*
+	 * It doesn't hurt if the list is empty, but it does hurt if queuing
+	 * the buffer failed and .buf_init() hasn't been called
+	 */
+	if (buf->queue.next)
+		list_del_init(&buf->queue);
+
+	pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
+	dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+		pcdev->buf_total);
+
+	spin_unlock_irq(&pcdev->lock);
+}
+
+static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
+{
+	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+	pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
+	dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+		pcdev->buf_total);
+
+	/* This is for locking debugging only */
+	INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);
+	return 0;
+}
+
+static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
+{
+	struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct list_head *buf_head, *tmp;
+
+	spin_lock_irq(&pcdev->lock);
+
+	pcdev->active = NULL;
+
+	list_for_each_safe(buf_head, tmp, &pcdev->capture)
+		list_del_init(buf_head);
+
+	spin_unlock_irq(&pcdev->lock);
+
+	return sh_mobile_ceu_soft_reset(pcdev);
+}
+
+static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
+	.queue_setup	= sh_mobile_ceu_videobuf_setup,
+	.buf_prepare	= sh_mobile_ceu_videobuf_prepare,
+	.buf_queue	= sh_mobile_ceu_videobuf_queue,
+	.buf_cleanup	= sh_mobile_ceu_videobuf_release,
+	.buf_init	= sh_mobile_ceu_videobuf_init,
+	.wait_prepare	= soc_camera_unlock,
+	.wait_finish	= soc_camera_lock,
+	.stop_streaming	= sh_mobile_ceu_stop_streaming,
+};
+
+static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
+{
+	struct sh_mobile_ceu_dev *pcdev = data;
+	struct vb2_buffer *vb;
+	int ret;
+
+	spin_lock(&pcdev->lock);
+
+	vb = pcdev->active;
+	if (!vb)
+		/* Stale interrupt from a released buffer */
+		goto out;
+
+	list_del_init(&to_ceu_vb(vb)->queue);
+
+	if (!list_empty(&pcdev->capture))
+		pcdev->active = &list_entry(pcdev->capture.next,
+					    struct sh_mobile_ceu_buffer, queue)->vb;
+	else
+		pcdev->active = NULL;
+
+	ret = sh_mobile_ceu_capture(pcdev);
+	do_gettimeofday(&vb->v4l2_buf.timestamp);
+	if (!ret) {
+		vb->v4l2_buf.field = pcdev->field;
+		vb->v4l2_buf.sequence = pcdev->sequence++;
+	}
+	vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+out:
+	spin_unlock(&pcdev->lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
+{
+	struct v4l2_subdev *sd;
+
+	if (!pcdev->csi2_pdev)
+		return NULL;
+
+	v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
+		if (&pcdev->csi2_pdev->dev == v4l2_get_subdevdata(sd))
+			return sd;
+
+	return NULL;
+}
+
+/* Called with .video_lock held */
+static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct v4l2_subdev *csi2_sd;
+	int ret;
+
+	if (pcdev->icd)
+		return -EBUSY;
+
+	dev_info(icd->parent,
+		 "SuperH Mobile CEU driver attached to camera %d\n",
+		 icd->devnum);
+
+	pm_runtime_get_sync(ici->v4l2_dev.dev);
+
+	pcdev->buf_total = 0;
+
+	ret = sh_mobile_ceu_soft_reset(pcdev);
+
+	csi2_sd = find_csi2(pcdev);
+	if (csi2_sd) {
+		csi2_sd->grp_id = soc_camera_grp_id(icd);
+		v4l2_set_subdev_hostdata(csi2_sd, icd);
+	}
+
+	ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+		pm_runtime_put_sync(ici->v4l2_dev.dev);
+		return ret;
+	}
+
+	/*
+	 * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
+	 * has not found this soc-camera device among its clients
+	 */
+	if (ret == -ENODEV && csi2_sd)
+		csi2_sd->grp_id = 0;
+	pcdev->icd = icd;
+
+	return 0;
+}
+
+/* Called with .video_lock held */
+static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
+
+	BUG_ON(icd != pcdev->icd);
+
+	v4l2_subdev_call(csi2_sd, core, s_power, 0);
+	if (csi2_sd)
+		csi2_sd->grp_id = 0;
+	/* disable capture, disable interrupts */
+	ceu_write(pcdev, CEIER, 0);
+	sh_mobile_ceu_soft_reset(pcdev);
+
+	/* make sure active buffer is canceled */
+	spin_lock_irq(&pcdev->lock);
+	if (pcdev->active) {
+		list_del_init(&to_ceu_vb(pcdev->active)->queue);
+		vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR);
+		pcdev->active = NULL;
+	}
+	spin_unlock_irq(&pcdev->lock);
+
+	pm_runtime_put_sync(ici->v4l2_dev.dev);
+
+	dev_info(icd->parent,
+		 "SuperH Mobile CEU driver detached from camera %d\n",
+		 icd->devnum);
+
+	pcdev->icd = NULL;
+}
+
+/*
+ * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
+ * in SH7722 Hardware Manual
+ */
+static unsigned int size_dst(unsigned int src, unsigned int scale)
+{
+	unsigned int mant_pre = scale >> 12;
+	if (!src || !scale)
+		return src;
+	return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
+		mant_pre * 4096 / scale + 1;
+}
+
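+/*
+ * Worked example (derived from the formulas above, not from the hardware
+ * manual): scaling 640 pixels down to a requested 480 starts with
+ * scale = (640 * 4096 / 480) & ~7 = 5456, for which size_dst() predicts only
+ * 479 output pixels; the loop below backs the scale off to 5448, which
+ * yields exactly 480.
+ */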
+static u16 calc_scale(unsigned int src, unsigned int *dst)
+{
+	u16 scale;
+
+	if (src == *dst)
+		return 0;
+
+	scale = (src * 4096 / *dst) & ~7;
+
+	while (scale > 4096 && size_dst(src, scale) < *dst)
+		scale -= 8;
+
+	*dst = size_dst(src, scale);
+
+	return scale;
+}
+
+/* rect is guaranteed to not exceed the scaled camera rectangle */
+static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	unsigned int height, width, cdwdr_width, in_width, in_height;
+	unsigned int left_offset, top_offset;
+	u32 camor;
+
+	dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
+		icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
+
+	left_offset	= cam->ceu_left;
+	top_offset	= cam->ceu_top;
+
+	WARN_ON(icd->user_width & 3 || icd->user_height & 3);
+
+	width = icd->user_width;
+
+	if (pcdev->image_mode) {
+		in_width = cam->width;
+		if (!pcdev->is_16bit) {
+			in_width *= 2;
+			left_offset *= 2;
+		}
+	} else {
+		unsigned int w_factor;
+
+		switch (icd->current_fmt->host_fmt->packing) {
+		case SOC_MBUS_PACKING_2X8_PADHI:
+			w_factor = 2;
+			break;
+		default:
+			w_factor = 1;
+		}
+
+		in_width = cam->width * w_factor;
+		left_offset *= w_factor;
+	}
+
+	cdwdr_width = icd->bytesperline;
+
+	height = icd->user_height;
+	in_height = cam->height;
+	if (V4L2_FIELD_NONE != pcdev->field) {
+		height = (height / 2) & ~3;
+		in_height /= 2;
+		top_offset /= 2;
+		cdwdr_width *= 2;
+	}
+
+	/* CSI2 special configuration */
+	if (pcdev->pdata->csi2) {
+		in_width = ((in_width - 2) * 2);
+		left_offset *= 2;
+	}
+
+	/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
+	camor = left_offset | (top_offset << 16);
+
+	dev_geo(icd->parent,
+		"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
+		(in_height << 16) | in_width, (height << 16) | width,
+		cdwdr_width);
+
+	ceu_write(pcdev, CAMOR, camor);
+	ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
+	/* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
+	ceu_write(pcdev, CFSZR, (height << 16) | width);
+	ceu_write(pcdev, CDWDR, cdwdr_width);
+}
+
+static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
+{
+	u32 capsr = ceu_read(pcdev, CAPSR);
+	ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
+	return capsr;
+}
+
+static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
+{
+	unsigned long timeout = jiffies + 10 * HZ;
+
+	/*
+	 * Wait until the end of the current frame. It can take a long time,
+	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
+	 */
+	while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
+		msleep(1);
+
+	if (time_after(jiffies, timeout)) {
+		dev_err(pcdev->ici.v4l2_dev.dev,
+			"Timeout waiting for frame end! Interface problem?\n");
+		return;
+	}
+
+	/* Wait until the reset clears; this should not hang... */
+	while (ceu_read(pcdev, CAPSR) & (1 << 16))
+		udelay(10);
+
+	/* Anything to restore? */
+	if (capsr & ~(1 << 16))
+		ceu_write(pcdev, CAPSR, capsr);
+}
+
+/* Find the bus subdevice driver, e.g., CSI2 */
+static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
+					   struct soc_camera_device *icd)
+{
+	if (pcdev->csi2_pdev) {
+		struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
+		if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
+			return csi2_sd;
+	}
+
+	return soc_camera_to_subdev(icd);
+}
+
+#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER |	\
+		V4L2_MBUS_PCLK_SAMPLE_RISING |	\
+		V4L2_MBUS_HSYNC_ACTIVE_HIGH |	\
+		V4L2_MBUS_HSYNC_ACTIVE_LOW |	\
+		V4L2_MBUS_VSYNC_ACTIVE_HIGH |	\
+		V4L2_MBUS_VSYNC_ACTIVE_LOW |	\
+		V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+/* Capture is not running, no interrupts, no locking needed */
+static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long value, common_flags = CEU_BUS_FLAGS;
+	u32 capsr = capture_save_reset(pcdev);
+	unsigned int yuv_lineskip;
+	int ret;
+
+	/*
+	 * If the client doesn't implement g_mbus_config, we just use our
+	 * platform data
+	 */
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  common_flags);
+		if (!common_flags)
+			return -EINVAL;
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	}
+
+	/* Make choices based on platform preferences */
+	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+		if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+		if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+	}
+
+	cfg.flags = common_flags;
+	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD)
+		return ret;
+
+	if (icd->current_fmt->host_fmt->bits_per_sample > 8)
+		pcdev->is_16bit = 1;
+	else
+		pcdev->is_16bit = 0;
+
+	ceu_write(pcdev, CRCNTR, 0);
+	ceu_write(pcdev, CRCMPR, 0);
+
+	value = 0x00000010; /* data fetch by default */
+	yuv_lineskip = 0x10;
+
+	switch (icd->current_fmt->host_fmt->fourcc) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		/* convert 4:2:2 -> 4:2:0 */
+		yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
+		/* fall-through */
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		switch (cam->code) {
+		case V4L2_MBUS_FMT_UYVY8_2X8:
+			value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
+			break;
+		case V4L2_MBUS_FMT_VYUY8_2X8:
+			value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
+			break;
+		case V4L2_MBUS_FMT_YUYV8_2X8:
+			value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
+			break;
+		case V4L2_MBUS_FMT_YVYU8_2X8:
+			value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
+	    icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
+		value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
+
+	value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
+	value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
+
+	if (pcdev->pdata->csi2) /* CSI2 mode */
+		value |= 3 << 12;
+	else if (pcdev->is_16bit)
+		value |= 1 << 12;
+	else if (pcdev->pdata->flags & SH_CEU_FLAG_LOWER_8BIT)
+		value |= 2 << 12;
+
+	ceu_write(pcdev, CAMCR, value);
+
+	ceu_write(pcdev, CAPCR, 0x00300000);
+
+	switch (pcdev->field) {
+	case V4L2_FIELD_INTERLACED_TB:
+		value = 0x101;
+		break;
+	case V4L2_FIELD_INTERLACED_BT:
+		value = 0x102;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	ceu_write(pcdev, CAIFR, value);
+
+	sh_mobile_ceu_set_rect(icd);
+	mdelay(1);
+
+	dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
+	ceu_write(pcdev, CFLCR, pcdev->cflcr);
+
+	/*
+	 * A few words about byte order (observed in Big Endian mode)
+	 *
+	 * In data fetch mode bytes are received in chunks of 8 bytes.
+	 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
+	 *
+	 * The data is however by default written to memory in reverse order:
+	 * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
+	 *
+	 * The lowest three bits of CDOCR allow us to do swapping;
+	 * using 7 we swap the data bytes to match the incoming order:
+	 * D0, D1, D2, D3, D4, D5, D6, D7
+	 */
+	value = 0x00000007 | yuv_lineskip;
+
+	ceu_write(pcdev, CDOCR, value);
+	ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
+
+	capture_restore(pcdev, capsr);
+
+	/* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
+	return 0;
+}
+
+static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
+				       unsigned char buswidth)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+	unsigned long common_flags = CEU_BUS_FLAGS;
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	int ret;
+
+	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+	if (!ret)
+		common_flags = soc_mbus_config_compatible(&cfg,
+							  common_flags);
+	else if (ret != -ENOIOCTLCMD)
+		return ret;
+
+	if (!common_flags || buswidth > 16)
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
+	{
+		.fourcc			= V4L2_PIX_FMT_NV12,
+		.name			= "NV12",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_1_5X8,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+	}, {
+		.fourcc			= V4L2_PIX_FMT_NV21,
+		.name			= "NV21",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_1_5X8,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+	}, {
+		.fourcc			= V4L2_PIX_FMT_NV16,
+		.name			= "NV16",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PLANAR_Y_C,
+	}, {
+		.fourcc			= V4L2_PIX_FMT_NV61,
+		.name			= "NV61",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PLANAR_Y_C,
+	},
+};
+
+/* This will be corrected as we get more formats */
+static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+	return	fmt->packing == SOC_MBUS_PACKING_NONE ||
+		(fmt->bits_per_sample == 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
+		(fmt->bits_per_sample == 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+		(fmt->bits_per_sample > 8 &&
+		 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+
+static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
+{
+	return container_of(ctrl->handler, struct soc_camera_device,
+							ctrl_handler);
+}
+
+static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct soc_camera_device *icd = ctrl_to_icd(ctrl);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+	switch (ctrl->id) {
+	case V4L2_CID_SHARPNESS:
+		switch (icd->current_fmt->host_fmt->fourcc) {
+		case V4L2_PIX_FMT_NV12:
+		case V4L2_PIX_FMT_NV21:
+		case V4L2_PIX_FMT_NV16:
+		case V4L2_PIX_FMT_NV61:
+			ceu_write(pcdev, CLFCR, !ctrl->val);
+			return 0;
+		}
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
+	.s_ctrl = sh_mobile_ceu_s_ctrl,
+};
+
+static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
+				     struct soc_camera_format_xlate *xlate)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	int ret, k, n;
+	int formats = 0;
+	struct sh_mobile_ceu_cam *cam;
+	enum v4l2_mbus_pixelcode code;
+	const struct soc_mbus_pixelfmt *fmt;
+
+	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+	if (ret < 0)
+		/* No more formats */
+		return 0;
+
+	fmt = soc_mbus_get_fmtdesc(code);
+	if (!fmt) {
+		dev_warn(dev, "unsupported format code #%u: %d\n", idx, code);
+		return 0;
+	}
+
+	if (!pcdev->pdata->csi2) {
+		/* Are there any restrictions in the CSI-2 case? */
+		ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
+		if (ret < 0)
+			return 0;
+	}
+
+	if (!icd->host_priv) {
+		struct v4l2_mbus_framefmt mf;
+		struct v4l2_rect rect;
+		int shift = 0;
+
+		/* Add our control */
+		v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
+				  V4L2_CID_SHARPNESS, 0, 1, 1, 0);
+		if (icd->ctrl_handler.error)
+			return icd->ctrl_handler.error;
+
+		/* FIXME: subwindow is lost between close / open */
+
+		/* Cache current client geometry */
+		ret = client_g_rect(sd, &rect);
+		if (ret < 0)
+			return ret;
+
+		/* First time */
+		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * All currently existing CEU implementations support 2560x1920
+		 * or larger frames. If the sensor is proposing too big a frame,
+		 * don't bother with larger sizes possibly supported by the
+		 * CEU, just try VGA multiples. If needed, this can be adjusted
+		 * in the future.
+		 */
+		while ((mf.width > pcdev->max_width ||
+			mf.height > pcdev->max_height) && shift < 4) {
+			/* Try 2560x1920, 1280x960, 640x480, 320x240 */
+			mf.width	= 2560 >> shift;
+			mf.height	= 1920 >> shift;
+			ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					s_mbus_fmt, &mf);
+			if (ret < 0)
+				return ret;
+			shift++;
+		}
+
+		if (shift == 4) {
+			dev_err(dev, "Failed to configure the client below %ux%u\n",
+				mf.width, mf.height);
+			return -EIO;
+		}
+
+		dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
+
+		cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+		if (!cam)
+			return -ENOMEM;
+
+		/* We are called with current camera crop, initialise subrect with it */
+		cam->rect	= rect;
+		cam->subrect	= rect;
+
+		cam->width	= mf.width;
+		cam->height	= mf.height;
+
+		icd->host_priv = cam;
+	} else {
+		cam = icd->host_priv;
+	}
+
+	/* Beginning of a pass */
+	if (!idx)
+		cam->extra_fmt = NULL;
+
+	switch (code) {
+	case V4L2_MBUS_FMT_UYVY8_2X8:
+	case V4L2_MBUS_FMT_VYUY8_2X8:
+	case V4L2_MBUS_FMT_YUYV8_2X8:
+	case V4L2_MBUS_FMT_YVYU8_2X8:
+		if (cam->extra_fmt)
+			break;
+
+		/*
+		 * Our case is simple so far: for any of the above four camera
+		 * formats we add all our four synthesized NV* formats, so,
+		 * just marking the device with a single flag suffices. If
+		 * the format generation rules are more complex, you would have
+		 * to actually hang your already added / counted formats onto
+		 * the host_priv pointer and check whether the format you're
+		 * going to add now is already there.
+		 */
+		cam->extra_fmt = sh_mobile_ceu_formats;
+
+		n = ARRAY_SIZE(sh_mobile_ceu_formats);
+		formats += n;
+		for (k = 0; xlate && k < n; k++) {
+			xlate->host_fmt	= &sh_mobile_ceu_formats[k];
+			xlate->code	= code;
+			xlate++;
+			dev_dbg(dev, "Providing format %s using code %d\n",
+				sh_mobile_ceu_formats[k].name, code);
+		}
+		break;
+	default:
+		if (!sh_mobile_ceu_packing_supported(fmt))
+			return 0;
+	}
+
+	/* Generic pass-through */
+	formats++;
+	if (xlate) {
+		xlate->host_fmt	= fmt;
+		xlate->code	= code;
+		xlate++;
+		dev_dbg(dev, "Providing format %s in pass-through mode\n",
+			fmt->name);
+	}
+
+	return formats;
+}
+
+static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
+{
+	kfree(icd->host_priv);
+	icd->host_priv = NULL;
+}
+
+/* Check if any dimension of r1 is smaller than the respective one of r2 */
+static bool is_smaller(struct v4l2_rect *r1, struct v4l2_rect *r2)
+{
+	return r1->width < r2->width || r1->height < r2->height;
+}
+
+/* Check if r1 fails to cover r2 */
+static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2)
+{
+	return r1->left > r2->left || r1->top > r2->top ||
+		r1->left + r1->width < r2->left + r2->width ||
+		r1->top + r1->height < r2->top + r2->height;
+}
+
+static unsigned int scale_down(unsigned int size, unsigned int scale)
+{
+	return (size * 4096 + scale / 2) / scale;
+}
+
+static unsigned int calc_generic_scale(unsigned int input, unsigned int output)
+{
+	return (input * 4096 + output / 2) / output;
+}
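+
+/*
+ * The two helpers above are approximate inverses in this 4096-based fixed
+ * point representation, e.g. calc_generic_scale(640, 480) = 5461 and
+ * scale_down(640, 5461) = 480 (example computed from the formulas above).
+ */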
+
+/* Get and store current client crop */
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
+{
+	struct v4l2_crop crop;
+	struct v4l2_cropcap cap;
+	int ret;
+
+	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	ret = v4l2_subdev_call(sd, video, g_crop, &crop);
+	if (!ret) {
+		*rect = crop.c;
+		return ret;
+	}
+
+	/* Camera driver doesn't support .g_crop(), assume default rectangle */
+	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+	if (!ret)
+		*rect = cap.defrect;
+
+	return ret;
+}
+
+/* Client crop has changed, update our sub-rectangle to remain within the area */
+static void update_subrect(struct sh_mobile_ceu_cam *cam)
+{
+	struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect;
+
+	if (rect->width < subrect->width)
+		subrect->width = rect->width;
+
+	if (rect->height < subrect->height)
+		subrect->height = rect->height;
+
+	if (rect->left > subrect->left)
+		subrect->left = rect->left;
+	else if (rect->left + rect->width >
+		 subrect->left + subrect->width)
+		subrect->left = rect->left + rect->width -
+			subrect->width;
+
+	if (rect->top > subrect->top)
+		subrect->top = rect->top;
+	else if (rect->top + rect->height >
+		 subrect->top + subrect->height)
+		subrect->top = rect->top + rect->height -
+			subrect->height;
+}
+
+/*
+ * The iterative approach, common to both scaling and cropping, is:
+ * 1. check whether the client can produce exactly what the user requested
+ * 2. if (1) failed, try to double the client image until we get one big enough
+ * 3. if (2) failed, try to request the maximum image
+ */
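+/*
+ * For instance (illustrative only): if the user asks for an 800x600 window
+ * but the client can only deliver VGA multiples, step 2 doubles up to
+ * 1280x960 and the CEU then crops / scales that down to the requested size.
+ */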
+static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
+			 struct v4l2_crop *cam_crop)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
+	struct device *dev = sd->v4l2_dev->dev;
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct v4l2_cropcap cap;
+	int ret;
+	unsigned int width, height;
+
+	v4l2_subdev_call(sd, video, s_crop, crop);
+	ret = client_g_rect(sd, cam_rect);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Now cam_crop contains the current camera input rectangle, and it must
+	 * be within camera cropcap bounds
+	 */
+	if (!memcmp(rect, cam_rect, sizeof(*rect))) {
+		/* Even if camera S_CROP failed, the camera rectangle matches */
+		dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
+			rect->width, rect->height, rect->left, rect->top);
+		cam->rect = *cam_rect;
+		return 0;
+	}
+
+	/* Try to fix the cropping that the camera hasn't managed to set */
+	dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
+		cam_rect->width, cam_rect->height,
+		cam_rect->left, cam_rect->top,
+		rect->width, rect->height, rect->left, rect->top);
+
+	/* We need sensor maximum rectangle */
+	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+	if (ret < 0)
+		return ret;
+
+	/* Put user requested rectangle within sensor bounds */
+	soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
+			      cap.bounds.width);
+	soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
+			      cap.bounds.height);
+
+	/*
+	 * Popular special case - some cameras can only handle fixed sizes like
+	 * QVGA, VGA, ... Take care to avoid an infinite loop.
+	 */
+	width = max(cam_rect->width, 2);
+	height = max(cam_rect->height, 2);
+
+	/*
+	 * Loop as long as sensor is not covering the requested rectangle and
+	 * is still within its bounds
+	 */
+	while (!ret && (is_smaller(cam_rect, rect) ||
+			is_inside(cam_rect, rect)) &&
+	       (cap.bounds.width > width || cap.bounds.height > height)) {
+
+		width *= 2;
+		height *= 2;
+
+		cam_rect->width = width;
+		cam_rect->height = height;
+
+		/*
+		 * We do not know what capabilities the camera has to set up
+		 * left and top borders. We could try to be smarter in iterating
+		 * them, e.g., if the camera's current left is to the right of the
+		 * target left, set it to the middle point between the current
+		 * left and minimum left. But that would add too much
+		 * complexity: we would have to iterate each border separately.
+		 * Instead we just drop to the left and top bounds.
+		 */
+		if (cam_rect->left > rect->left)
+			cam_rect->left = cap.bounds.left;
+
+		if (cam_rect->left + cam_rect->width < rect->left + rect->width)
+			cam_rect->width = rect->left + rect->width -
+				cam_rect->left;
+
+		if (cam_rect->top > rect->top)
+			cam_rect->top = cap.bounds.top;
+
+		if (cam_rect->top + cam_rect->height < rect->top + rect->height)
+			cam_rect->height = rect->top + rect->height -
+				cam_rect->top;
+
+		v4l2_subdev_call(sd, video, s_crop, cam_crop);
+		ret = client_g_rect(sd, cam_rect);
+		dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
+			cam_rect->width, cam_rect->height,
+			cam_rect->left, cam_rect->top);
+	}
+
+	/* S_CROP must not modify the rectangle */
+	if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
+		/*
+		 * The camera failed to configure a suitable cropping;
+		 * we cannot use the current rectangle, so set it to the maximum
+		 */
+		*cam_rect = cap.bounds;
+		v4l2_subdev_call(sd, video, s_crop, cam_crop);
+		ret = client_g_rect(sd, cam_rect);
+		dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
+			cam_rect->width, cam_rect->height,
+			cam_rect->left, cam_rect->top);
+	}
+
+	if (!ret) {
+		cam->rect = *cam_rect;
+		update_subrect(cam);
+	}
+
+	return ret;
+}
+
+/* Iterative s_mbus_fmt, also updates cached client crop on success */
+static int client_s_fmt(struct soc_camera_device *icd,
+			struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct device *dev = icd->parent;
+	unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
+	unsigned int max_width, max_height;
+	struct v4l2_cropcap cap;
+	bool ceu_1to1;
+	int ret;
+
+	ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					 soc_camera_grp_id(icd), video,
+					 s_mbus_fmt, mf);
+	if (ret < 0)
+		return ret;
+
+	dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
+
+	if (width == mf->width && height == mf->height) {
+		/* Perfect! The client has done it all. */
+		ceu_1to1 = true;
+		goto update_cache;
+	}
+
+	ceu_1to1 = false;
+	if (!ceu_can_scale)
+		goto update_cache;
+
+	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+	if (ret < 0)
+		return ret;
+
+	max_width = min(cap.bounds.width, pcdev->max_width);
+	max_height = min(cap.bounds.height, pcdev->max_height);
+
+	/* Camera set a format, but geometry is not precise, try to improve */
+	tmp_w = mf->width;
+	tmp_h = mf->height;
+
+	/* width <= max_width && height <= max_height - guaranteed by try_fmt */
+	while ((width > tmp_w || height > tmp_h) &&
+	       tmp_w < max_width && tmp_h < max_height) {
+		tmp_w = min(2 * tmp_w, max_width);
+		tmp_h = min(2 * tmp_h, max_height);
+		mf->width = tmp_w;
+		mf->height = tmp_h;
+		ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					s_mbus_fmt, mf);
+		dev_geo(dev, "Camera scaled to %ux%u\n",
+			mf->width, mf->height);
+		if (ret < 0) {
+			/* This shouldn't happen */
+			dev_err(dev, "Client failed to set format: %d\n", ret);
+			return ret;
+		}
+	}
+
+update_cache:
+	/* Update cache */
+	ret = client_g_rect(sd, &cam->rect);
+	if (ret < 0)
+		return ret;
+
+	if (ceu_1to1)
+		cam->subrect = cam->rect;
+	else
+		update_subrect(cam);
+
+	return 0;
+}
+
+/**
+ * client_scale - set up client scaling and calculate the CEU sub-window
+ * @width:	on output: user width, mapped back to input
+ * @height:	on output: user height, mapped back to input
+ * @mf:		in- / output camera output window
+ */
+static int client_scale(struct soc_camera_device *icd,
+			struct v4l2_mbus_framefmt *mf,
+			unsigned int *width, unsigned int *height,
+			bool ceu_can_scale)
+{
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct device *dev = icd->parent;
+	struct v4l2_mbus_framefmt mf_tmp = *mf;
+	unsigned int scale_h, scale_v;
+	int ret;
+
+	/*
+	 * 5. Apply iterative camera S_FMT for camera user window (also updates
+	 *    client crop cache and the imaginary sub-rectangle).
+	 */
+	ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
+	if (ret < 0)
+		return ret;
+
+	dev_geo(dev, "5: camera scaled to %ux%u\n",
+		mf_tmp.width, mf_tmp.height);
+
+	/* 6. Retrieve camera output window (g_fmt) */
+
+	/* unneeded - it is already in "mf_tmp" */
+
+	/* 7. Calculate new client scales. */
+	scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width);
+	scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height);
+
+	mf->width	= mf_tmp.width;
+	mf->height	= mf_tmp.height;
+	mf->colorspace	= mf_tmp.colorspace;
+
+	/*
+	 * 8. Calculate new CEU crop - apply camera scales to previously
+	 *    updated "effective" crop.
+	 */
+	*width = scale_down(cam->subrect.width, scale_h);
+	*height = scale_down(cam->subrect.height, scale_v);
+
+	dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
+
+	return 0;
+}
+
+/*
+ * CEU can scale and crop, but we don't want to waste bandwidth and kill the
+ * framerate by always requesting the maximum image from the client. See
+ * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of
+ * scaling and cropping algorithms and for the meaning of the steps referenced here.
+ */
+static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
+				  struct v4l2_crop *a)
+{
+	struct v4l2_rect *rect = &a->c;
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct v4l2_crop cam_crop;
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct v4l2_rect *cam_rect = &cam_crop.c;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct v4l2_mbus_framefmt mf;
+	unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
+		out_width, out_height;
+	int interm_width, interm_height;
+	u32 capsr, cflcr;
+	int ret;
+
+	dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+		rect->left, rect->top);
+
+	/* During camera cropping its output window can change too, stop CEU */
+	capsr = capture_save_reset(pcdev);
+	dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
+
+	/*
+	 * 1. - 2. Apply iterative camera S_CROP for new input window, read back
+	 * actual camera rectangle.
+	 */
+	ret = client_s_crop(icd, a, &cam_crop);
+	if (ret < 0)
+		return ret;
+
+	dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
+		cam_rect->width, cam_rect->height,
+		cam_rect->left, cam_rect->top);
+
+	/* On success cam_crop contains current camera crop */
+
+	/* 3. Retrieve camera output window */
+	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	if (mf.width > pcdev->max_width || mf.height > pcdev->max_height)
+		return -EINVAL;
+
+	/* 4. Calculate camera scales */
+	scale_cam_h	= calc_generic_scale(cam_rect->width, mf.width);
+	scale_cam_v	= calc_generic_scale(cam_rect->height, mf.height);
+
+	/* Calculate intermediate window */
+	interm_width	= scale_down(rect->width, scale_cam_h);
+	interm_height	= scale_down(rect->height, scale_cam_v);
+
+	if (interm_width < icd->user_width) {
+		u32 new_scale_h;
+
+		new_scale_h = calc_generic_scale(rect->width, icd->user_width);
+
+		mf.width = scale_down(cam_rect->width, new_scale_h);
+	}
+
+	if (interm_height < icd->user_height) {
+		u32 new_scale_v;
+
+		new_scale_v = calc_generic_scale(rect->height, icd->user_height);
+
+		mf.height = scale_down(cam_rect->height, new_scale_v);
+	}
+
+	if (interm_width < icd->user_width || interm_height < icd->user_height) {
+		ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					s_mbus_fmt, &mf);
+		if (ret < 0)
+			return ret;
+
+		dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height);
+		scale_cam_h	= calc_generic_scale(cam_rect->width, mf.width);
+		scale_cam_v	= calc_generic_scale(cam_rect->height, mf.height);
+		interm_width	= scale_down(rect->width, scale_cam_h);
+		interm_height	= scale_down(rect->height, scale_cam_v);
+	}
+
+	/* Cache camera output window */
+	cam->width	= mf.width;
+	cam->height	= mf.height;
+
+	if (pcdev->image_mode) {
+		out_width	= min(interm_width, icd->user_width);
+		out_height	= min(interm_height, icd->user_height);
+	} else {
+		out_width	= interm_width;
+		out_height	= interm_height;
+	}
+
+	/*
+	 * 5. Calculate CEU scales from the intermediate window from (4) and
+	 *    the user window
+	 */
+	scale_ceu_h	= calc_scale(interm_width, &out_width);
+	scale_ceu_v	= calc_scale(interm_height, &out_height);
+
+	dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+
+	/* Apply CEU scales. */
+	cflcr = scale_ceu_h | (scale_ceu_v << 16);
+	if (cflcr != pcdev->cflcr) {
+		pcdev->cflcr = cflcr;
+		ceu_write(pcdev, CFLCR, cflcr);
+	}
+
+	icd->user_width	 = out_width & ~3;
+	icd->user_height = out_height & ~3;
+	/* Offsets are applied at the CEU scaling filter input */
+	cam->ceu_left	 = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
+	cam->ceu_top	 = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;
+
+	/* 6. Use CEU cropping to crop to the new window. */
+	sh_mobile_ceu_set_rect(icd);
+
+	cam->subrect = *rect;
+
+	dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
+		icd->user_width, icd->user_height,
+		cam->ceu_left, cam->ceu_top);
+
+	/* Restore capture. The CE bit can be cleared by the hardware */
+	if (pcdev->active)
+		capsr |= 1;
+	capture_restore(pcdev, capsr);
+
+	/* Even if only camera cropping succeeded */
+	return ret;
+}
+
+static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
+				  struct v4l2_crop *a)
+{
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+
+	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	a->c = cam->subrect;
+
+	return 0;
+}
+
+/*
+ * Calculate real client output window by applying new scales to the current
+ * client crop. New scales are calculated from the requested output format and
+ * CEU crop, mapped back onto the client input (subrect).
+ */
+static void calculate_client_output(struct soc_camera_device *icd,
+		const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
+{
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct device *dev = icd->parent;
+	struct v4l2_rect *cam_subrect = &cam->subrect;
+	unsigned int scale_v, scale_h;
+
+	if (cam_subrect->width == cam->rect.width &&
+	    cam_subrect->height == cam->rect.height) {
+		/* No sub-cropping */
+		mf->width	= pix->width;
+		mf->height	= pix->height;
+		return;
+	}
+
+	/* 1.-2. Current camera scales and subwin - cached. */
+
+	dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
+		cam_subrect->width, cam_subrect->height,
+		cam_subrect->left, cam_subrect->top);
+
+	/*
+	 * 3. Calculate new combined scales from input sub-window to requested
+	 *    user window.
+	 */
+
+	/*
+	 * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
+	 * (128x96) or larger than VGA
+	 */
+	scale_h = calc_generic_scale(cam_subrect->width, pix->width);
+	scale_v = calc_generic_scale(cam_subrect->height, pix->height);
+
+	dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
+
+	/*
+	 * 4. Calculate desired client output window by applying combined scales
+	 *    to client (real) input window.
+	 */
+	mf->width	= scale_down(cam->rect.width, scale_h);
+	mf->height	= scale_down(cam->rect.height, scale_v);
+}
+
+/* Similar to set_crop multistage iterative algorithm */
+static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
+				 struct v4l2_format *f)
+{
+	struct device *dev = icd->parent;
+	struct soc_camera_host *ici = to_soc_camera_host(dev);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	struct sh_mobile_ceu_cam *cam = icd->host_priv;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_mbus_framefmt mf;
+	__u32 pixfmt = pix->pixelformat;
+	const struct soc_camera_format_xlate *xlate;
+	/* Keep the compiler happy */
+	unsigned int ceu_sub_width = 0, ceu_sub_height = 0;
+	u16 scale_v, scale_h;
+	int ret;
+	bool image_mode;
+	enum v4l2_field field;
+
+	switch (pix->field) {
+	default:
+		pix->field = V4L2_FIELD_NONE;
+		/* fall-through */
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_INTERLACED_BT:
+	case V4L2_FIELD_NONE:
+		field = pix->field;
+		break;
+	case V4L2_FIELD_INTERLACED:
+		field = V4L2_FIELD_INTERLACED_TB;
+		break;
+	}
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (!xlate) {
+		dev_warn(dev, "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	/* 1.-4. Calculate desired client output geometry */
+	calculate_client_output(icd, pix, &mf);
+	mf.field	= pix->field;
+	mf.colorspace	= pix->colorspace;
+	mf.code		= xlate->code;
+
+	switch (pixfmt) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		image_mode = true;
+		break;
+	default:
+		image_mode = false;
+	}
+
+	dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
+		pix->width, pix->height);
+
+	dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
+
+	/* 5. - 9. */
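+	/* CEU scaling is only used for planar NV formats, progressive input */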
+	ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height,
+			   image_mode && V4L2_FIELD_NONE == field);
+
+	dev_geo(dev, "5-9: client scale return %d\n", ret);
+
+	/* Done with the camera. Now see if we can improve the result */
+
+	dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
+		mf.width, mf.height, pix->width, pix->height);
+	if (ret < 0)
+		return ret;
+
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
+	/* 9. Prepare CEU crop */
+	cam->width = mf.width;
+	cam->height = mf.height;
+
+	/* 10. Use CEU scaling to scale to the requested user window. */
+
+	/* We cannot scale up */
+	if (pix->width > ceu_sub_width)
+		ceu_sub_width = pix->width;
+
+	if (pix->height > ceu_sub_height)
+		ceu_sub_height = pix->height;
+
+	pix->colorspace = mf.colorspace;
+
+	if (image_mode) {
+		/* Scale pix->{width x height} down to width x height */
+		scale_h		= calc_scale(ceu_sub_width, &pix->width);
+		scale_v		= calc_scale(ceu_sub_height, &pix->height);
+	} else {
+		pix->width	= ceu_sub_width;
+		pix->height	= ceu_sub_height;
+		scale_h		= 0;
+		scale_v		= 0;
+	}
+
+	pcdev->cflcr = scale_h | (scale_v << 16);
+
+	/*
+	 * We have calculated CFLCR; the actual configuration will be performed
+	 * in sh_mobile_ceu_set_bus_param()
+	 */
+
+	dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
+		ceu_sub_width, scale_h, pix->width,
+		ceu_sub_height, scale_v, pix->height);
+
+	cam->code		= xlate->code;
+	icd->current_fmt	= xlate;
+
+	pcdev->field = field;
+	pcdev->image_mode = image_mode;
+
+	/* CFSZR requirement */
+	pix->width	&= ~3;
+	pix->height	&= ~3;
+
+	return 0;
+}
+
+#define CEU_CHDW_MAX	8188U	/* Maximum line stride */
+
+static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
+				 struct v4l2_format *f)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct v4l2_mbus_framefmt mf;
+	__u32 pixfmt = pix->pixelformat;
+	int width, height;
+	int ret;
+
+	dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
+		 pixfmt, pix->width, pix->height);
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (!xlate) {
+		xlate = icd->current_fmt;
+		dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
+			pixfmt, xlate->host_fmt->fourcc);
+		pixfmt = xlate->host_fmt->fourcc;
+		pix->pixelformat = pixfmt;
+		pix->colorspace = icd->colorspace;
+	}
+
+	/* FIXME: calculate using depth and bus width */
+
+	/* CFSZR requires height and width to be 4-pixel aligned */
+	v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
+			      &pix->height, 4, pcdev->max_height, 2, 0);
+
+	width = pix->width;
+	height = pix->height;
+
+	/* limit to sensor capabilities */
+	mf.width	= pix->width;
+	mf.height	= pix->height;
+	mf.field	= pix->field;
+	mf.code		= xlate->code;
+	mf.colorspace	= pix->colorspace;
+
+	ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+					 video, try_mbus_fmt, &mf);
+	if (ret < 0)
+		return ret;
+
+	pix->width	= mf.width;
+	pix->height	= mf.height;
+	pix->field	= mf.field;
+	pix->colorspace	= mf.colorspace;
+
+	switch (pixfmt) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		/* FIXME: check against rect_max after converting soc-camera */
+		/* We can scale precisely, need a bigger image from camera */
+		if (pix->width < width || pix->height < height) {
+			/*
+			 * We presume the sensor behaves sanely, i.e., if we
+			 * request a bigger rectangle, it will not return a
+			 * smaller one.
+			 */
+			mf.width = pcdev->max_width;
+			mf.height = pcdev->max_height;
+			ret = v4l2_device_call_until_err(sd->v4l2_dev,
+					soc_camera_grp_id(icd), video,
+					try_mbus_fmt, &mf);
+			if (ret < 0) {
+				/* Shouldn't actually happen... */
+				dev_err(icd->parent,
+					"FIXME: client try_fmt() = %d\n", ret);
+				return ret;
+			}
+		}
+		/* We will scale exactly */
+		if (mf.width > width)
+			pix->width = width;
+		if (mf.height > height)
+			pix->height = height;
+
+		pix->bytesperline = max(pix->bytesperline, pix->width);
+		pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
+		pix->bytesperline &= ~3;
+		break;
+
+	default:
+		/* Configurable stride isn't supported in pass-through mode. */
+		pix->bytesperline  = 0;
+	}
+
+	pix->width	&= ~3;
+	pix->height	&= ~3;
+	pix->sizeimage	= 0;
+
+	dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
+		__func__, ret, pix->pixelformat, pix->width, pix->height);
+
+	return ret;
+}
+
+static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
+				      struct v4l2_crop *a)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct sh_mobile_ceu_dev *pcdev = ici->priv;
+	u32 out_width = icd->user_width, out_height = icd->user_height;
+	int ret;
+
+	/* Freeze queue */
+	pcdev->frozen = 1;
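+	/*
+	 * sh_mobile_ceu_capture() checks this flag: it stops re-arming the
+	 * CEU and signals pcdev->complete from the next frame-end interrupt.
+	 */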
+	/* Wait for frame */
+	ret = wait_for_completion_interruptible(&pcdev->complete);
+	/* Stop the client */
+	ret = v4l2_subdev_call(sd, video, s_stream, 0);
+	if (ret < 0)
+		dev_warn(icd->parent,
+			 "Client failed to stop the stream: %d\n", ret);
+	else
+		/* Do the crop; if it fails, there's nothing more we can do */
+		sh_mobile_ceu_set_crop(icd, a);
+
+	dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
+
+	if (icd->user_width != out_width || icd->user_height != out_height) {
+		struct v4l2_format f = {
+			.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.fmt.pix	= {
+				.width		= out_width,
+				.height		= out_height,
+				.pixelformat	= icd->current_fmt->host_fmt->fourcc,
+				.field		= pcdev->field,
+				.colorspace	= icd->colorspace,
+			},
+		};
+		ret = sh_mobile_ceu_set_fmt(icd, &f);
+		if (!ret && (out_width != f.fmt.pix.width ||
+			     out_height != f.fmt.pix.height))
+			ret = -EINVAL;
+		if (!ret) {
+			icd->user_width		= out_width & ~3;
+			icd->user_height	= out_height & ~3;
+			ret = sh_mobile_ceu_set_bus_param(icd);
+		}
+	}
+
+	/* Thaw the queue */
+	pcdev->frozen = 0;
+	spin_lock_irq(&pcdev->lock);
+	sh_mobile_ceu_capture(pcdev);
+	spin_unlock_irq(&pcdev->lock);
+	/* Start the client */
+	ret = v4l2_subdev_call(sd, video, s_stream, 1);
+	return ret;
+}
+
+static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+
+	return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
+				  struct v4l2_capability *cap)
+{
+	strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
+				       struct soc_camera_device *icd)
+{
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->drv_priv = icd;
+	q->ops = &sh_mobile_ceu_videobuf_ops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
+
+	return vb2_queue_init(q);
+}
+
+static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
+	.owner		= THIS_MODULE,
+	.add		= sh_mobile_ceu_add_device,
+	.remove		= sh_mobile_ceu_remove_device,
+	.get_formats	= sh_mobile_ceu_get_formats,
+	.put_formats	= sh_mobile_ceu_put_formats,
+	.get_crop	= sh_mobile_ceu_get_crop,
+	.set_crop	= sh_mobile_ceu_set_crop,
+	.set_livecrop	= sh_mobile_ceu_set_livecrop,
+	.set_fmt	= sh_mobile_ceu_set_fmt,
+	.try_fmt	= sh_mobile_ceu_try_fmt,
+	.poll		= sh_mobile_ceu_poll,
+	.querycap	= sh_mobile_ceu_querycap,
+	.set_bus_param	= sh_mobile_ceu_set_bus_param,
+	.init_videobuf2	= sh_mobile_ceu_init_videobuf,
+};
+
+struct bus_wait {
+	struct notifier_block	notifier;
+	struct completion	completion;
+	struct device		*dev;
+};
+
+static int bus_notify(struct notifier_block *nb,
+		      unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);
+
+	if (wait->dev != dev)
+		return NOTIFY_DONE;
+
+	switch (action) {
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		/* Protect from module unloading */
+		wait_for_completion(&wait->completion);
+		return NOTIFY_OK;
+	}
+	return NOTIFY_DONE;
+}
+
+static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
+{
+	struct sh_mobile_ceu_dev *pcdev;
+	struct resource *res;
+	void __iomem *base;
+	unsigned int irq;
+	int err = 0;
+	struct bus_wait wait = {
+		.completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
+		.notifier.notifier_call = bus_notify,
+	};
+	struct sh_mobile_ceu_companion *csi2;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || (int)irq <= 0) {
+		dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
+		err = -ENODEV;
+		goto exit;
+	}
+
+	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+	if (!pcdev) {
+		dev_err(&pdev->dev, "Could not allocate pcdev\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	INIT_LIST_HEAD(&pcdev->capture);
+	spin_lock_init(&pcdev->lock);
+	init_completion(&pcdev->complete);
+
+	pcdev->pdata = pdev->dev.platform_data;
+	if (!pcdev->pdata) {
+		err = -EINVAL;
+		dev_err(&pdev->dev, "CEU platform data not set.\n");
+		goto exit_kfree;
+	}
+
+	pcdev->max_width = pcdev->pdata->max_width ? : 2560;
+	pcdev->max_height = pcdev->pdata->max_height ? : 1920;
+
+	base = ioremap_nocache(res->start, resource_size(res));
+	if (!base) {
+		err = -ENXIO;
+		dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
+		goto exit_kfree;
+	}
+
+	pcdev->irq = irq;
+	pcdev->base = base;
+	pcdev->video_limit = 0; /* only enabled if second resource exists */
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		err = dma_declare_coherent_memory(&pdev->dev, res->start,
+						  res->start,
+						  resource_size(res),
+						  DMA_MEMORY_MAP |
+						  DMA_MEMORY_EXCLUSIVE);
+		if (!err) {
+			dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
+			err = -ENXIO;
+			goto exit_iounmap;
+		}
+
+		pcdev->video_limit = resource_size(res);
+	}
+
+	/* request irq */
+	err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED,
+			  dev_name(&pdev->dev), pcdev);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
+		goto exit_release_mem;
+	}
+
+	pm_suspend_ignore_children(&pdev->dev, true);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_resume(&pdev->dev);
+
+	pcdev->ici.priv = pcdev;
+	pcdev->ici.v4l2_dev.dev = &pdev->dev;
+	pcdev->ici.nr = pdev->id;
+	pcdev->ici.drv_name = dev_name(&pdev->dev);
+	pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+	pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
+
+	pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(pcdev->alloc_ctx)) {
+		err = PTR_ERR(pcdev->alloc_ctx);
+		goto exit_free_clk;
+	}
+
+	err = soc_camera_host_register(&pcdev->ici);
+	if (err)
+		goto exit_free_ctx;
+
+	/* CSI2 interfacing */
+	csi2 = pcdev->pdata->csi2;
+	if (csi2) {
+		struct platform_device *csi2_pdev =
+			platform_device_alloc("sh-mobile-csi2", csi2->id);
+		struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
+
+		if (!csi2_pdev) {
+			err = -ENOMEM;
+			goto exit_host_unregister;
+		}
+
+		pcdev->csi2_pdev		= csi2_pdev;
+
+		err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata));
+		if (err < 0)
+			goto exit_pdev_put;
+
+		csi2_pdata			= csi2_pdev->dev.platform_data;
+		csi2_pdata->v4l2_dev		= &pcdev->ici.v4l2_dev;
+
+		csi2_pdev->resource		= csi2->resource;
+		csi2_pdev->num_resources	= csi2->num_resources;
+
+		err = platform_device_add(csi2_pdev);
+		if (err < 0)
+			goto exit_pdev_put;
+
+		wait.dev = &csi2_pdev->dev;
+
+		err = bus_register_notifier(&platform_bus_type, &wait.notifier);
+		if (err < 0)
+			goto exit_pdev_unregister;
+
+		/*
+		 * From this point on the CSI-2 driver module will not unload
+		 * until we complete the completion.
+		 */
+
+		if (!csi2_pdev->dev.driver) {
+			complete(&wait.completion);
+			/* Either too late, or probing failed */
+			bus_unregister_notifier(&platform_bus_type, &wait.notifier);
+			err = -ENXIO;
+			goto exit_pdev_unregister;
+		}
+
+		/*
+		 * The module is still loaded, in the worst case it is hanging
+		 * in device release on our completion. So, _now_ dereferencing
+		 * the "owner" is safe!
+		 */
+
+		err = try_module_get(csi2_pdev->dev.driver->owner);
+
+		/* Let notifier complete, if it has been locked */
+		complete(&wait.completion);
+		bus_unregister_notifier(&platform_bus_type, &wait.notifier);
+		if (!err) {
+			err = -ENODEV;
+			goto exit_pdev_unregister;
+		}
+	}
+
+	return 0;
+
+exit_pdev_unregister:
+	platform_device_del(pcdev->csi2_pdev);
+exit_pdev_put:
+	pcdev->csi2_pdev->resource = NULL;
+	platform_device_put(pcdev->csi2_pdev);
+exit_host_unregister:
+	soc_camera_host_unregister(&pcdev->ici);
+exit_free_ctx:
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+exit_free_clk:
+	pm_runtime_disable(&pdev->dev);
+	free_irq(pcdev->irq, pcdev);
+exit_release_mem:
+	if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+		dma_release_declared_memory(&pdev->dev);
+exit_iounmap:
+	iounmap(base);
+exit_kfree:
+	kfree(pcdev);
+exit:
+	return err;
+}
+
+static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
+{
+	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+	struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
+					struct sh_mobile_ceu_dev, ici);
+	struct platform_device *csi2_pdev = pcdev->csi2_pdev;
+
+	soc_camera_host_unregister(soc_host);
+	pm_runtime_disable(&pdev->dev);
+	free_irq(pcdev->irq, pcdev);
+	if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+		dma_release_declared_memory(&pdev->dev);
+	iounmap(pcdev->base);
+	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+	if (csi2_pdev && csi2_pdev->dev.driver) {
+		struct module *csi2_drv = csi2_pdev->dev.driver->owner;
+		platform_device_del(csi2_pdev);
+		csi2_pdev->resource = NULL;
+		platform_device_put(csi2_pdev);
+		module_put(csi2_drv);
+	}
+	kfree(pcdev);
+
+	return 0;
+}
+
+static int sh_mobile_ceu_runtime_nop(struct device *dev)
+{
+	/*
+	 * Runtime PM callback shared between ->runtime_suspend()
+	 * and ->runtime_resume(). Simply returns success.
+	 *
+	 * This driver re-initializes all registers after
+	 * pm_runtime_get_sync() anyway so there is no need
+	 * to save and restore registers here.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
+	.runtime_suspend = sh_mobile_ceu_runtime_nop,
+	.runtime_resume = sh_mobile_ceu_runtime_nop,
+};
+
+static struct platform_driver sh_mobile_ceu_driver = {
+	.driver 	= {
+		.name	= "sh_mobile_ceu",
+		.pm	= &sh_mobile_ceu_dev_pm_ops,
+	},
+	.probe		= sh_mobile_ceu_probe,
+	.remove		= __devexit_p(sh_mobile_ceu_remove),
+};
+
+static int __init sh_mobile_ceu_init(void)
+{
+	/* Load the optional CSI2 driver; its return code is deliberately ignored */
+	request_module("sh_mobile_csi2");
+	return platform_driver_register(&sh_mobile_ceu_driver);
+}
+
+static void __exit sh_mobile_ceu_exit(void)
+{
+	platform_driver_unregister(&sh_mobile_ceu_driver);
+}
+
+module_init(sh_mobile_ceu_init);
+module_exit(sh_mobile_ceu_exit);
+
+MODULE_DESCRIPTION("SuperH Mobile CEU driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.6");
+MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
new file mode 100644
index 0000000..9f62fd8
--- /dev/null
+++ b/drivers/media/platform/sh_vou.c
@@ -0,0 +1,1510 @@
+/*
+ * SuperH Video Output Unit (VOU) driver
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+
+#include <media/sh_vou.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf-dma-contig.h>
+
+/* Mirror addresses are not available for all registers */
+#define VOUER	0
+#define VOUCR	4
+#define VOUSTR	8
+#define VOUVCR	0xc
+#define VOUISR	0x10
+#define VOUBCR	0x14
+#define VOUDPR	0x18
+#define VOUDSR	0x1c
+#define VOUVPR	0x20
+#define VOUIR	0x24
+#define VOUSRR	0x28
+#define VOUMSR	0x2c
+#define VOUHIR	0x30
+#define VOUDFR	0x34
+#define VOUAD1R	0x38
+#define VOUAD2R	0x3c
+#define VOUAIR	0x40
+#define VOUSWR	0x44
+#define VOURCR	0x48
+#define VOURPR	0x50
+
+enum sh_vou_status {
+	SH_VOU_IDLE,
+	SH_VOU_INITIALISING,
+	SH_VOU_RUNNING,
+};
+
+#define VOU_MAX_IMAGE_WIDTH	720
+#define VOU_MAX_IMAGE_HEIGHT	576
+
+struct sh_vou_device {
+	struct v4l2_device v4l2_dev;
+	struct video_device *vdev;
+	atomic_t use_count;
+	struct sh_vou_pdata *pdata;
+	spinlock_t lock;
+	void __iomem *base;
+	/* State information */
+	struct v4l2_pix_format pix;
+	struct v4l2_rect rect;
+	struct list_head queue;
+	v4l2_std_id std;
+	int pix_idx;
+	struct videobuf_buffer *active;
+	enum sh_vou_status status;
+	struct mutex fop_lock;
+};
+
+struct sh_vou_file {
+	struct videobuf_queue vbq;
+};
+
+/* Register access routines for sides A, B and mirror addresses */
+static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
+			       u32 value)
+{
+	__raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
+				u32 value)
+{
+	__raw_writel(value, vou_dev->base + reg);
+	__raw_writel(value, vou_dev->base + reg + 0x1000);
+}
+
+static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
+			       u32 value)
+{
+	__raw_writel(value, vou_dev->base + reg + 0x2000);
+}
+
+static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
+{
+	return __raw_readl(vou_dev->base + reg);
+}
+
+static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
+			     u32 value, u32 mask)
+{
+	u32 old = __raw_readl(vou_dev->base + reg);
+
+	value = (value & mask) | (old & ~mask);
+	__raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
+			     u32 value, u32 mask)
+{
+	sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
+}
+
+static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
+			      u32 value, u32 mask)
+{
+	sh_vou_reg_a_set(vou_dev, reg, value, mask);
+	sh_vou_reg_b_set(vou_dev, reg, value, mask);
+}
+
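+/*
+ * Pixel format descriptor: pkf, yf and rgb are written verbatim into the
+ * VOUDFR data-format register, see sh_vou_configure_geometry()
+ */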
+struct sh_vou_fmt {
+	u32		pfmt;
+	char		*desc;
+	unsigned char	bpp;
+	unsigned char	rgb;
+	unsigned char	yf;
+	unsigned char	pkf;
+};
+
+/* Further pixel formats can be added */
+static struct sh_vou_fmt vou_fmt[] = {
+	{
+		.pfmt	= V4L2_PIX_FMT_NV12,
+		.bpp	= 12,
+		.desc	= "YVU420 planar",
+		.yf	= 0,
+		.rgb	= 0,
+	},
+	{
+		.pfmt	= V4L2_PIX_FMT_NV16,
+		.bpp	= 16,
+		.desc	= "YVYU planar",
+		.yf	= 1,
+		.rgb	= 0,
+	},
+	{
+		.pfmt	= V4L2_PIX_FMT_RGB24,
+		.bpp	= 24,
+		.desc	= "RGB24",
+		.pkf	= 2,
+		.rgb	= 1,
+	},
+	{
+		.pfmt	= V4L2_PIX_FMT_RGB565,
+		.bpp	= 16,
+		.desc	= "RGB565",
+		.pkf	= 3,
+		.rgb	= 1,
+	},
+	{
+		.pfmt	= V4L2_PIX_FMT_RGB565X,
+		.bpp	= 16,
+		.desc	= "RGB565 byteswapped",
+		.pkf	= 3,
+		.rgb	= 1,
+	},
+};
+
+static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
+				 struct videobuf_buffer *vb)
+{
+	dma_addr_t addr1, addr2;
+
+	addr1 = videobuf_to_dma_contig(vb);
+	switch (vou_dev->pix.pixelformat) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV16:
+		addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
+		break;
+	default:
+		addr2 = 0;
+	}
+
+	sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
+	sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
+}
+
+static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
+				struct videobuf_buffer *vb)
+{
+	unsigned int row_coeff;
+#ifdef __LITTLE_ENDIAN
+	u32 dataswap = 7;
+#else
+	u32 dataswap = 0;
+#endif
+
+	switch (vou_dev->pix.pixelformat) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV16:
+		row_coeff = 1;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		dataswap ^= 1;
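+		/* fall through - RGB565 uses the same row coefficient as RGB565X */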
+	case V4L2_PIX_FMT_RGB565X:
+		row_coeff = 2;
+		break;
+	case V4L2_PIX_FMT_RGB24:
+		row_coeff = 3;
+		break;
+	}
+
+	sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
+	sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
+	sh_vou_schedule_next(vou_dev, vb);
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+	BUG_ON(in_interrupt());
+
+	/* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
+	videobuf_waiton(vq, vb, 0, 0);
+	videobuf_dma_contig_free(vq, vb);
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/* Locking: caller holds fop_lock mutex */
+static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+			    unsigned int *size)
+{
+	struct video_device *vdev = vq->priv_data;
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	*size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width *
+		vou_dev->pix.height / 8;
+
+	if (*count < 2)
+		*count = 2;
+
+	/* Taking into account maximum frame size, *count will stay >= 2 */
+	if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
+		*count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
+
+	dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size);
+
+	return 0;
+}
+
+/* Locking: caller holds fop_lock mutex */
+static int sh_vou_buf_prepare(struct videobuf_queue *vq,
+			      struct videobuf_buffer *vb,
+			      enum v4l2_field field)
+{
+	struct video_device *vdev = vq->priv_data;
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct v4l2_pix_format *pix = &vou_dev->pix;
+	int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+	int ret;
+
+	dev_dbg(vq->dev, "%s()\n", __func__);
+
+	if (vb->width	!= pix->width ||
+	    vb->height	!= pix->height ||
+	    vb->field	!= pix->field) {
+		vb->width	= pix->width;
+		vb->height	= pix->height;
+		vb->field	= field;
+		if (vb->state != VIDEOBUF_NEEDS_INIT)
+			free_buffer(vq, vb);
+	}
+
+	vb->size = vb->height * bytes_per_line;
+	if (vb->baddr && vb->bsize < vb->size) {
+		/* User buffer too small */
+		dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+			 vb->bsize, vb->baddr);
+		return -EINVAL;
+	}
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		ret = videobuf_iolock(vq, vb, NULL);
+		if (ret < 0) {
+			dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n",
+				 vb->memory, ret);
+			return ret;
+		}
+		vb->state = VIDEOBUF_PREPARED;
+	}
+
+	dev_dbg(vq->dev,
+		"%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+		__func__, vou_dev->pix_idx, bytes_per_line,
+		videobuf_to_dma_contig(vb), vb->memory, vb->state);
+
+	return 0;
+}
+
+/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
+static void sh_vou_buf_queue(struct videobuf_queue *vq,
+			     struct videobuf_buffer *vb)
+{
+	struct video_device *vdev = vq->priv_data;
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	dev_dbg(vq->dev, "%s()\n", __func__);
+
+	vb->state = VIDEOBUF_QUEUED;
+	list_add_tail(&vb->queue, &vou_dev->queue);
+
+	if (vou_dev->status == SH_VOU_RUNNING) {
+		return;
+	} else if (!vou_dev->active) {
+		vou_dev->active = vb;
+		/* Start from side A: we use mirror addresses, so, set B */
+		sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+		dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__,
+			sh_vou_reg_a_read(vou_dev, VOUSTR));
+		sh_vou_schedule_next(vou_dev, vb);
+		/* Only activate VOU after the second buffer */
+	} else if (vou_dev->active->queue.next == &vb->queue) {
+		/* Second buffer - initialise register side B */
+		sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+		sh_vou_stream_start(vou_dev, vb);
+
+		/* Register side switching with frame VSYNC */
+		sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+		dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__,
+			sh_vou_reg_a_read(vou_dev, VOUSTR));
+
+		/* Enable End-of-Frame (VSYNC) interrupts */
+		sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+		/* Two buffers on the queue - activate the hardware */
+
+		vou_dev->status = SH_VOU_RUNNING;
+		sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+	}
+}
+
+static void sh_vou_buf_release(struct videobuf_queue *vq,
+			       struct videobuf_buffer *vb)
+{
+	struct video_device *vdev = vq->priv_data;
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	unsigned long flags;
+
+	dev_dbg(vq->dev, "%s()\n", __func__);
+
+	spin_lock_irqsave(&vou_dev->lock, flags);
+
+	if (vou_dev->active == vb) {
+		/* disable output */
+		sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+		/* ...but the current frame will complete */
+		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+		vou_dev->active = NULL;
+	}
+
+	if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) {
+		vb->state = VIDEOBUF_ERROR;
+		list_del(&vb->queue);
+	}
+
+	spin_unlock_irqrestore(&vou_dev->lock, flags);
+
+	free_buffer(vq, vb);
+}
+
+static struct videobuf_queue_ops sh_vou_video_qops = {
+	.buf_setup	= sh_vou_buf_setup,
+	.buf_prepare	= sh_vou_buf_prepare,
+	.buf_queue	= sh_vou_buf_queue,
+	.buf_release	= sh_vou_buf_release,
+};
+
+/* Video IOCTLs */
+static int sh_vou_querycap(struct file *file, void  *priv,
+			   struct v4l2_capability *cap)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+/* Enumerate the formats that the device can accept from the user */
+static int sh_vou_enum_fmt_vid_out(struct file *file, void  *priv,
+				   struct v4l2_fmtdesc *fmt)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	if (fmt->index >= ARRAY_SIZE(vou_fmt))
+		return -EINVAL;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	strlcpy(fmt->description, vou_fmt[fmt->index].desc,
+		sizeof(fmt->description));
+	fmt->pixelformat = vou_fmt[fmt->index].pfmt;
+
+	return 0;
+}
+
+static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	fmt->fmt.pix = vou_dev->pix;
+
+	return 0;
+}
+
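+/*
+ * Up-scaling ratios supported by the VOU (numerator / denominator pairs) and
+ * the matching VOUVCR field codes; index 0 means no scaling, so the *_fld
+ * tables are indexed with idx - 1 in sh_vou_configure_geometry()
+ */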
+static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
+static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
+static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
+static const unsigned char vou_scale_v_num[] = {1, 2, 4};
+static const unsigned char vou_scale_v_den[] = {1, 1, 1};
+static const unsigned char vou_scale_v_fld[] = {0, 1};
+
+static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
+				      int pix_idx, int w_idx, int h_idx)
+{
+	struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
+	unsigned int black_left, black_top, width_max, height_max,
+		frame_in_height, frame_out_height, frame_out_top;
+	struct v4l2_rect *rect = &vou_dev->rect;
+	struct v4l2_pix_format *pix = &vou_dev->pix;
+	u32 vouvcr = 0, dsr_h, dsr_v;
+
+	if (vou_dev->std & V4L2_STD_525_60) {
+		width_max = 858;
+		height_max = 262;
+	} else {
+		width_max = 864;
+		height_max = 312;
+	}
+
+	frame_in_height = pix->height / 2;
+	frame_out_height = rect->height / 2;
+	frame_out_top = rect->top / 2;
+
+	/*
+	 * Cropping scheme: max useful image is 720x480, and the total video
+	 * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
+	 * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
+	 * of which the first 33 / 25 clocks HSYNC must be held active. This
+	 * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
+	 * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
+	 * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
+	 * beyond DSR, specified on the left and top by the VPR register "black
+	 * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
+	 * at 138 / 144 : 20, because that's the HSYNC timing, that our first
+	 * client requires, and that's exactly what leaves us 720 pixels for the
+	 * image; we leave VPR[VVP] at default 20 for now, because the client
+	 * doesn't seem to have any special requirements for it. Otherwise we
+	 * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
+	 * the selected standard, and DPR and DSR are selected according to
+	 * cropping. Q: how does the client detect the first valid line? Does
+	 * HSYNC stay inactive during invalid (black) lines?
+	 */
+	black_left = width_max - VOU_MAX_IMAGE_WIDTH;
+	black_top = 20;
+
+	dsr_h = rect->width + rect->left;
+	dsr_v = frame_out_height + frame_out_top;
+
+	dev_dbg(vou_dev->v4l2_dev.dev,
+		"image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
+		pix->width, frame_in_height, black_left, black_top,
+		rect->left, frame_out_top, dsr_h, dsr_v);
+
+	/* VOUISR height - half of a frame height in frame mode */
+	sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
+	sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
+	sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
+	sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);
+
+	/*
+	 * if necessary, we could set VOUHIR to
+	 * max(black_left + dsr_h, width_max) here
+	 */
+
+	if (w_idx)
+		vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
+	if (h_idx)
+		vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr);
+
+	/* To produce a colour bar for testing set bit 23 of VOUVCR */
+	sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
+	sh_vou_reg_ab_write(vou_dev, VOUDFR,
+			    fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
+}
+
+struct sh_vou_geometry {
+	struct v4l2_rect output;
+	unsigned int in_width;
+	unsigned int in_height;
+	int scale_idx_h;
+	int scale_idx_v;
+};
+
+/*
+ * Find the input geometry that, using VOU scaling, lets us produce output
+ * closest to the requested rectangle
+ */
+static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+	/* The compiler cannot know that best and idx will indeed be set */
+	unsigned int best_err = UINT_MAX, best = 0, img_height_max;
+	int i, idx = 0;
+
+	if (std & V4L2_STD_525_60)
+		img_height_max = 480;
+	else
+		img_height_max = 576;
+
+	/* Image width must be a multiple of 4 */
+	v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+			      &geo->in_height, 0, img_height_max, 1, 0);
+
+	/* Select scales to come as close as possible to the output image */
+	for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
+		unsigned int err;
+		unsigned int found = geo->output.width * vou_scale_h_den[i] /
+			vou_scale_h_num[i];
+
+		if (found > VOU_MAX_IMAGE_WIDTH)
+			/* scales increase */
+			break;
+
+		err = abs(found - geo->in_width);
+		if (err < best_err) {
+			best_err = err;
+			idx = i;
+			best = found;
+		}
+		if (!err)
+			break;
+	}
+
+	geo->in_width = best;
+	geo->scale_idx_h = idx;
+
+	best_err = UINT_MAX;
+
+	/* This loop can be replaced with one division */
+	for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) {
+		unsigned int err;
+		unsigned int found = geo->output.height * vou_scale_v_den[i] /
+			vou_scale_v_num[i];
+
+		if (found > img_height_max)
+			/* scales increase */
+			break;
+
+		err = abs(found - geo->in_height);
+		if (err < best_err) {
+			best_err = err;
+			idx = i;
+			best = found;
+		}
+		if (!err)
+			break;
+	}
+
+	geo->in_height = best;
+	geo->scale_idx_v = idx;
+}
+
+/*
+ * Find the output geometry, closest to the requested rectangle, that we can
+ * produce using VOU scaling
+ */
+static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+	unsigned int best_err = UINT_MAX, best, width_max, height_max,
+		img_height_max;
+	int i, idx;
+
+	if (std & V4L2_STD_525_60) {
+		width_max = 858;
+		height_max = 262 * 2;
+		img_height_max = 480;
+	} else {
+		width_max = 864;
+		height_max = 312 * 2;
+		img_height_max = 576;
+	}
+
+	/* Select scales to come as close as possible to the output image */
+	for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) {
+		unsigned int err;
+		unsigned int found = geo->in_width * vou_scale_h_num[i] /
+			vou_scale_h_den[i];
+
+		if (found > VOU_MAX_IMAGE_WIDTH)
+			/* scales increase */
+			break;
+
+		err = abs(found - geo->output.width);
+		if (err < best_err) {
+			best_err = err;
+			idx = i;
+			best = found;
+		}
+		if (!err)
+			break;
+	}
+
+	geo->output.width = best;
+	geo->scale_idx_h = idx;
+	if (geo->output.left + best > width_max)
+		geo->output.left = width_max - best;
+
+	pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width,
+		 vou_scale_h_num[idx], vou_scale_h_den[idx], best);
+
+	best_err = UINT_MAX;
+
+	/* This loop can be replaced with one division */
+	for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) {
+		unsigned int err;
+		unsigned int found = geo->in_height * vou_scale_v_num[i] /
+			vou_scale_v_den[i];
+
+		if (found > img_height_max)
+			/* scales increase */
+			break;
+
+		err = abs(found - geo->output.height);
+		if (err < best_err) {
+			best_err = err;
+			idx = i;
+			best = found;
+		}
+		if (!err)
+			break;
+	}
+
+	geo->output.height = best;
+	geo->scale_idx_v = idx;
+	if (geo->output.top + best > height_max)
+		geo->output.top = height_max - best;
+
+	pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height,
+		 vou_scale_v_num[idx], vou_scale_v_den[idx], best);
+}
+
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+				struct v4l2_format *fmt)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct v4l2_pix_format *pix = &fmt->fmt.pix;
+	unsigned int img_height_max;
+	int pix_idx;
+	struct sh_vou_geometry geo;
+	struct v4l2_mbus_framefmt mbfmt = {
+		/* Revisit: is this the correct code? */
+		.code = V4L2_MBUS_FMT_YUYV8_2X8,
+		.field = V4L2_FIELD_INTERLACED,
+		.colorspace = V4L2_COLORSPACE_SMPTE170M,
+	};
+	int ret;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+		vou_dev->rect.width, vou_dev->rect.height,
+		pix->width, pix->height);
+
+	if (pix->field == V4L2_FIELD_ANY)
+		pix->field = V4L2_FIELD_NONE;
+
+	if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    pix->field != V4L2_FIELD_NONE)
+		return -EINVAL;
+
+	for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+		if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+			break;
+
+	if (pix_idx == ARRAY_SIZE(vou_fmt))
+		return -EINVAL;
+
+	if (vou_dev->std & V4L2_STD_525_60)
+		img_height_max = 480;
+	else
+		img_height_max = 576;
+
+	/* Image width must be a multiple of 4 */
+	v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+			      &pix->height, 0, img_height_max, 1, 0);
+
+	geo.in_width = pix->width;
+	geo.in_height = pix->height;
+	geo.output = vou_dev->rect;
+
+	vou_adjust_output(&geo, vou_dev->std);
+
+	mbfmt.width = geo.output.width;
+	mbfmt.height = geo.output.height;
+	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+					 s_mbus_fmt, &mbfmt);
+	/* Must be implemented, so, don't check for -ENOIOCTLCMD */
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+		geo.output.width, geo.output.height, mbfmt.width, mbfmt.height);
+
+	/* Sanity checks */
+	if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+	    (unsigned)mbfmt.height > img_height_max ||
+	    mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8)
+		return -EIO;
+
+	if (mbfmt.width != geo.output.width ||
+	    mbfmt.height != geo.output.height) {
+		geo.output.width = mbfmt.width;
+		geo.output.height = mbfmt.height;
+
+		vou_adjust_input(&geo, vou_dev->std);
+	}
+
+	/* We tried to preserve output rectangle, but it could have changed */
+	vou_dev->rect = geo.output;
+	pix->width = geo.in_width;
+	pix->height = geo.in_height;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
+		pix->width, pix->height);
+
+	vou_dev->pix_idx = pix_idx;
+
+	vou_dev->pix = *pix;
+
+	sh_vou_configure_geometry(vou_dev, pix_idx,
+				  geo.scale_idx_h, geo.scale_idx_v);
+
+	return 0;
+}
+
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+				  struct v4l2_format *fmt)
+{
+	struct sh_vou_file *vou_file = priv;
+	struct v4l2_pix_format *pix = &fmt->fmt.pix;
+	int i;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	pix->field = V4L2_FIELD_NONE;
+
+	v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+			      &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
+		if (vou_fmt[i].pfmt == pix->pixelformat)
+			return 0;
+
+	pix->pixelformat = vou_fmt[0].pfmt;
+
+	return 0;
+}
+
+static int sh_vou_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *req)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	return videobuf_reqbufs(&vou_file->vbq, req);
+}
+
+static int sh_vou_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *b)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	return videobuf_querybuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	return videobuf_qbuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
+}
+
+static int sh_vou_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type buftype)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_file *vou_file = priv;
+	int ret;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+					 video, s_stream, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD)
+		return ret;
+
+	/* This calls our .buf_queue() (== sh_vou_buf_queue) */
+	return videobuf_streamon(&vou_file->vbq);
+}
+
+static int sh_vou_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type buftype)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	/*
+	 * This calls buf_release from host driver's videobuf_queue_ops for all
+	 * remaining buffers. When the last buffer is freed, stop streaming
+	 */
+	videobuf_streamoff(&vou_file->vbq);
+	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0);
+
+	return 0;
+}
+
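+/* Translate the platform bus format into the mode code for VOUCR bits 31:29 */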
+static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
+{
+	switch (bus_fmt) {
+	default:
+		pr_warning("%s(): Invalid bus-format code %d, using default 8-bit\n",
+			   __func__, bus_fmt);
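+		/* fall through to the 8-bit default */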
+	case SH_VOU_BUS_8BIT:
+		return 1;
+	case SH_VOU_BUS_16BIT:
+		return 0;
+	case SH_VOU_BUS_BT656:
+		return 3;
+	}
+}
+
+static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	int ret;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id);
+
+	if (*std_id & ~vdev->tvnorms)
+		return -EINVAL;
+
+	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+					 s_std_output, *std_id);
+	/* Shall we continue if the subdev doesn't support .s_std_output()? */
+	if (ret < 0 && ret != -ENOIOCTLCMD)
+		return ret;
+
+	if (*std_id & V4L2_STD_525_60)
+		sh_vou_reg_ab_set(vou_dev, VOUCR,
+			sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
+	else
+		sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+
+	vou_dev->std = *std_id;
+
+	return 0;
+}
+
+static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+	*std = vou_dev->std;
+
+	return 0;
+}
+
+static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	a->c = vou_dev->rect;
+
+	return 0;
+}
+
+/* Assume a dull encoder, do all the work ourselves. */
+static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct v4l2_rect *rect = &a->c;
+	struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
+	struct v4l2_pix_format *pix = &vou_dev->pix;
+	struct sh_vou_geometry geo;
+	struct v4l2_mbus_framefmt mbfmt = {
+		/* Revisit: is this the correct code? */
+		.code = V4L2_MBUS_FMT_YUYV8_2X8,
+		.field = V4L2_FIELD_INTERLACED,
+		.colorspace = V4L2_COLORSPACE_SMPTE170M,
+	};
+	unsigned int img_height_max;
+	int ret;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
+		rect->width, rect->height, rect->left, rect->top);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	if (vou_dev->std & V4L2_STD_525_60)
+		img_height_max = 480;
+	else
+		img_height_max = 576;
+
+	v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+			      &rect->height, 0, img_height_max, 1, 0);
+
+	if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
+		rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
+
+	if (rect->height + rect->top > img_height_max)
+		rect->top = img_height_max - rect->height;
+
+	geo.output = *rect;
+	geo.in_width = pix->width;
+	geo.in_height = pix->height;
+
+	/* Configure the encoder one-to-one, position at 0, ignore errors */
+	sd_crop.c.width = geo.output.width;
+	sd_crop.c.height = geo.output.height;
+	/*
+	 * We first issue a S_CROP, so that the subsequent S_FMT delivers the
+	 * final encoder configuration.
+	 */
+	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+				   s_crop, &sd_crop);
+	mbfmt.width = geo.output.width;
+	mbfmt.height = geo.output.height;
+	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+					 s_mbus_fmt, &mbfmt);
+	/* Must be implemented, so, don't check for -ENOIOCTLCMD */
+	if (ret < 0)
+		return ret;
+
+	/* Sanity checks */
+	if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+	    (unsigned)mbfmt.height > img_height_max ||
+	    mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8)
+		return -EIO;
+
+	geo.output.width = mbfmt.width;
+	geo.output.height = mbfmt.height;
+
+	/*
+	 * No down-scaling. According to the API, current call has precedence:
+	 * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
+	 */
+	vou_adjust_input(&geo, vou_dev->std);
+
+	/* We tried to preserve output rectangle, but it could have changed */
+	vou_dev->rect = geo.output;
+	pix->width = geo.in_width;
+	pix->height = geo.in_height;
+
+	sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
+				  geo.scale_idx_h, geo.scale_idx_v);
+
+	return 0;
+}
+
+/*
+ * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313. The default
+ * rectangle matches the initial register values; its height accounts for the
+ * interlaced format. The actual image can only go up to 720 x 2 * 240, so
+ * VOUVPR can only meaningfully contain values <= 720 and <= 240 respectively,
+ * not <= 864 and <= 312.
+ */
+static int sh_vou_cropcap(struct file *file, void *priv,
+			  struct v4l2_cropcap *a)
+{
+	struct sh_vou_file *vou_file = priv;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	a->type				= V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	a->bounds.left			= 0;
+	a->bounds.top			= 0;
+	a->bounds.width			= VOU_MAX_IMAGE_WIDTH;
+	a->bounds.height		= VOU_MAX_IMAGE_HEIGHT;
+	/* Default = max; set VOUDPR = 0, which is not the hardware default */
+	a->defrect.left			= 0;
+	a->defrect.top			= 0;
+	a->defrect.width		= VOU_MAX_IMAGE_WIDTH;
+	a->defrect.height		= VOU_MAX_IMAGE_HEIGHT;
+	a->pixelaspect.numerator	= 1;
+	a->pixelaspect.denominator	= 1;
+
+	return 0;
+}
+
+static irqreturn_t sh_vou_isr(int irq, void *dev_id)
+{
+	struct sh_vou_device *vou_dev = dev_id;
+	static unsigned long j;
+	struct videobuf_buffer *vb;
+	static int cnt;
+	static int side;
+	u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
+	u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
+
+	if (!(irq_status & 0x300)) {
+		if (printk_timed_ratelimit(&j, 500))
+			dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
+				 irq_status);
+		return IRQ_NONE;
+	}
+
+	spin_lock(&vou_dev->lock);
+	if (!vou_dev->active || list_empty(&vou_dev->queue)) {
+		if (printk_timed_ratelimit(&j, 500))
+			dev_warn(vou_dev->v4l2_dev.dev,
+				 "IRQ without active buffer: %x!\n", irq_status);
+		/* Just ack: buf_release will disable further interrupts */
+		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
+		spin_unlock(&vou_dev->lock);
+		return IRQ_HANDLED;
+	}
+
+	masked = ~(0x300 & irq_status) & irq_status & 0x30304;
+	dev_dbg(vou_dev->v4l2_dev.dev,
+		"IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
+		irq_status, masked, vou_status, cnt);
+
+	cnt++;
+	side = vou_status & 0x10000;
+
+	/* Clear only set interrupts */
+	sh_vou_reg_a_write(vou_dev, VOUIR, masked);
+
+	vb = vou_dev->active;
+	list_del(&vb->queue);
+
+	vb->state = VIDEOBUF_DONE;
+	do_gettimeofday(&vb->ts);
+	vb->field_count++;
+	wake_up(&vb->done);
+
+	if (list_empty(&vou_dev->queue)) {
+		/* Stop VOU */
+		dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
+			__func__, cnt);
+		sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+		vou_dev->active = NULL;
+		vou_dev->status = SH_VOU_INITIALISING;
+		/* Disable End-of-Frame (VSYNC) interrupts */
+		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+		spin_unlock(&vou_dev->lock);
+		return IRQ_HANDLED;
+	}
+
+	vou_dev->active = list_entry(vou_dev->queue.next,
+				     struct videobuf_buffer, queue);
+
+	if (vou_dev->active->queue.next != &vou_dev->queue) {
+		struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
+						struct videobuf_buffer, queue);
+		sh_vou_schedule_next(vou_dev, new);
+	}
+
+	spin_unlock(&vou_dev->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
+{
+	struct sh_vou_pdata *pdata = vou_dev->pdata;
+	u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
+	int i = 100;
+
+	/* Disable all IRQs */
+	sh_vou_reg_a_write(vou_dev, VOUIR, 0);
+
+	/* Reset VOU interfaces - registers unaffected */
+	sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
+	while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
+		udelay(1);
+
+	if (!i)
+		return -ETIMEDOUT;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);
+
+	if (pdata->flags & SH_VOU_PCLK_FALLING)
+		voucr |= 1 << 28;
+	if (pdata->flags & SH_VOU_HSYNC_LOW)
+		voucr |= 1 << 27;
+	if (pdata->flags & SH_VOU_VSYNC_LOW)
+		voucr |= 1 << 26;
+	sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);
+
+	/* Manual register side switching at first */
+	sh_vou_reg_a_write(vou_dev, VOURCR, 4);
+	/* Default - fixed HSYNC length, can be made configurable if required */
+	sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+
+	return 0;
+}
+
+/* File operations */
+static int sh_vou_open(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
+					       GFP_KERNEL);
+
+	if (!vou_file)
+		return -ENOMEM;
+
+	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+	file->private_data = vou_file;
+
+	if (mutex_lock_interruptible(&vou_dev->fop_lock))
+		return -ERESTARTSYS;
+	if (atomic_inc_return(&vou_dev->use_count) == 1) {
+		int ret;
+		/* First open */
+		vou_dev->status = SH_VOU_INITIALISING;
+		pm_runtime_get_sync(vdev->v4l2_dev->dev);
+		ret = sh_vou_hw_init(vou_dev);
+		if (ret < 0) {
+			atomic_dec(&vou_dev->use_count);
+			pm_runtime_put(vdev->v4l2_dev->dev);
+			vou_dev->status = SH_VOU_IDLE;
+			mutex_unlock(&vou_dev->fop_lock);
+			return ret;
+		}
+	}
+
+	videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops,
+				       vou_dev->v4l2_dev.dev, &vou_dev->lock,
+				       V4L2_BUF_TYPE_VIDEO_OUTPUT,
+				       V4L2_FIELD_NONE,
+				       sizeof(struct videobuf_buffer), vdev,
+				       &vou_dev->fop_lock);
+	mutex_unlock(&vou_dev->fop_lock);
+
+	return 0;
+}
+
+static int sh_vou_release(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_file *vou_file = file->private_data;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	if (!atomic_dec_return(&vou_dev->use_count)) {
+		mutex_lock(&vou_dev->fop_lock);
+		/* Last close */
+		vou_dev->status = SH_VOU_IDLE;
+		sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+		pm_runtime_put(vdev->v4l2_dev->dev);
+		mutex_unlock(&vou_dev->fop_lock);
+	}
+
+	file->private_data = NULL;
+	kfree(vou_file);
+
+	return 0;
+}
+
+static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_file *vou_file = file->private_data;
+	int ret;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	if (mutex_lock_interruptible(&vou_dev->fop_lock))
+		return -ERESTARTSYS;
+	ret = videobuf_mmap_mapper(&vou_file->vbq, vma);
+	mutex_unlock(&vou_dev->fop_lock);
+	return ret;
+}
+
+static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+	struct sh_vou_file *vou_file = file->private_data;
+	unsigned int res;
+
+	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+	mutex_lock(&vou_dev->fop_lock);
+	res = videobuf_poll_stream(file, &vou_file->vbq, wait);
+	mutex_unlock(&vou_dev->fop_lock);
+	return res;
+}
+
+static int sh_vou_g_chip_ident(struct file *file, void *fh,
+				   struct v4l2_dbg_chip_ident *id)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_chip_ident, id);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int sh_vou_g_register(struct file *file, void *fh,
+				 struct v4l2_dbg_register *reg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_register, reg);
+}
+
+static int sh_vou_s_register(struct file *file, void *fh,
+				 struct v4l2_dbg_register *reg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+	return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, s_register, reg);
+}
+#endif
+
+/* sh_vou display ioctl operations */
+static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
+	.vidioc_querycap        	= sh_vou_querycap,
+	.vidioc_enum_fmt_vid_out	= sh_vou_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out		= sh_vou_g_fmt_vid_out,
+	.vidioc_s_fmt_vid_out		= sh_vou_s_fmt_vid_out,
+	.vidioc_try_fmt_vid_out		= sh_vou_try_fmt_vid_out,
+	.vidioc_reqbufs			= sh_vou_reqbufs,
+	.vidioc_querybuf		= sh_vou_querybuf,
+	.vidioc_qbuf			= sh_vou_qbuf,
+	.vidioc_dqbuf			= sh_vou_dqbuf,
+	.vidioc_streamon		= sh_vou_streamon,
+	.vidioc_streamoff		= sh_vou_streamoff,
+	.vidioc_s_std			= sh_vou_s_std,
+	.vidioc_g_std			= sh_vou_g_std,
+	.vidioc_cropcap			= sh_vou_cropcap,
+	.vidioc_g_crop			= sh_vou_g_crop,
+	.vidioc_s_crop			= sh_vou_s_crop,
+	.vidioc_g_chip_ident		= sh_vou_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= sh_vou_g_register,
+	.vidioc_s_register		= sh_vou_s_register,
+#endif
+};
+
+static const struct v4l2_file_operations sh_vou_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sh_vou_open,
+	.release	= sh_vou_release,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= sh_vou_mmap,
+	.poll		= sh_vou_poll,
+};
+
+static const struct video_device sh_vou_video_template = {
+	.name		= "sh_vou",
+	.fops		= &sh_vou_fops,
+	.ioctl_ops	= &sh_vou_ioctl_ops,
+	.tvnorms	= V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
+	.current_norm	= V4L2_STD_NTSC_M,
+};
+
+static int __devinit sh_vou_probe(struct platform_device *pdev)
+{
+	struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
+	struct v4l2_rect *rect;
+	struct v4l2_pix_format *pix;
+	struct i2c_adapter *i2c_adap;
+	struct video_device *vdev;
+	struct sh_vou_device *vou_dev;
+	struct resource *reg_res, *region;
+	struct v4l2_subdev *subdev;
+	int irq, ret;
+
+	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+
+	if (!vou_pdata || !reg_res || irq <= 0) {
+		dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
+		return -ENODEV;
+	}
+
+	vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
+	if (!vou_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vou_dev->queue);
+	spin_lock_init(&vou_dev->lock);
+	mutex_init(&vou_dev->fop_lock);
+	atomic_set(&vou_dev->use_count, 0);
+	vou_dev->pdata = vou_pdata;
+	vou_dev->status = SH_VOU_IDLE;
+
+	rect = &vou_dev->rect;
+	pix = &vou_dev->pix;
+
+	/* Fill in defaults */
+	vou_dev->std		= sh_vou_video_template.current_norm;
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= VOU_MAX_IMAGE_WIDTH;
+	rect->height		= 480;
+	pix->width		= VOU_MAX_IMAGE_WIDTH;
+	pix->height		= 480;
+	pix->pixelformat	= V4L2_PIX_FMT_YVYU;
+	pix->field		= V4L2_FIELD_NONE;
+	pix->bytesperline	= VOU_MAX_IMAGE_WIDTH * 2;
+	pix->sizeimage		= VOU_MAX_IMAGE_WIDTH * 2 * 480;
+	pix->colorspace		= V4L2_COLORSPACE_SMPTE170M;
+
+	region = request_mem_region(reg_res->start, resource_size(reg_res),
+				    pdev->name);
+	if (!region) {
+		dev_err(&pdev->dev, "VOU region already claimed\n");
+		ret = -EBUSY;
+		goto ereqmemreg;
+	}
+
+	vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
+	if (!vou_dev->base) {
+		ret = -ENOMEM;
+		goto emap;
+	}
+
+	ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
+	if (ret < 0)
+		goto ereqirq;
+
+	ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Error registering v4l2 device\n");
+		goto ev4l2devreg;
+	}
+
+	/* Allocate memory for video device */
+	vdev = video_device_alloc();
+	if (vdev == NULL) {
+		ret = -ENOMEM;
+		goto evdevalloc;
+	}
+
+	*vdev = sh_vou_video_template;
+	if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
+		vdev->tvnorms |= V4L2_STD_PAL;
+	vdev->v4l2_dev = &vou_dev->v4l2_dev;
+	vdev->release = video_device_release;
+	vdev->lock = &vou_dev->fop_lock;
+
+	vou_dev->vdev = vdev;
+	video_set_drvdata(vdev, vou_dev);
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_resume(&pdev->dev);
+
+	i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
+	if (!i2c_adap) {
+		ret = -ENODEV;
+		goto ei2cgadap;
+	}
+
+	ret = sh_vou_hw_init(vou_dev);
+	if (ret < 0)
+		goto ereset;
+
+	subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
+			vou_pdata->board_info, NULL);
+	if (!subdev) {
+		ret = -ENOMEM;
+		goto ei2cnd;
+	}
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0)
+		goto evregdev;
+
+	return 0;
+
+evregdev:
+ei2cnd:
+ereset:
+	i2c_put_adapter(i2c_adap);
+ei2cgadap:
+	video_device_release(vdev);
+	pm_runtime_disable(&pdev->dev);
+evdevalloc:
+	v4l2_device_unregister(&vou_dev->v4l2_dev);
+ev4l2devreg:
+	free_irq(irq, vou_dev);
+ereqirq:
+	iounmap(vou_dev->base);
+emap:
+	release_mem_region(reg_res->start, resource_size(reg_res));
+ereqmemreg:
+	kfree(vou_dev);
+	return ret;
+}
+
+static int __devexit sh_vou_remove(struct platform_device *pdev)
+{
+	int irq = platform_get_irq(pdev, 0);
+	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+	struct sh_vou_device *vou_dev = container_of(v4l2_dev,
+						struct sh_vou_device, v4l2_dev);
+	struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+					    struct v4l2_subdev, list);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct resource *reg_res;
+
+	if (irq > 0)
+		free_irq(irq, vou_dev);
+	pm_runtime_disable(&pdev->dev);
+	video_unregister_device(vou_dev->vdev);
+	i2c_put_adapter(client->adapter);
+	v4l2_device_unregister(&vou_dev->v4l2_dev);
+	iounmap(vou_dev->base);
+	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (reg_res)
+		release_mem_region(reg_res->start, resource_size(reg_res));
+	kfree(vou_dev);
+	return 0;
+}
+
+static struct platform_driver __refdata sh_vou = {
+	.remove  = __devexit_p(sh_vou_remove),
+	.driver  = {
+		.name	= "sh-vou",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init sh_vou_init(void)
+{
+	return platform_driver_probe(&sh_vou, sh_vou_probe);
+}
+
+static void __exit sh_vou_exit(void)
+{
+	platform_driver_unregister(&sh_vou);
+}
+
+module_init(sh_vou_init);
+module_exit(sh_vou_exit);
+
+MODULE_DESCRIPTION("SuperH VOU driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1.0");
+MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/platform/soc_camera.c b/drivers/media/platform/soc_camera.c
new file mode 100644
index 0000000..9758217
--- /dev/null
+++ b/drivers/media/platform/soc_camera.c
@@ -0,0 +1,1554 @@
+/*
+ * camera image capture (abstract) bus driver
+ *
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This driver provides an interface between platform-specific camera
+ * busses and camera devices. It should be used if the camera is
+ * connected not over a "proper" bus like PCI or USB, but over a
+ * special bus, like, for example, the Quick Capture interface on PXA270
+ * SoCs. Later it should also be used for i.MX31 SoCs from Freescale.
+ * It can handle multiple cameras and / or multiple busses, which can
+ * be used, e.g., in stereo-vision applications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/vmalloc.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf2-core.h>
+#include <media/soc_mediabus.h>
+
+/* Default to VGA resolution */
+#define DEFAULT_WIDTH	640
+#define DEFAULT_HEIGHT	480
+
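+/* True when the device is streaming on its videobuf or videobuf2 queue */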
+#define is_streaming(ici, icd)				\
+	(((ici)->ops->init_videobuf) ?			\
+	 (icd)->vb_vidq.streaming :			\
+	 vb2_is_streaming(&(icd)->vb2_vidq))
+
+static LIST_HEAD(hosts);
+static LIST_HEAD(devices);
+static DEFINE_MUTEX(list_lock);		/* Protects the list of hosts */
+
+static int soc_camera_power_on(struct soc_camera_device *icd,
+			       struct soc_camera_link *icl)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	int ret = regulator_bulk_enable(icl->num_regulators,
+					icl->regulators);
+	if (ret < 0) {
+		dev_err(icd->pdev, "Cannot enable regulators\n");
+		return ret;
+	}
+
+	if (icl->power) {
+		ret = icl->power(icd->control, 1);
+		if (ret < 0) {
+			dev_err(icd->pdev,
+				"Platform failed to power-on the camera.\n");
+			goto elinkpwr;
+		}
+	}
+
+	ret = v4l2_subdev_call(sd, core, s_power, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+		goto esdpwr;
+
+	return 0;
+
+esdpwr:
+	if (icl->power)
+		icl->power(icd->control, 0);
+elinkpwr:
+	regulator_bulk_disable(icl->num_regulators,
+			       icl->regulators);
+	return ret;
+}
+
+static int soc_camera_power_off(struct soc_camera_device *icd,
+				struct soc_camera_link *icl)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	int ret = v4l2_subdev_call(sd, core, s_power, 0);
+
+	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+		return ret;
+
+	if (icl->power) {
+		ret = icl->power(icd->control, 0);
+		if (ret < 0) {
+			dev_err(icd->pdev,
+				"Platform failed to power-off the camera.\n");
+			return ret;
+		}
+	}
+
+	ret = regulator_bulk_disable(icl->num_regulators,
+				     icl->regulators);
+	if (ret < 0)
+		dev_err(icd->pdev, "Cannot disable regulators\n");
+
+	return ret;
+}
+
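+/* Find the host/sensor format translation entry matching a user fourcc */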
+const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
+	struct soc_camera_device *icd, unsigned int fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; i < icd->num_user_formats; i++)
+		if (icd->user_formats[i].host_fmt->fourcc == fourcc)
+			return icd->user_formats + i;
+	return NULL;
+}
+EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
+
+/**
+ * soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
+ * @icl:	camera platform parameters
+ * @cfg:	media bus configuration
+ * @return:	resulting flags
+ */
+unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
+					   const struct v4l2_mbus_config *cfg)
+{
+	unsigned long f, flags = cfg->flags;
+
+	/* If only one of the two polarities is supported, switch to the opposite */
+	if (icl->flags & SOCAM_SENSOR_INVERT_HSYNC) {
+		f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW);
+		if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW)
+			flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW;
+	}
+
+	if (icl->flags & SOCAM_SENSOR_INVERT_VSYNC) {
+		f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW);
+		if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW)
+			flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW;
+	}
+
+	if (icl->flags & SOCAM_SENSOR_INVERT_PCLK) {
+		f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING);
+		if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING)
+			flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
+	}
+
+	return flags;
+}
+EXPORT_SYMBOL(soc_camera_apply_board_flags);
+
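+/* Split a fourcc into its four characters for "%c%c%c%c" format strings */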
+#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
+	((x) >> 24) & 0xff
+
+static int soc_camera_try_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	int ret;
+
+	dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
+		pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+	if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+		pix->bytesperline = 0;
+		pix->sizeimage = 0;
+	}
+
+	ret = ici->ops->try_fmt(icd, f);
+	if (ret < 0)
+		return ret;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+	if (!xlate)
+		return -EINVAL;
+
+	ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+	if (ret < 0)
+		return ret;
+
+	pix->bytesperline = max_t(u32, pix->bytesperline, ret);
+
+	ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+				  pix->height);
+	if (ret < 0)
+		return ret;
+
+	pix->sizeimage = max_t(u32, pix->sizeimage, ret);
+
+	return 0;
+}
+
+static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
+				      struct v4l2_format *f)
+{
+	struct soc_camera_device *icd = file->private_data;
+
+	WARN_ON(priv != file->private_data);
+
+	/* Only single-plane capture is supported so far */
+	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	/* limit format to hardware capabilities */
+	return soc_camera_try_fmt(icd, f);
+}
+
+static int soc_camera_enum_input(struct file *file, void *priv,
+				 struct v4l2_input *inp)
+{
+	if (inp->index != 0)
+		return -EINVAL;
+
+	/* default is camera */
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+	inp->std  = V4L2_STD_UNKNOWN;
+	strcpy(inp->name, "Camera");
+
+	return 0;
+}
+
+static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+
+	return 0;
+}
+
+static int soc_camera_s_input(struct file *file, void *priv, unsigned int i)
+{
+	if (i > 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+	return v4l2_subdev_call(sd, core, s_std, *a);
+}
+
+static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+	return v4l2_subdev_call(sd, core, g_std, a);
+}
+
+static int soc_camera_enum_framesizes(struct file *file, void *fh,
+					 struct v4l2_frmsizeenum *fsize)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	return ici->ops->enum_framesizes(icd, fsize);
+}
+
+static int soc_camera_reqbufs(struct file *file, void *priv,
+			      struct v4l2_requestbuffers *p)
+{
+	int ret;
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	WARN_ON(priv != file->private_data);
+
+	if (icd->streamer && icd->streamer != file)
+		return -EBUSY;
+
+	if (ici->ops->init_videobuf) {
+		ret = videobuf_reqbufs(&icd->vb_vidq, p);
+		if (ret < 0)
+			return ret;
+
+		ret = ici->ops->reqbufs(icd, p);
+	} else {
+		ret = vb2_reqbufs(&icd->vb2_vidq, p);
+	}
+
+	if (!ret && !icd->streamer)
+		icd->streamer = file;
+
+	return ret;
+}
+
+static int soc_camera_querybuf(struct file *file, void *priv,
+			       struct v4l2_buffer *p)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	WARN_ON(priv != file->private_data);
+
+	if (ici->ops->init_videobuf)
+		return videobuf_querybuf(&icd->vb_vidq, p);
+	else
+		return vb2_querybuf(&icd->vb2_vidq, p);
+}
+
+static int soc_camera_qbuf(struct file *file, void *priv,
+			   struct v4l2_buffer *p)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	WARN_ON(priv != file->private_data);
+
+	if (icd->streamer != file)
+		return -EBUSY;
+
+	if (ici->ops->init_videobuf)
+		return videobuf_qbuf(&icd->vb_vidq, p);
+	else
+		return vb2_qbuf(&icd->vb2_vidq, p);
+}
+
+static int soc_camera_dqbuf(struct file *file, void *priv,
+			    struct v4l2_buffer *p)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	WARN_ON(priv != file->private_data);
+
+	if (icd->streamer != file)
+		return -EBUSY;
+
+	if (ici->ops->init_videobuf)
+		return videobuf_dqbuf(&icd->vb_vidq, p, file->f_flags & O_NONBLOCK);
+	else
+		return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
+}
+
+static int soc_camera_create_bufs(struct file *file, void *priv,
+			    struct v4l2_create_buffers *create)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	/* videobuf2 only */
+	if (ici->ops->init_videobuf)
+		return -EINVAL;
+	else
+		return vb2_create_bufs(&icd->vb2_vidq, create);
+}
+
+static int soc_camera_prepare_buf(struct file *file, void *priv,
+				  struct v4l2_buffer *b)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	/* videobuf2 only */
+	if (ici->ops->init_videobuf)
+		return -EINVAL;
+	else
+		return vb2_prepare_buf(&icd->vb2_vidq, b);
+}
+
+/* Always entered with .video_lock held */
+static int soc_camera_init_user_formats(struct soc_camera_device *icd)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	unsigned int i, fmts = 0, raw_fmts = 0;
+	int ret;
+	enum v4l2_mbus_pixelcode code;
+
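+	/* Count the media-bus formats the attached sensor subdevice supports */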
+	while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
+		raw_fmts++;
+
+	if (!ici->ops->get_formats)
+		/*
+		 * Fallback mode - the host will have to serve all
+		 * sensor-provided formats one-to-one to the user
+		 */
+		fmts = raw_fmts;
+	else
+		/*
+		 * First pass - only count formats this host-sensor
+		 * configuration can provide
+		 */
+		for (i = 0; i < raw_fmts; i++) {
+			ret = ici->ops->get_formats(icd, i, NULL);
+			if (ret < 0)
+				return ret;
+			fmts += ret;
+		}
+
+	if (!fmts)
+		return -ENXIO;
+
+	icd->user_formats =
+		vmalloc(fmts * sizeof(struct soc_camera_format_xlate));
+	if (!icd->user_formats)
+		return -ENOMEM;
+
+	dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts);
+
+	/* Second pass - actually fill data formats */
+	fmts = 0;
+	for (i = 0; i < raw_fmts; i++)
+		if (!ici->ops->get_formats) {
+			v4l2_subdev_call(sd, video, enum_mbus_fmt, i, &code);
+			icd->user_formats[fmts].host_fmt =
+				soc_mbus_get_fmtdesc(code);
+			if (icd->user_formats[fmts].host_fmt)
+				icd->user_formats[fmts++].code = code;
+		} else {
+			ret = ici->ops->get_formats(icd, i,
+						    &icd->user_formats[fmts]);
+			if (ret < 0)
+				goto egfmt;
+			fmts += ret;
+		}
+
+	icd->num_user_formats = fmts;
+	icd->current_fmt = &icd->user_formats[0];
+
+	return 0;
+
+egfmt:
+	vfree(icd->user_formats);
+	return ret;
+}
+
+/* Always entered with .video_lock held */
+static void soc_camera_free_user_formats(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	if (ici->ops->put_formats)
+		ici->ops->put_formats(icd);
+	icd->current_fmt = NULL;
+	icd->num_user_formats = 0;
+	vfree(icd->user_formats);
+	icd->user_formats = NULL;
+}
+
+/* Called with .vb_lock held, or from the first open(2), see comment there */
+static int soc_camera_set_fmt(struct soc_camera_device *icd,
+			      struct v4l2_format *f)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	int ret;
+
+	dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
+		pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+	/* We always call try_fmt() before set_fmt() or set_crop() */
+	ret = soc_camera_try_fmt(icd, f);
+	if (ret < 0)
+		return ret;
+
+	ret = ici->ops->set_fmt(icd, f);
+	if (ret < 0) {
+		return ret;
+	} else if (!icd->current_fmt ||
+		   icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
+		dev_err(icd->pdev,
+			"Host driver hasn't set up current format correctly!\n");
+		return -EINVAL;
+	}
+
+	icd->user_width		= pix->width;
+	icd->user_height	= pix->height;
+	icd->bytesperline	= pix->bytesperline;
+	icd->sizeimage		= pix->sizeimage;
+	icd->colorspace		= pix->colorspace;
+	icd->field		= pix->field;
+	if (ici->ops->init_videobuf)
+		icd->vb_vidq.field = pix->field;
+
+	dev_dbg(icd->pdev, "set width: %d height: %d\n",
+		icd->user_width, icd->user_height);
+
+	/* set physical bus parameters */
+	return ici->ops->set_bus_param(icd);
+}
+
+static int soc_camera_open(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
+	struct soc_camera_link *icl = to_soc_camera_link(icd);
+	struct soc_camera_host *ici;
+	int ret;
+
+	if (!to_soc_camera_control(icd))
+		/* No device driver attached */
+		return -ENODEV;
+
+	ici = to_soc_camera_host(icd->parent);
+
+	if (mutex_lock_interruptible(&icd->video_lock))
+		return -ERESTARTSYS;
+	if (!try_module_get(ici->ops->owner)) {
+		dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
+		ret = -EINVAL;
+		goto emodule;
+	}
+
+	icd->use_count++;
+
+	/* Now we really have to activate the camera */
+	if (icd->use_count == 1) {
+		/* Restore parameters before the last close() per V4L2 API */
+		struct v4l2_format f = {
+			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			.fmt.pix = {
+				.width		= icd->user_width,
+				.height		= icd->user_height,
+				.field		= icd->field,
+				.colorspace	= icd->colorspace,
+				.pixelformat	=
+					icd->current_fmt->host_fmt->fourcc,
+			},
+		};
+
+		/* The camera could have been already on, try to reset */
+		if (icl->reset)
+			icl->reset(icd->pdev);
+
+		/* Don't mess with the host during probe */
+		mutex_lock(&ici->host_lock);
+		ret = ici->ops->add(icd);
+		mutex_unlock(&ici->host_lock);
+		if (ret < 0) {
+			dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
+			goto eiciadd;
+		}
+
+		ret = soc_camera_power_on(icd, icl);
+		if (ret < 0)
+			goto epower;
+
+		pm_runtime_enable(&icd->vdev->dev);
+		ret = pm_runtime_resume(&icd->vdev->dev);
+		if (ret < 0 && ret != -ENOSYS)
+			goto eresume;
+
+		/*
+		 * Try to configure with default parameters. Notice: this is the
+		 * very first open, so we cannot race against other calls,
+		 * apart from someone else calling open() simultaneously, but
+		 * .video_lock is protecting us against it.
+		 */
+		ret = soc_camera_set_fmt(icd, &f);
+		if (ret < 0)
+			goto esfmt;
+
+		if (ici->ops->init_videobuf) {
+			ici->ops->init_videobuf(&icd->vb_vidq, icd);
+		} else {
+			ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd);
+			if (ret < 0)
+				goto einitvb;
+		}
+		v4l2_ctrl_handler_setup(&icd->ctrl_handler);
+	}
+	mutex_unlock(&icd->video_lock);
+
+	file->private_data = icd;
+	dev_dbg(icd->pdev, "camera device open\n");
+
+	return 0;
+
+	/*
+	 * First four errors are entered with the .video_lock held
+	 * and use_count == 1
+	 */
+einitvb:
+esfmt:
+	pm_runtime_disable(&icd->vdev->dev);
+eresume:
+	soc_camera_power_off(icd, icl);
+epower:
+	ici->ops->remove(icd);
+eiciadd:
+	icd->use_count--;
+	module_put(ici->ops->owner);
+emodule:
+	mutex_unlock(&icd->video_lock);
+
+	return ret;
+}
+
+static int soc_camera_close(struct file *file)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	mutex_lock(&icd->video_lock);
+	icd->use_count--;
+	if (!icd->use_count) {
+		struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+		pm_runtime_suspend(&icd->vdev->dev);
+		pm_runtime_disable(&icd->vdev->dev);
+
+		if (ici->ops->init_videobuf2)
+			vb2_queue_release(&icd->vb2_vidq);
+		ici->ops->remove(icd);
+
+		soc_camera_power_off(icd, icl);
+	}
+
+	if (icd->streamer == file)
+		icd->streamer = NULL;
+	mutex_unlock(&icd->video_lock);
+
+	module_put(ici->ops->owner);
+
+	dev_dbg(icd->pdev, "camera device close\n");
+
+	return 0;
+}
+
+static ssize_t soc_camera_read(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct soc_camera_device *icd = file->private_data;
+	int err = -EINVAL;
+
+	dev_err(icd->pdev, "camera device read not implemented\n");
+
+	return err;
+}
+
+static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	int err;
+
+	dev_dbg(icd->pdev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+	if (icd->streamer != file)
+		return -EBUSY;
+
+	if (mutex_lock_interruptible(&icd->video_lock))
+		return -ERESTARTSYS;
+	if (ici->ops->init_videobuf)
+		err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
+	else
+		err = vb2_mmap(&icd->vb2_vidq, vma);
+	mutex_unlock(&icd->video_lock);
+
+	dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
+		(unsigned long)vma->vm_start,
+		(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
+		err);
+
+	return err;
+}
+
+static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	unsigned res = POLLERR;
+
+	if (icd->streamer != file)
+		return POLLERR;
+
+	mutex_lock(&icd->video_lock);
+	if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream))
+		dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
+	else
+		res = ici->ops->poll(file, pt);
+	mutex_unlock(&icd->video_lock);
+	return res;
+}
+
+void soc_camera_lock(struct vb2_queue *vq)
+{
+	struct soc_camera_device *icd = vb2_get_drv_priv(vq);
+	mutex_lock(&icd->video_lock);
+}
+EXPORT_SYMBOL(soc_camera_lock);
+
+void soc_camera_unlock(struct vb2_queue *vq)
+{
+	struct soc_camera_device *icd = vb2_get_drv_priv(vq);
+	mutex_unlock(&icd->video_lock);
+}
+EXPORT_SYMBOL(soc_camera_unlock);
+
+static struct v4l2_file_operations soc_camera_fops = {
+	.owner		= THIS_MODULE,
+	.open		= soc_camera_open,
+	.release	= soc_camera_close,
+	.unlocked_ioctl	= video_ioctl2,
+	.read		= soc_camera_read,
+	.mmap		= soc_camera_mmap,
+	.poll		= soc_camera_poll,
+};
+
+static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
+				    struct v4l2_format *f)
+{
+	struct soc_camera_device *icd = file->private_data;
+	int ret;
+
+	WARN_ON(priv != file->private_data);
+
+	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type);
+		return -EINVAL;
+	}
+
+	if (icd->streamer && icd->streamer != file)
+		return -EBUSY;
+
+	if (is_streaming(to_soc_camera_host(icd->parent), icd)) {
+		dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
+		return -EBUSY;
+	}
+
+	ret = soc_camera_set_fmt(icd, f);
+
+	if (!ret && !icd->streamer)
+		icd->streamer = file;
+
+	return ret;
+}
+
+static int soc_camera_enum_fmt_vid_cap(struct file *file, void  *priv,
+				       struct v4l2_fmtdesc *f)
+{
+	struct soc_camera_device *icd = file->private_data;
+	const struct soc_mbus_pixelfmt *format;
+
+	WARN_ON(priv != file->private_data);
+
+	if (f->index >= icd->num_user_formats)
+		return -EINVAL;
+
+	format = icd->user_formats[f->index].host_fmt;
+
+	if (format->name)
+		strlcpy(f->description, format->name, sizeof(f->description));
+	f->pixelformat = format->fourcc;
+	return 0;
+}
+
+static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
+				    struct v4l2_format *f)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+
+	WARN_ON(priv != file->private_data);
+
+	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	pix->width		= icd->user_width;
+	pix->height		= icd->user_height;
+	pix->bytesperline	= icd->bytesperline;
+	pix->sizeimage		= icd->sizeimage;
+	pix->field		= icd->field;
+	pix->pixelformat	= icd->current_fmt->host_fmt->fourcc;
+	pix->colorspace		= icd->colorspace;
+	dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n",
+		icd->current_fmt->host_fmt->fourcc);
+	return 0;
+}
+
+static int soc_camera_querycap(struct file *file, void  *priv,
+			       struct v4l2_capability *cap)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	WARN_ON(priv != file->private_data);
+
+	strlcpy(cap->driver, ici->drv_name, sizeof(cap->driver));
+	return ici->ops->querycap(ici, cap);
+}
+
+static int soc_camera_streamon(struct file *file, void *priv,
+			       enum v4l2_buf_type i)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	int ret;
+
+	WARN_ON(priv != file->private_data);
+
+	if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (icd->streamer != file)
+		return -EBUSY;
+
+	/* This calls buf_queue from host driver's videobuf_queue_ops */
+	if (ici->ops->init_videobuf)
+		ret = videobuf_streamon(&icd->vb_vidq);
+	else
+		ret = vb2_streamon(&icd->vb2_vidq, i);
+
+	if (!ret)
+		v4l2_subdev_call(sd, video, s_stream, 1);
+
+	return ret;
+}
+
+static int soc_camera_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type i)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	WARN_ON(priv != file->private_data);
+
+	if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (icd->streamer != file)
+		return -EBUSY;
+
+	/*
+	 * This calls buf_release from host driver's videobuf_queue_ops for all
+	 * remaining buffers. When the last buffer is freed, stop capture
+	 */
+	if (ici->ops->init_videobuf)
+		videobuf_streamoff(&icd->vb_vidq);
+	else
+		vb2_streamoff(&icd->vb2_vidq, i);
+
+	v4l2_subdev_call(sd, video, s_stream, 0);
+
+	return 0;
+}
+
+static int soc_camera_cropcap(struct file *file, void *fh,
+			      struct v4l2_cropcap *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	return ici->ops->cropcap(icd, a);
+}
+
+static int soc_camera_g_crop(struct file *file, void *fh,
+			     struct v4l2_crop *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	int ret;
+
+	ret = ici->ops->get_crop(icd, a);
+
+	return ret;
+}
+
+/*
+ * According to the V4L2 API, drivers shall not update the struct v4l2_crop
+ * argument with the actual geometry; instead, the user shall use G_CROP to
+ * retrieve it.
+ */
+static int soc_camera_s_crop(struct file *file, void *fh,
+			     struct v4l2_crop *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct v4l2_rect *rect = &a->c;
+	struct v4l2_crop current_crop;
+	int ret;
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
+		rect->width, rect->height, rect->left, rect->top);
+
+	/* If get_crop fails, we'll let host and / or client drivers decide */
+	ret = ici->ops->get_crop(icd, &current_crop);
+
+	/* Prohibit window size change with initialised buffers */
+	if (ret < 0) {
+		dev_err(icd->pdev,
+			"S_CROP denied: getting current crop failed\n");
+	} else if ((a->c.width == current_crop.c.width &&
+		    a->c.height == current_crop.c.height) ||
+		   !is_streaming(ici, icd)) {
+		/* same size or not streaming - use .set_crop() */
+		ret = ici->ops->set_crop(icd, a);
+	} else if (ici->ops->set_livecrop) {
+		ret = ici->ops->set_livecrop(icd, a);
+	} else {
+		dev_err(icd->pdev,
+			"S_CROP denied: queue initialised and sizes differ\n");
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+static int soc_camera_g_parm(struct file *file, void *fh,
+			     struct v4l2_streamparm *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	if (ici->ops->get_parm)
+		return ici->ops->get_parm(icd, a);
+
+	return -ENOIOCTLCMD;
+}
+
+static int soc_camera_s_parm(struct file *file, void *fh,
+			     struct v4l2_streamparm *a)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+	if (ici->ops->set_parm)
+		return ici->ops->set_parm(icd, a);
+
+	return -ENOIOCTLCMD;
+}
+
+static int soc_camera_g_chip_ident(struct file *file, void *fh,
+				   struct v4l2_dbg_chip_ident *id)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+	return v4l2_subdev_call(sd, core, g_chip_ident, id);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int soc_camera_g_register(struct file *file, void *fh,
+				 struct v4l2_dbg_register *reg)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+	return v4l2_subdev_call(sd, core, g_register, reg);
+}
+
+static int soc_camera_s_register(struct file *file, void *fh,
+				 struct v4l2_dbg_register *reg)
+{
+	struct soc_camera_device *icd = file->private_data;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+	return v4l2_subdev_call(sd, core, s_register, reg);
+}
+#endif
+
+static int soc_camera_probe(struct soc_camera_device *icd);
+
+/* So far this function cannot fail */
+static void scan_add_host(struct soc_camera_host *ici)
+{
+	struct soc_camera_device *icd;
+
+	mutex_lock(&ici->host_lock);
+
+	list_for_each_entry(icd, &devices, list) {
+		if (icd->iface == ici->nr) {
+			int ret;
+
+			icd->parent = ici->v4l2_dev.dev;
+			ret = soc_camera_probe(icd);
+		}
+	}
+
+	mutex_unlock(&ici->host_lock);
+}
+
+#ifdef CONFIG_I2C_BOARDINFO
+static int soc_camera_init_i2c(struct soc_camera_device *icd,
+			       struct soc_camera_link *icl)
+{
+	struct i2c_client *client;
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
+	struct v4l2_subdev *subdev;
+
+	if (!adap) {
+		dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
+			icl->i2c_adapter_id);
+		goto ei2cga;
+	}
+
+	icl->board_info->platform_data = icl;
+
+	subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
+				icl->board_info, NULL);
+	if (!subdev)
+		goto ei2cnd;
+
+	client = v4l2_get_subdevdata(subdev);
+
+	/* Use to_i2c_client(dev) to recover the i2c client */
+	icd->control = &client->dev;
+
+	return 0;
+ei2cnd:
+	i2c_put_adapter(adap);
+ei2cga:
+	return -ENODEV;
+}
+
+static void soc_camera_free_i2c(struct soc_camera_device *icd)
+{
+	struct i2c_client *client =
+		to_i2c_client(to_soc_camera_control(icd));
+	struct i2c_adapter *adap = client->adapter;
+
+	icd->control = NULL;
+	v4l2_device_unregister_subdev(i2c_get_clientdata(client));
+	i2c_unregister_device(client);
+	i2c_put_adapter(adap);
+}
+#else
+#define soc_camera_init_i2c(icd, icl)	(-ENODEV)
+#define soc_camera_free_i2c(icd)	do {} while (0)
+#endif
+
+static int soc_camera_video_start(struct soc_camera_device *icd);
+static int video_dev_create(struct soc_camera_device *icd);
+/* Called during host-driver probe */
+static int soc_camera_probe(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct soc_camera_link *icl = to_soc_camera_link(icd);
+	struct device *control = NULL;
+	struct v4l2_subdev *sd;
+	struct v4l2_mbus_framefmt mf;
+	int ret;
+
+	dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
+
+	/*
+	 * Currently the subdev with the largest number of controls (13) is
+	 * ov6550. So let's pick 16 as a hint for the control handler. Note
+	 * that this is a hint only: too large and you waste some memory, too
+	 * small and there is a (very) small performance hit when looking up
+	 * controls in the internal hash.
+	 */
+	ret = v4l2_ctrl_handler_init(&icd->ctrl_handler, 16);
+	if (ret < 0)
+		return ret;
+
+	ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
+				 icl->regulators);
+	if (ret < 0)
+		goto ereg;
+
+	/* The camera could have been already on, try to reset */
+	if (icl->reset)
+		icl->reset(icd->pdev);
+
+	ret = ici->ops->add(icd);
+	if (ret < 0)
+		goto eadd;
+
+	/*
+	 * This will not yet call v4l2_subdev_core_ops::s_power(1), because the
+	 * subdevice has not been initialised yet. We'll have to call it once
+	 * again after initialisation, even though it shouldn't be needed, as
+	 * we don't do any I/O here.
+	 */
+	ret = soc_camera_power_on(icd, icl);
+	if (ret < 0)
+		goto epower;
+
+	/* Must have icd->vdev before registering the device */
+	ret = video_dev_create(icd);
+	if (ret < 0)
+		goto evdc;
+
+	/* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */
+	if (icl->board_info) {
+		ret = soc_camera_init_i2c(icd, icl);
+		if (ret < 0)
+			goto eadddev;
+	} else if (!icl->add_device || !icl->del_device) {
+		ret = -EINVAL;
+		goto eadddev;
+	} else {
+		if (icl->module_name)
+			ret = request_module(icl->module_name);
+
+		ret = icl->add_device(icd);
+		if (ret < 0)
+			goto eadddev;
+
+		/*
+		 * FIXME: this is racy, have to use driver-binding notification,
+		 * when it is available
+		 */
+		control = to_soc_camera_control(icd);
+		if (!control || !control->driver || !dev_get_drvdata(control) ||
+		    !try_module_get(control->driver->owner)) {
+			icl->del_device(icd);
+			ret = -ENODEV;
+			goto enodrv;
+		}
+	}
+
+	sd = soc_camera_to_subdev(icd);
+	sd->grp_id = soc_camera_grp_id(icd);
+	v4l2_set_subdev_hostdata(sd, icd);
+
+	if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
+		goto ectrl;
+
+	/* At this point client .probe() should have run already */
+	ret = soc_camera_init_user_formats(icd);
+	if (ret < 0)
+		goto eiufmt;
+
+	icd->field = V4L2_FIELD_ANY;
+
+	/*
+	 * ..._video_start() will create a device node, video_register_device()
+	 * itself is protected against concurrent open() calls, but we also have
+	 * to protect our data.
+	 */
+	mutex_lock(&icd->video_lock);
+
+	ret = soc_camera_video_start(icd);
+	if (ret < 0)
+		goto evidstart;
+
+	ret = v4l2_subdev_call(sd, core, s_power, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD)
+		goto esdpwr;
+
+	/* Try to improve our guess of a reasonable window format */
+	if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
+		icd->user_width		= mf.width;
+		icd->user_height	= mf.height;
+		icd->colorspace		= mf.colorspace;
+		icd->field		= mf.field;
+	}
+
+	ici->ops->remove(icd);
+
+	soc_camera_power_off(icd, icl);
+
+	mutex_unlock(&icd->video_lock);
+
+	return 0;
+
+esdpwr:
+	video_unregister_device(icd->vdev);
+evidstart:
+	mutex_unlock(&icd->video_lock);
+	soc_camera_free_user_formats(icd);
+eiufmt:
+ectrl:
+	if (icl->board_info) {
+		soc_camera_free_i2c(icd);
+	} else {
+		icl->del_device(icd);
+		module_put(control->driver->owner);
+	}
+enodrv:
+eadddev:
+	video_device_release(icd->vdev);
+	icd->vdev = NULL;
+evdc:
+	soc_camera_power_off(icd, icl);
+epower:
+	ici->ops->remove(icd);
+eadd:
+	regulator_bulk_free(icl->num_regulators, icl->regulators);
+ereg:
+	v4l2_ctrl_handler_free(&icd->ctrl_handler);
+	return ret;
+}
+
+/*
+ * This is called on device_unregister, which only means we have to disconnect
+ * from the host, but not remove ourselves from the device list
+ */
+static int soc_camera_remove(struct soc_camera_device *icd)
+{
+	struct soc_camera_link *icl = to_soc_camera_link(icd);
+	struct video_device *vdev = icd->vdev;
+
+	BUG_ON(!icd->parent);
+
+	v4l2_ctrl_handler_free(&icd->ctrl_handler);
+	if (vdev) {
+		video_unregister_device(vdev);
+		icd->vdev = NULL;
+	}
+
+	if (icl->board_info) {
+		soc_camera_free_i2c(icd);
+	} else {
+		struct device_driver *drv = to_soc_camera_control(icd)->driver;
+		if (drv) {
+			icl->del_device(icd);
+			module_put(drv->owner);
+		}
+	}
+	soc_camera_free_user_formats(icd);
+
+	regulator_bulk_free(icl->num_regulators, icl->regulators);
+
+	return 0;
+}
+
+static int default_cropcap(struct soc_camera_device *icd,
+			   struct v4l2_cropcap *a)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	return v4l2_subdev_call(sd, video, cropcap, a);
+}
+
+static int default_g_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	return v4l2_subdev_call(sd, video, g_crop, a);
+}
+
+static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	return v4l2_subdev_call(sd, video, s_crop, a);
+}
+
+static int default_g_parm(struct soc_camera_device *icd,
+			  struct v4l2_streamparm *parm)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	return v4l2_subdev_call(sd, video, g_parm, parm);
+}
+
+static int default_s_parm(struct soc_camera_device *icd,
+			  struct v4l2_streamparm *parm)
+{
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	return v4l2_subdev_call(sd, video, s_parm, parm);
+}
+
+static int default_enum_framesizes(struct soc_camera_device *icd,
+				   struct v4l2_frmsizeenum *fsize)
+{
+	int ret;
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	const struct soc_camera_format_xlate *xlate;
+	__u32 pixfmt = fsize->pixel_format;
+	struct v4l2_frmsizeenum fsize_mbus = *fsize;
+
+	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+	if (!xlate)
+		return -EINVAL;
+	/* Map xlate-code to pixel_format; the sensor only handles xlate-codes */
+	fsize_mbus.pixel_format = xlate->code;
+
+	ret = v4l2_subdev_call(sd, video, enum_framesizes, &fsize_mbus);
+	if (ret < 0)
+		return ret;
+
+	*fsize = fsize_mbus;
+	fsize->pixel_format = pixfmt;
+
+	return 0;
+}
+
+int soc_camera_host_register(struct soc_camera_host *ici)
+{
+	struct soc_camera_host *ix;
+	int ret;
+
+	if (!ici || !ici->ops ||
+	    !ici->ops->try_fmt ||
+	    !ici->ops->set_fmt ||
+	    !ici->ops->set_bus_param ||
+	    !ici->ops->querycap ||
+	    ((!ici->ops->init_videobuf ||
+	      !ici->ops->reqbufs) &&
+	     !ici->ops->init_videobuf2) ||
+	    !ici->ops->add ||
+	    !ici->ops->remove ||
+	    !ici->ops->poll ||
+	    !ici->v4l2_dev.dev)
+		return -EINVAL;
+
+	if (!ici->ops->set_crop)
+		ici->ops->set_crop = default_s_crop;
+	if (!ici->ops->get_crop)
+		ici->ops->get_crop = default_g_crop;
+	if (!ici->ops->cropcap)
+		ici->ops->cropcap = default_cropcap;
+	if (!ici->ops->set_parm)
+		ici->ops->set_parm = default_s_parm;
+	if (!ici->ops->get_parm)
+		ici->ops->get_parm = default_g_parm;
+	if (!ici->ops->enum_framesizes)
+		ici->ops->enum_framesizes = default_enum_framesizes;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(ix, &hosts, list) {
+		if (ix->nr == ici->nr) {
+			ret = -EBUSY;
+			goto edevreg;
+		}
+	}
+
+	ret = v4l2_device_register(ici->v4l2_dev.dev, &ici->v4l2_dev);
+	if (ret < 0)
+		goto edevreg;
+
+	list_add_tail(&ici->list, &hosts);
+	mutex_unlock(&list_lock);
+
+	mutex_init(&ici->host_lock);
+	scan_add_host(ici);
+
+	return 0;
+
+edevreg:
+	mutex_unlock(&list_lock);
+	return ret;
+}
+EXPORT_SYMBOL(soc_camera_host_register);
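+
+/*
+ * Editor's note (illustrative, not part of the original source): a host
+ * driver only has to supply .try_fmt, .set_fmt, .set_bus_param, .querycap,
+ * .add, .remove, .poll and either .init_videobuf together with .reqbufs or
+ * .init_videobuf2; any crop, parm or frame-size operation left NULL is
+ * filled in above with the corresponding default_* helper, which simply
+ * forwards the request to the client subdevice.
+ */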
+
+/* Unregister all clients! */
+void soc_camera_host_unregister(struct soc_camera_host *ici)
+{
+	struct soc_camera_device *icd;
+
+	mutex_lock(&list_lock);
+
+	list_del(&ici->list);
+	list_for_each_entry(icd, &devices, list)
+		if (icd->iface == ici->nr && to_soc_camera_control(icd))
+			soc_camera_remove(icd);
+
+	mutex_unlock(&list_lock);
+
+	v4l2_device_unregister(&ici->v4l2_dev);
+}
+EXPORT_SYMBOL(soc_camera_host_unregister);
+
+/* Image capture device */
+static int soc_camera_device_register(struct soc_camera_device *icd)
+{
+	struct soc_camera_device *ix;
+	int num = -1, i;
+
+	for (i = 0; i < 256 && num < 0; i++) {
+		num = i;
+		/* Check if this index is available on this interface */
+		list_for_each_entry(ix, &devices, list) {
+			if (ix->iface == icd->iface && ix->devnum == i) {
+				num = -1;
+				break;
+			}
+		}
+	}
+
+	if (num < 0)
+		/*
+		 * ok, we have 256 cameras on this host...
+		 * man, stay reasonable...
+		 */
+		return -ENOMEM;
+
+	icd->devnum		= num;
+	icd->use_count		= 0;
+	icd->host_priv		= NULL;
+	mutex_init(&icd->video_lock);
+
+	list_add_tail(&icd->list, &devices);
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
+	.vidioc_querycap	 = soc_camera_querycap,
+	.vidioc_try_fmt_vid_cap  = soc_camera_try_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap    = soc_camera_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap    = soc_camera_s_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap,
+	.vidioc_enum_input	 = soc_camera_enum_input,
+	.vidioc_g_input		 = soc_camera_g_input,
+	.vidioc_s_input		 = soc_camera_s_input,
+	.vidioc_s_std		 = soc_camera_s_std,
+	.vidioc_g_std		 = soc_camera_g_std,
+	.vidioc_enum_framesizes  = soc_camera_enum_framesizes,
+	.vidioc_reqbufs		 = soc_camera_reqbufs,
+	.vidioc_querybuf	 = soc_camera_querybuf,
+	.vidioc_qbuf		 = soc_camera_qbuf,
+	.vidioc_dqbuf		 = soc_camera_dqbuf,
+	.vidioc_create_bufs	 = soc_camera_create_bufs,
+	.vidioc_prepare_buf	 = soc_camera_prepare_buf,
+	.vidioc_streamon	 = soc_camera_streamon,
+	.vidioc_streamoff	 = soc_camera_streamoff,
+	.vidioc_cropcap		 = soc_camera_cropcap,
+	.vidioc_g_crop		 = soc_camera_g_crop,
+	.vidioc_s_crop		 = soc_camera_s_crop,
+	.vidioc_g_parm		 = soc_camera_g_parm,
+	.vidioc_s_parm		 = soc_camera_s_parm,
+	.vidioc_g_chip_ident     = soc_camera_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register	 = soc_camera_g_register,
+	.vidioc_s_register	 = soc_camera_s_register,
+#endif
+};
+
+static int video_dev_create(struct soc_camera_device *icd)
+{
+	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+	struct video_device *vdev = video_device_alloc();
+
+	if (!vdev)
+		return -ENOMEM;
+
+	strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
+
+	vdev->parent		= icd->pdev;
+	vdev->current_norm	= V4L2_STD_UNKNOWN;
+	vdev->fops		= &soc_camera_fops;
+	vdev->ioctl_ops		= &soc_camera_ioctl_ops;
+	vdev->release		= video_device_release;
+	vdev->tvnorms		= V4L2_STD_UNKNOWN;
+	vdev->ctrl_handler	= &icd->ctrl_handler;
+	vdev->lock		= &icd->video_lock;
+
+	icd->vdev = vdev;
+
+	return 0;
+}
+
+/*
+ * Called from soc_camera_probe() above, with .video_lock held
+ */
+static int soc_camera_video_start(struct soc_camera_device *icd)
+{
+	const struct device_type *type = icd->vdev->dev.type;
+	int ret;
+
+	if (!icd->parent)
+		return -ENODEV;
+
+	ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0) {
+		dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Restore device type, possibly set by the subdevice driver */
+	icd->vdev->dev.type = type;
+
+	return 0;
+}
+
+static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
+{
+	struct soc_camera_link *icl = pdev->dev.platform_data;
+	struct soc_camera_device *icd;
+	int ret;
+
+	if (!icl)
+		return -EINVAL;
+
+	icd = kzalloc(sizeof(*icd), GFP_KERNEL);
+	if (!icd)
+		return -ENOMEM;
+
+	icd->iface = icl->bus_id;
+	icd->link = icl;
+	icd->pdev = &pdev->dev;
+	platform_set_drvdata(pdev, icd);
+
+	ret = soc_camera_device_register(icd);
+	if (ret < 0)
+		goto escdevreg;
+
+	icd->user_width		= DEFAULT_WIDTH;
+	icd->user_height	= DEFAULT_HEIGHT;
+
+	return 0;
+
+escdevreg:
+	kfree(icd);
+
+	return ret;
+}
+
+/*
+ * Only called on rmmod for each platform device, since they are not
+ * hot-pluggable. By now we know that all our users - hosts and devices -
+ * have already been unloaded.
+ */
+static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
+{
+	struct soc_camera_device *icd = platform_get_drvdata(pdev);
+
+	if (!icd)
+		return -EINVAL;
+
+	list_del(&icd->list);
+
+	kfree(icd);
+
+	return 0;
+}
+
+static struct platform_driver __refdata soc_camera_pdrv = {
+	.probe = soc_camera_pdrv_probe,
+	.remove  = __devexit_p(soc_camera_pdrv_remove),
+	.driver  = {
+		.name	= "soc-camera-pdrv",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init soc_camera_init(void)
+{
+	return platform_driver_register(&soc_camera_pdrv);
+}
+
+static void __exit soc_camera_exit(void)
+{
+	platform_driver_unregister(&soc_camera_pdrv);
+}
+
+module_init(soc_camera_init);
+module_exit(soc_camera_exit);
+
+MODULE_DESCRIPTION("Image capture bus driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:soc-camera-pdrv");
diff --git a/drivers/media/platform/soc_camera_platform.c b/drivers/media/platform/soc_camera_platform.c
new file mode 100644
index 0000000..f59ccad
--- /dev/null
+++ b/drivers/media/platform/soc_camera_platform.c
@@ -0,0 +1,197 @@
+/*
+ * Generic Platform Camera Driver
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Based on mt9m001 driver,
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <media/soc_camera.h>
+#include <media/soc_camera_platform.h>
+
+struct soc_camera_platform_priv {
+	struct v4l2_subdev subdev;
+};
+
+static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	return container_of(subdev, struct soc_camera_platform_priv, subdev);
+}
+
+static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+	return p->set_capture(p, enable);
+}
+
+static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd,
+					struct v4l2_mbus_framefmt *mf)
+{
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+	mf->width	= p->format.width;
+	mf->height	= p->format.height;
+	mf->code	= p->format.code;
+	mf->colorspace	= p->format.colorspace;
+	mf->field	= p->format.field;
+
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops platform_subdev_core_ops;
+
+static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+					enum v4l2_mbus_pixelcode *code)
+{
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+	if (index)
+		return -EINVAL;
+
+	*code = p->format.code;
+	return 0;
+}
+
+static int soc_camera_platform_g_crop(struct v4l2_subdev *sd,
+				      struct v4l2_crop *a)
+{
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+	a->c.left	= 0;
+	a->c.top	= 0;
+	a->c.width	= p->format.width;
+	a->c.height	= p->format.height;
+	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	return 0;
+}
+
+static int soc_camera_platform_cropcap(struct v4l2_subdev *sd,
+				       struct v4l2_cropcap *a)
+{
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+	a->bounds.left			= 0;
+	a->bounds.top			= 0;
+	a->bounds.width			= p->format.width;
+	a->bounds.height		= p->format.height;
+	a->defrect			= a->bounds;
+	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	a->pixelaspect.numerator	= 1;
+	a->pixelaspect.denominator	= 1;
+
+	return 0;
+}
+
+static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
+					     struct v4l2_mbus_config *cfg)
+{
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+	cfg->flags = p->mbus_param;
+	cfg->type = p->mbus_type;
+
+	return 0;
+}
+
+static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
+	.s_stream	= soc_camera_platform_s_stream,
+	.enum_mbus_fmt	= soc_camera_platform_enum_fmt,
+	.cropcap	= soc_camera_platform_cropcap,
+	.g_crop		= soc_camera_platform_g_crop,
+	.try_mbus_fmt	= soc_camera_platform_fill_fmt,
+	.g_mbus_fmt	= soc_camera_platform_fill_fmt,
+	.s_mbus_fmt	= soc_camera_platform_fill_fmt,
+	.g_mbus_config	= soc_camera_platform_g_mbus_config,
+};
+
+static struct v4l2_subdev_ops platform_subdev_ops = {
+	.core	= &platform_subdev_core_ops,
+	.video	= &platform_subdev_video_ops,
+};
+
+static int soc_camera_platform_probe(struct platform_device *pdev)
+{
+	struct soc_camera_host *ici;
+	struct soc_camera_platform_priv *priv;
+	struct soc_camera_platform_info *p = pdev->dev.platform_data;
+	struct soc_camera_device *icd;
+	int ret;
+
+	if (!p)
+		return -EINVAL;
+
+	if (!p->icd) {
+		dev_err(&pdev->dev,
+			"Platform has not set soc_camera_device pointer!\n");
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	icd = p->icd;
+
+	/* soc-camera convention: control's drvdata points to the subdev */
+	platform_set_drvdata(pdev, &priv->subdev);
+	/* Set the control device reference */
+	icd->control = &pdev->dev;
+
+	ici = to_soc_camera_host(icd->parent);
+
+	v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
+	v4l2_set_subdevdata(&priv->subdev, p);
+	strncpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE);
+
+	ret = v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev);
+	if (ret)
+		goto evdrs;
+
+	return ret;
+
+evdrs:
+	platform_set_drvdata(pdev, NULL);
+	kfree(priv);
+	return ret;
+}
+
+static int soc_camera_platform_remove(struct platform_device *pdev)
+{
+	struct soc_camera_platform_priv *priv = get_priv(pdev);
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev);
+
+	p->icd->control = NULL;
+	v4l2_device_unregister_subdev(&priv->subdev);
+	platform_set_drvdata(pdev, NULL);
+	kfree(priv);
+	return 0;
+}
+
+static struct platform_driver soc_camera_platform_driver = {
+	.driver 	= {
+		.name	= "soc_camera_platform",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= soc_camera_platform_probe,
+	.remove		= soc_camera_platform_remove,
+};
+
+module_platform_driver(soc_camera_platform_driver);
+
+MODULE_DESCRIPTION("SoC Camera Platform driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:soc_camera_platform");
diff --git a/drivers/media/platform/soc_mediabus.c b/drivers/media/platform/soc_mediabus.c
new file mode 100644
index 0000000..89dce09
--- /dev/null
+++ b/drivers/media/platform/soc_mediabus.c
@@ -0,0 +1,487 @@
+/*
+ * soc-camera media bus helper routines
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/soc_mediabus.h>
+
+static const struct soc_mbus_lookup mbus_fmt[] = {
+{
+	.code = V4L2_MBUS_FMT_YUYV8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YUYV,
+		.name			= "YUYV",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YVYU8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YVYU,
+		.name			= "YVYU",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_UYVY8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_UYVY,
+		.name			= "UYVY",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_VYUY8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_VYUY,
+		.name			= "VYUY",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB555,
+		.name			= "RGB555",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB555X,
+		.name			= "RGB555X",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB565,
+		.name			= "RGB565",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB565X,
+		.name			= "RGB565X",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR8,
+		.name			= "Bayer 8 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_NONE,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 10,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_Y8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_GREY,
+		.name			= "Grey",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_NONE,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_Y10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_Y10,
+		.name			= "Grey 10bit",
+		.bits_per_sample	= 10,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADLO,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADLO,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_JPEG_1X8,
+	.fmt = {
+		.fourcc                 = V4L2_PIX_FMT_JPEG,
+		.name                   = "JPEG",
+		.bits_per_sample        = 8,
+		.packing                = SOC_MBUS_PACKING_VARIABLE,
+		.order                  = SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB444,
+		.name			= "RGB444",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
+		.order			= SOC_MBUS_ORDER_BE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YUYV8_1_5X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YUV420,
+		.name			= "YUYV 4:2:0",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_1_5X8,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YVYU8_1_5X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YVU420,
+		.name			= "YVYU 4:2:0",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_1_5X8,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_UYVY8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_UYVY,
+		.name			= "UYVY 16bit",
+		.bits_per_sample	= 16,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_VYUY8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_VYUY,
+		.name			= "VYUY 16bit",
+		.bits_per_sample	= 16,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YUYV8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YUYV,
+		.name			= "YUYV 16bit",
+		.bits_per_sample	= 16,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_YVYU8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YVYU,
+		.name			= "YVYU 16bit",
+		.bits_per_sample	= 16,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SGRBG8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG8,
+		.name			= "Bayer 8 GRBG",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_NONE,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG10DPCM8,
+		.name			= "Bayer 10 BGGR DPCM 8",
+		.bits_per_sample	= 8,
+		.packing		= SOC_MBUS_PACKING_NONE,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SGBRG10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGBRG10,
+		.name			= "Bayer 10 GBRG",
+		.bits_per_sample	= 10,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SGRBG10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG10,
+		.name			= "Bayer 10 GRBG",
+		.bits_per_sample	= 10,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SRGGB10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SRGGB10,
+		.name			= "Bayer 10 RGGB",
+		.bits_per_sample	= 10,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SBGGR12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR12,
+		.name			= "Bayer 12 BGGR",
+		.bits_per_sample	= 12,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SGBRG12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGBRG12,
+		.name			= "Bayer 12 GBRG",
+		.bits_per_sample	= 12,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SGRBG12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG12,
+		.name			= "Bayer 12 GRBG",
+		.bits_per_sample	= 12,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = V4L2_MBUS_FMT_SRGGB12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SRGGB12,
+		.name			= "Bayer 12 RGGB",
+		.bits_per_sample	= 12,
+		.packing		= SOC_MBUS_PACKING_EXTEND16,
+		.order			= SOC_MBUS_ORDER_LE,
+		.layout			= SOC_MBUS_LAYOUT_PACKED,
+	},
+},
+};
+
+int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
+			unsigned int *numerator, unsigned int *denominator)
+{
+	switch (mf->packing) {
+	case SOC_MBUS_PACKING_NONE:
+	case SOC_MBUS_PACKING_EXTEND16:
+		*numerator = 1;
+		*denominator = 1;
+		return 0;
+	case SOC_MBUS_PACKING_2X8_PADHI:
+	case SOC_MBUS_PACKING_2X8_PADLO:
+		*numerator = 2;
+		*denominator = 1;
+		return 0;
+	case SOC_MBUS_PACKING_1_5X8:
+		*numerator = 3;
+		*denominator = 2;
+		return 0;
+	case SOC_MBUS_PACKING_VARIABLE:
+		*numerator = 0;
+		*denominator = 1;
+		return 0;
+	}
+	return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
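+
+/*
+ * Editor's note (illustrative, not part of the original source): the
+ * numerator/denominator pair expresses samples per pixel, e.g.
+ * SOC_MBUS_PACKING_2X8_PADHI yields 2/1 (two 8-bit samples per pixel) and
+ * SOC_MBUS_PACKING_1_5X8 yields 3/2 (YUV 4:2:0 style subsampling), while
+ * SOC_MBUS_PACKING_VARIABLE reports 0/1 because compressed formats such as
+ * JPEG have no fixed per-pixel ratio.
+ */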
+
+s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
+{
+	if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
+		return width * mf->bits_per_sample / 8;
+
+	switch (mf->packing) {
+	case SOC_MBUS_PACKING_NONE:
+		return width * mf->bits_per_sample / 8;
+	case SOC_MBUS_PACKING_2X8_PADHI:
+	case SOC_MBUS_PACKING_2X8_PADLO:
+	case SOC_MBUS_PACKING_EXTEND16:
+		return width * 2;
+	case SOC_MBUS_PACKING_1_5X8:
+		return width * 3 / 2;
+	case SOC_MBUS_PACKING_VARIABLE:
+		return 0;
+	}
+	return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_bytes_per_line);
+
+s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
+			u32 bytes_per_line, u32 height)
+{
+	if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
+		return bytes_per_line * height;
+
+	switch (mf->packing) {
+	case SOC_MBUS_PACKING_2X8_PADHI:
+	case SOC_MBUS_PACKING_2X8_PADLO:
+		return bytes_per_line * height * 2;
+	case SOC_MBUS_PACKING_1_5X8:
+		return bytes_per_line * height * 3 / 2;
+	default:
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(soc_mbus_image_size);
+
+const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
+	enum v4l2_mbus_pixelcode code,
+	const struct soc_mbus_lookup *lookup,
+	int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		if (lookup[i].code == code)
+			return &lookup[i].fmt;
+
+	return NULL;
+}
+EXPORT_SYMBOL(soc_mbus_find_fmtdesc);
+
+const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
+	enum v4l2_mbus_pixelcode code)
+{
+	return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
+}
+EXPORT_SYMBOL(soc_mbus_get_fmtdesc);
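+
+/*
+ * Editor's note - illustrative usage sketch, not part of the original
+ * source (the local variables are hypothetical, NULL check omitted):
+ *
+ *	const struct soc_mbus_pixelfmt *fmt =
+ *		soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
+ *	s32 bpl  = soc_mbus_bytes_per_line(640, fmt);	// 2X8_PADHI: 640 * 2 = 1280
+ *	s32 size = soc_mbus_image_size(fmt, bpl, 480);	// packed: 1280 * 480 = 614400
+ */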
+
+unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+					unsigned int flags)
+{
+	unsigned long common_flags;
+	bool hsync = true, vsync = true, pclk, data, mode;
+	bool mipi_lanes, mipi_clock;
+
+	common_flags = cfg->flags & flags;
+
+	switch (cfg->type) {
+	case V4L2_MBUS_PARALLEL:
+		hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+					V4L2_MBUS_HSYNC_ACTIVE_LOW);
+		vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+					V4L2_MBUS_VSYNC_ACTIVE_LOW);
+	case V4L2_MBUS_BT656:
+		pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+				       V4L2_MBUS_PCLK_SAMPLE_FALLING);
+		data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+				       V4L2_MBUS_DATA_ACTIVE_LOW);
+		mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+		return (!hsync || !vsync || !pclk || !data || !mode) ?
+			0 : common_flags;
+	case V4L2_MBUS_CSI2:
+		mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+		mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+					     V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+		return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(soc_mbus_config_compatible);
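+
+/*
+ * Editor's note (illustrative, not part of the original source): for
+ * V4L2_MBUS_BT656 the checks above rely on hsync and vsync staying
+ * initialised to true, since BT.656 embeds the sync codes in the data
+ * stream; V4L2_MBUS_PARALLEL deliberately falls through to share the pclk,
+ * data and master/slave checks. For V4L2_MBUS_CSI2 both a lane count and a
+ * clock mode must be present in the common flags, otherwise 0 is returned.
+ */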
+
+static int __init soc_mbus_init(void)
+{
+	return 0;
+}
+
+static void __exit soc_mbus_exit(void)
+{
+}
+
+module_init(soc_mbus_init);
+module_exit(soc_mbus_exit);
+
+MODULE_DESCRIPTION("soc-camera media bus interface");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
new file mode 100644
index 0000000..02194c0
--- /dev/null
+++ b/drivers/media/platform/timblogiw.c
@@ -0,0 +1,880 @@
+/*
+ * timblogiw.c timberdale FPGA LogiWin Video In driver
+ * Copyright (c) 2009-2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA LogiWin Video In
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/timb_video.h>
+
+#define DRIVER_NAME			"timb-video"
+
+#define TIMBLOGIWIN_NAME		"Timberdale Video-In"
+#define TIMBLOGIW_VERSION_CODE		0x04
+
+#define TIMBLOGIW_LINES_PER_DESC	44
+#define TIMBLOGIW_MAX_VIDEO_MEM		16
+
+#define TIMBLOGIW_HAS_DECODER(lw)	(lw->pdata.encoder.module_name)
+
+
+struct timblogiw {
+	struct video_device		video_dev;
+	struct v4l2_device		v4l2_dev; /* mutual exclusion */
+	struct mutex			lock;
+	struct device			*dev;
+	struct timb_video_platform_data pdata;
+	struct v4l2_subdev		*sd_enc;	/* encoder */
+	bool				opened;
+};
+
+struct timblogiw_tvnorm {
+	v4l2_std_id std;
+	u16     width;
+	u16     height;
+	u8	fps;
+};
+
+struct timblogiw_fh {
+	struct videobuf_queue		vb_vidq;
+	struct timblogiw_tvnorm const	*cur_norm;
+	struct list_head		capture;
+	struct dma_chan			*chan;
+	spinlock_t			queue_lock; /* mutual exclusion */
+	unsigned int			frame_count;
+};
+
+struct timblogiw_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct videobuf_buffer	vb;
+	struct scatterlist	sg[16];
+	dma_cookie_t		cookie;
+	struct timblogiw_fh	*fh;
+};
+
+const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
+	{
+		.std			= V4L2_STD_PAL,
+		.width			= 720,
+		.height			= 576,
+		.fps			= 25
+	},
+	{
+		.std			= V4L2_STD_NTSC,
+		.width			= 720,
+		.height			= 480,
+		.fps			= 30
+	}
+};
+
+static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
+{
+	return norm->width * 2;
+}
+
+
+static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
+{
+	return norm->height * timblogiw_bytes_per_line(norm);
+}
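+
+/*
+ * Editor's note (illustrative, not part of the original source): with the
+ * UYVY 4:2:2 format used by this driver each pixel takes two bytes, so a
+ * PAL frame needs 720 * 2 = 1440 bytes per line and
+ * 1440 * 576 = 829440 bytes in total.
+ */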
+
+static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
+		if (timblogiw_tvnorms[i].std & std)
+			return timblogiw_tvnorms + i;
+
+	/* default to first element */
+	return timblogiw_tvnorms;
+}
+
+static void timblogiw_dma_cb(void *data)
+{
+	struct timblogiw_buffer *buf = data;
+	struct timblogiw_fh *fh = buf->fh;
+	struct videobuf_buffer *vb = &buf->vb;
+
+	spin_lock(&fh->queue_lock);
+
+	/* mark the transfer done */
+	buf->cookie = -1;
+
+	fh->frame_count++;
+
+	if (vb->state != VIDEOBUF_ERROR) {
+		list_del(&vb->queue);
+		do_gettimeofday(&vb->ts);
+		vb->field_count = fh->frame_count * 2;
+		vb->state = VIDEOBUF_DONE;
+
+		wake_up(&vb->done);
+	}
+
+	if (!list_empty(&fh->capture)) {
+		vb = list_entry(fh->capture.next, struct videobuf_buffer,
+			queue);
+		vb->state = VIDEOBUF_ACTIVE;
+	}
+
+	spin_unlock(&fh->queue_lock);
+}
+
+static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
+{
+	return chan->chan_id == (uintptr_t)filter_param;
+}
+
+/* IOCTL functions */
+
+static int timblogiw_g_fmt(struct file *file, void  *priv,
+	struct v4l2_format *format)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw *lw = video_get_drvdata(vdev);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s entry\n", __func__);
+
+	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	mutex_lock(&lw->lock);
+
+	format->fmt.pix.width = fh->cur_norm->width;
+	format->fmt.pix.height = fh->cur_norm->height;
+	format->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+	format->fmt.pix.bytesperline = timblogiw_bytes_per_line(fh->cur_norm);
+	format->fmt.pix.sizeimage = timblogiw_frame_size(fh->cur_norm);
+	format->fmt.pix.field = V4L2_FIELD_NONE;
+
+	mutex_unlock(&lw->lock);
+
+	return 0;
+}
+
+static int timblogiw_try_fmt(struct file *file, void  *priv,
+	struct v4l2_format *format)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_pix_format *pix = &format->fmt.pix;
+
+	dev_dbg(&vdev->dev,
+		"%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
+		"bytes per line %d, size image: %d, colorspace: %d\n",
+		__func__,
+		pix->width, pix->height, pix->pixelformat, pix->field,
+		pix->bytesperline, pix->sizeimage, pix->colorspace);
+
+	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (pix->field != V4L2_FIELD_NONE)
+		return -EINVAL;
+
+	if (pix->pixelformat != V4L2_PIX_FMT_UYVY)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int timblogiw_s_fmt(struct file *file, void  *priv,
+	struct v4l2_format *format)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw *lw = video_get_drvdata(vdev);
+	struct timblogiw_fh *fh = priv;
+	struct v4l2_pix_format *pix = &format->fmt.pix;
+	int err;
+
+	mutex_lock(&lw->lock);
+
+	err = timblogiw_try_fmt(file, priv, format);
+	if (err)
+		goto out;
+
+	if (videobuf_queue_is_busy(&fh->vb_vidq)) {
+		dev_err(&vdev->dev, "%s queue busy\n", __func__);
+		err = -EBUSY;
+		goto out;
+	}
+
+	pix->width = fh->cur_norm->width;
+	pix->height = fh->cur_norm->height;
+
+out:
+	mutex_unlock(&lw->lock);
+	return err;
+}
+
+static int timblogiw_querycap(struct file *file, void  *priv,
+	struct v4l2_capability *cap)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);
+	memset(cap, 0, sizeof(*cap));
+	strncpy(cap->card, TIMBLOGIWIN_NAME, sizeof(cap->card)-1);
+	strncpy(cap->driver, DRIVER_NAME, sizeof(cap->driver) - 1);
+	strlcpy(cap->bus_info, vdev->name, sizeof(cap->bus_info));
+	cap->version = TIMBLOGIW_VERSION_CODE;
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+		V4L2_CAP_READWRITE;
+
+	return 0;
+}
+
+static int timblogiw_enum_fmt(struct file *file, void  *priv,
+	struct v4l2_fmtdesc *fmt)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	dev_dbg(&vdev->dev, "%s, index: %d\n",  __func__, fmt->index);
+
+	if (fmt->index != 0)
+		return -EINVAL;
+	memset(fmt, 0, sizeof(*fmt));
+	fmt->index = 0;
+	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	strncpy(fmt->description, "4:2:2, packed, UYVY",
+		sizeof(fmt->description)-1);
+	fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+
+	return 0;
+}
+
+static int timblogiw_g_parm(struct file *file, void *priv,
+	struct v4l2_streamparm *sp)
+{
+	struct timblogiw_fh *fh = priv;
+	struct v4l2_captureparm *cp = &sp->parm.capture;
+
+	cp->capability = V4L2_CAP_TIMEPERFRAME;
+	cp->timeperframe.numerator = 1;
+	cp->timeperframe.denominator = fh->cur_norm->fps;
+
+	return 0;
+}
+
+static int timblogiw_reqbufs(struct file *file, void  *priv,
+	struct v4l2_requestbuffers *rb)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	return videobuf_reqbufs(&fh->vb_vidq, rb);
+}
+
+static int timblogiw_querybuf(struct file *file, void  *priv,
+	struct v4l2_buffer *b)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	return videobuf_querybuf(&fh->vb_vidq, b);
+}
+
+static int timblogiw_qbuf(struct file *file, void  *priv, struct v4l2_buffer *b)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	return videobuf_qbuf(&fh->vb_vidq, b);
+}
+
+static int timblogiw_dqbuf(struct file *file, void  *priv,
+	struct v4l2_buffer *b)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+}
+
+static int timblogiw_g_std(struct file *file, void  *priv, v4l2_std_id *std)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	*std = fh->cur_norm->std;
+	return 0;
+}
+
+static int timblogiw_s_std(struct file *file, void  *priv, v4l2_std_id *std)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw *lw = video_get_drvdata(vdev);
+	struct timblogiw_fh *fh = priv;
+	int err = 0;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	mutex_lock(&lw->lock);
+
+	if (TIMBLOGIW_HAS_DECODER(lw))
+		err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);
+
+	if (!err)
+		fh->cur_norm = timblogiw_get_norm(*std);
+
+	mutex_unlock(&lw->lock);
+
+	return err;
+}
+
+static int timblogiw_enuminput(struct file *file, void  *priv,
+	struct v4l2_input *inp)
+{
+	struct video_device *vdev = video_devdata(file);
+	int i;
+
+	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);
+
+	if (inp->index != 0)
+		return -EINVAL;
+
+	inp->index = 0;
+
+	strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+	inp->std = 0;
+	for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
+		inp->std |= timblogiw_tvnorms[i].std;
+
+	return 0;
+}
+
+static int timblogiw_g_input(struct file *file, void  *priv,
+	unsigned int *input)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);
+
+	*input = 0;
+
+	return 0;
+}
+
+static int timblogiw_s_input(struct file *file, void  *priv, unsigned int input)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);
+
+	if (input != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static int timblogiw_streamon(struct file *file, void  *priv, unsigned int type)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		dev_dbg(&vdev->dev, "%s - invalid buffer type\n", __func__);
+		return -EINVAL;
+	}
+
+	fh->frame_count = 0;
+	return videobuf_streamon(&fh->vb_vidq);
+}
+
+static int timblogiw_streamoff(struct file *file, void  *priv,
+	unsigned int type)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s entry\n",  __func__);
+
+	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	return videobuf_streamoff(&fh->vb_vidq);
+}
+
+static int timblogiw_querystd(struct file *file, void  *priv, v4l2_std_id *std)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw *lw = video_get_drvdata(vdev);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s entry\n",  __func__);
+
+	if (TIMBLOGIW_HAS_DECODER(lw))
+		return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
+	else {
+		*std = fh->cur_norm->std;
+		return 0;
+	}
+}
+
+static int timblogiw_enum_framesizes(struct file *file, void  *priv,
+	struct v4l2_frmsizeenum *fsize)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = priv;
+
+	dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n",  __func__,
+		fsize->index, fsize->pixel_format);
+
+	if ((fsize->index != 0) ||
+		(fsize->pixel_format != V4L2_PIX_FMT_UYVY))
+		return -EINVAL;
+
+	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+	fsize->discrete.width = fh->cur_norm->width;
+	fsize->discrete.height = fh->cur_norm->height;
+
+	return 0;
+}
+
+/* Video buffer functions */
+
+static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
+	unsigned int *size)
+{
+	struct timblogiw_fh *fh = vq->priv_data;
+
+	*size = timblogiw_frame_size(fh->cur_norm);
+
+	if (!*count)
+		*count = 32;
+
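+	/*
+	 * Cap the request so the total allocation stays within
+	 * TIMBLOGIW_MAX_VIDEO_MEM megabytes of buffer memory.
+	 */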
+	while (*size * *count > TIMBLOGIW_MAX_VIDEO_MEM * 1024 * 1024)
+		(*count)--;
+
+	return 0;
+}
+
+static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
+	enum v4l2_field field)
+{
+	struct timblogiw_fh *fh = vq->priv_data;
+	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
+		vb);
+	unsigned int data_size = timblogiw_frame_size(fh->cur_norm);
+	int err = 0;
+
+	if (vb->baddr && vb->bsize < data_size)
+		/* User provided buffer, but it is too small */
+		return -ENOMEM;
+
+	vb->size = data_size;
+	vb->width = fh->cur_norm->width;
+	vb->height = fh->cur_norm->height;
+	vb->field = field;
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		int i;
+		unsigned int size;
+		unsigned int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
+			timblogiw_bytes_per_line(fh->cur_norm);
+		dma_addr_t addr;
+
+		sg_init_table(buf->sg, ARRAY_SIZE(buf->sg));
+
+		err = videobuf_iolock(vq, vb, NULL);
+		if (err)
+			goto err;
+
+		addr = videobuf_to_dma_contig(vb);
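+		/*
+		 * Split the contiguous buffer into scatterlist entries of
+		 * TIMBLOGIW_LINES_PER_DESC lines each; the last entry is
+		 * trimmed so the total matches the frame size exactly.
+		 */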
+		for (i = 0, size = 0; size < data_size; i++) {
+			sg_dma_address(buf->sg + i) = addr + size;
+			size += bytes_per_desc;
+			sg_dma_len(buf->sg + i) = (size > data_size) ?
+				(bytes_per_desc - (size - data_size)) :
+				bytes_per_desc;
+		}
+
+		vb->state = VIDEOBUF_PREPARED;
+		buf->cookie = -1;
+		buf->fh = fh;
+	}
+
+	return 0;
+
+err:
+	videobuf_dma_contig_free(vq, vb);
+	vb->state = VIDEOBUF_NEEDS_INIT;
+	return err;
+}
+
+static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+	struct timblogiw_fh *fh = vq->priv_data;
+	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
+		vb);
+	struct dma_async_tx_descriptor *desc;
+	int sg_elems;
+	int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
+		timblogiw_bytes_per_line(fh->cur_norm);
+
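+	/* one scatterlist element per descriptor-sized chunk, rounded up */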
+	sg_elems = timblogiw_frame_size(fh->cur_norm) / bytes_per_desc;
+	sg_elems +=
+		(timblogiw_frame_size(fh->cur_norm) % bytes_per_desc) ? 1 : 0;
+
+	if (list_empty(&fh->capture))
+		vb->state = VIDEOBUF_ACTIVE;
+	else
+		vb->state = VIDEOBUF_QUEUED;
+
+	list_add_tail(&vb->queue, &fh->capture);
+
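+	/*
+	 * videobuf calls buf_queue with the queue spinlock held; drop it
+	 * while preparing and submitting the DMA descriptor and take it
+	 * again before returning.
+	 */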
+	spin_unlock_irq(&fh->queue_lock);
+
+	desc = dmaengine_prep_slave_sg(fh->chan,
+		buf->sg, sg_elems, DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+	if (!desc) {
+		spin_lock_irq(&fh->queue_lock);
+		list_del_init(&vb->queue);
+		vb->state = VIDEOBUF_PREPARED;
+		return;
+	}
+
+	desc->callback_param = buf;
+	desc->callback = timblogiw_dma_cb;
+
+	buf->cookie = desc->tx_submit(desc);
+
+	spin_lock_irq(&fh->queue_lock);
+}
+
+static void buffer_release(struct videobuf_queue *vq,
+	struct videobuf_buffer *vb)
+{
+	struct timblogiw_fh *fh = vq->priv_data;
+	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
+		vb);
+
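+	/* wait for any DMA transfer still in flight for this buffer */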
+	videobuf_waiton(vq, vb, 0, 0);
+	if (buf->cookie >= 0)
+		dma_sync_wait(fh->chan, buf->cookie);
+
+	videobuf_dma_contig_free(vq, vb);
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops timblogiw_video_qops = {
+	.buf_setup      = buffer_setup,
+	.buf_prepare    = buffer_prepare,
+	.buf_queue      = buffer_queue,
+	.buf_release    = buffer_release,
+};
+
+/* Device Operations functions */
+
+static int timblogiw_open(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw *lw = video_get_drvdata(vdev);
+	struct timblogiw_fh *fh;
+	v4l2_std_id std;
+	dma_cap_mask_t mask;
+	int err = 0;
+
+	dev_dbg(&vdev->dev, "%s: entry\n", __func__);
+
+	mutex_lock(&lw->lock);
+	if (lw->opened) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
+		struct i2c_adapter *adapt;
+
+		/* find the video decoder */
+		adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
+		if (!adapt) {
+			dev_err(&vdev->dev, "No I2C bus #%d\n",
+				lw->pdata.i2c_adapter);
+			err = -ENODEV;
+			goto out;
+		}
+
+		/* now find the encoder */
+		lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
+			lw->pdata.encoder.info, NULL);
+
+		i2c_put_adapter(adapt);
+
+		if (!lw->sd_enc) {
+			dev_err(&vdev->dev, "Failed to get encoder: %s\n",
+				lw->pdata.encoder.module_name);
+			err = -ENODEV;
+			goto out;
+		}
+	}
+
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh) {
+		err = -ENOMEM;
+		goto out;
+	}
+
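+	/*
+	 * Start from the first supported norm, then ask the decoder (if
+	 * present) which standard it actually detects and switch to the
+	 * matching norm.
+	 */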
+	fh->cur_norm = timblogiw_tvnorms;
+	timblogiw_querystd(file, fh, &std);
+	fh->cur_norm = timblogiw_get_norm(std);
+
+	INIT_LIST_HEAD(&fh->capture);
+	spin_lock_init(&fh->queue_lock);
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_PRIVATE, mask);
+
+	/* find the DMA channel */
+	fh->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
+			(void *)(uintptr_t)lw->pdata.dma_channel);
+	if (!fh->chan) {
+		dev_err(&vdev->dev, "Failed to get DMA channel\n");
+		kfree(fh);
+		err = -ENODEV;
+		goto out;
+	}
+
+	file->private_data = fh;
+	videobuf_queue_dma_contig_init(&fh->vb_vidq,
+		&timblogiw_video_qops, lw->dev, &fh->queue_lock,
+		V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+		sizeof(struct timblogiw_buffer), fh, NULL);
+
+	lw->opened = true;
+out:
+	mutex_unlock(&lw->lock);
+
+	return err;
+}
+
+static int timblogiw_close(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw *lw = video_get_drvdata(vdev);
+	struct timblogiw_fh *fh = file->private_data;
+
+	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);
+
+	videobuf_stop(&fh->vb_vidq);
+	videobuf_mmap_free(&fh->vb_vidq);
+
+	dma_release_channel(fh->chan);
+
+	kfree(fh);
+
+	mutex_lock(&lw->lock);
+	lw->opened = false;
+	mutex_unlock(&lw->lock);
+	return 0;
+}
+
+static ssize_t timblogiw_read(struct file *file, char __user *data,
+	size_t count, loff_t *ppos)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = file->private_data;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	return videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0,
+		file->f_flags & O_NONBLOCK);
+}
+
+static unsigned int timblogiw_poll(struct file *file,
+	struct poll_table_struct *wait)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = file->private_data;
+
+	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);
+
+	return videobuf_poll_stream(file, &fh->vb_vidq, wait);
+}
+
+static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct timblogiw_fh *fh = file->private_data;
+
+	dev_dbg(&vdev->dev, "%s: entry\n", __func__);
+
+	return videobuf_mmap_mapper(&fh->vb_vidq, vma);
+}
+
+/* Platform device functions */
+
+static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
+	.vidioc_querycap		= timblogiw_querycap,
+	.vidioc_enum_fmt_vid_cap	= timblogiw_enum_fmt,
+	.vidioc_g_fmt_vid_cap		= timblogiw_g_fmt,
+	.vidioc_try_fmt_vid_cap		= timblogiw_try_fmt,
+	.vidioc_s_fmt_vid_cap		= timblogiw_s_fmt,
+	.vidioc_g_parm			= timblogiw_g_parm,
+	.vidioc_reqbufs			= timblogiw_reqbufs,
+	.vidioc_querybuf		= timblogiw_querybuf,
+	.vidioc_qbuf			= timblogiw_qbuf,
+	.vidioc_dqbuf			= timblogiw_dqbuf,
+	.vidioc_g_std			= timblogiw_g_std,
+	.vidioc_s_std			= timblogiw_s_std,
+	.vidioc_enum_input		= timblogiw_enuminput,
+	.vidioc_g_input			= timblogiw_g_input,
+	.vidioc_s_input			= timblogiw_s_input,
+	.vidioc_streamon		= timblogiw_streamon,
+	.vidioc_streamoff		= timblogiw_streamoff,
+	.vidioc_querystd		= timblogiw_querystd,
+	.vidioc_enum_framesizes		= timblogiw_enum_framesizes,
+};
+
+static __devinitconst struct v4l2_file_operations timblogiw_fops = {
+	.owner		= THIS_MODULE,
+	.open		= timblogiw_open,
+	.release	= timblogiw_close,
+	.unlocked_ioctl	= video_ioctl2, /* V4L2 ioctl handler */
+	.mmap		= timblogiw_mmap,
+	.read		= timblogiw_read,
+	.poll		= timblogiw_poll,
+};
+
+static __devinitconst struct video_device timblogiw_template = {
+	.name		= TIMBLOGIWIN_NAME,
+	.fops		= &timblogiw_fops,
+	.ioctl_ops	= &timblogiw_ioctl_ops,
+	.release	= video_device_release_empty,
+	.minor		= -1,
+	.tvnorms	= V4L2_STD_PAL | V4L2_STD_NTSC
+};
+
+static int __devinit timblogiw_probe(struct platform_device *pdev)
+{
+	int err;
+	struct timblogiw *lw = NULL;
+	struct timb_video_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	if (!pdata->encoder.module_name)
+		dev_info(&pdev->dev, "Running without decoder\n");
+
+	lw = kzalloc(sizeof(*lw), GFP_KERNEL);
+	if (!lw) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	if (pdev->dev.parent)
+		lw->dev = pdev->dev.parent;
+	else
+		lw->dev = &pdev->dev;
+
+	memcpy(&lw->pdata, pdata, sizeof(lw->pdata));
+
+	mutex_init(&lw->lock);
+
+	lw->video_dev = timblogiw_template;
+
+	strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
+	err = v4l2_device_register(NULL, &lw->v4l2_dev);
+	if (err)
+		goto err_register;
+
+	lw->video_dev.v4l2_dev = &lw->v4l2_dev;
+
+	platform_set_drvdata(pdev, lw);
+	video_set_drvdata(&lw->video_dev, lw);
+
+	err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
+	if (err) {
+		dev_err(&pdev->dev, "Error registering video device: %d\n", err);
+		goto err_request;
+	}
+
+
+	return 0;
+
+err_request:
+	platform_set_drvdata(pdev, NULL);
+	v4l2_device_unregister(&lw->v4l2_dev);
+err_register:
+	kfree(lw);
+err:
+	dev_err(&pdev->dev, "Failed to register: %d\n", err);
+
+	return err;
+}
+
+static int __devexit timblogiw_remove(struct platform_device *pdev)
+{
+	struct timblogiw *lw = platform_get_drvdata(pdev);
+
+	video_unregister_device(&lw->video_dev);
+
+	v4l2_device_unregister(&lw->v4l2_dev);
+
+	kfree(lw);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver timblogiw_platform_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= timblogiw_probe,
+	.remove		= __devexit_p(timblogiw_remove),
+};
+
+module_platform_driver(timblogiw_platform_driver);
+
+MODULE_DESCRIPTION(TIMBLOGIWIN_NAME);
+MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
new file mode 100644
index 0000000..eb404c2
--- /dev/null
+++ b/drivers/media/platform/via-camera.c
@@ -0,0 +1,1514 @@
+/*
+ * Driver for the VIA Chrome integrated camera controller.
+ *
+ * Copyright 2009,2010 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under the terms of the GNU General Public License, version 2
+ *
+ * This work was supported by the One Laptop Per Child project
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/ov7670.h>
+#include <media/videobuf-dma-sg.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_qos.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+#include <linux/via_i2c.h>
+#include <asm/olpc.h>
+
+#include "via-camera.h"
+
+MODULE_ALIAS("platform:viafb-camera");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver");
+MODULE_LICENSE("GPL");
+
+static bool flip_image;
+module_param(flip_image, bool, 0444);
+MODULE_PARM_DESC(flip_image,
+		"If set, the sensor will be instructed to flip the image "
+		"vertically.");
+
+static bool override_serial;
+module_param(override_serial, bool, 0444);
+MODULE_PARM_DESC(override_serial,
+		"The camera driver will normally refuse to load if "
+		"the XO 1.5 serial port is enabled.  Set this option "
+		"to force-enable the camera.");
+
+/*
+ * Basic window sizes.
+ */
+#define VGA_WIDTH	640
+#define VGA_HEIGHT	480
+#define QCIF_WIDTH	176
+#define	QCIF_HEIGHT	144
+
+/*
+ * The structure describing our camera.
+ */
+enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
+
+struct via_camera {
+	struct v4l2_device v4l2_dev;
+	struct video_device vdev;
+	struct v4l2_subdev *sensor;
+	struct platform_device *platdev;
+	struct viafb_dev *viadev;
+	struct mutex lock;
+	enum viacam_opstate opstate;
+	unsigned long flags;
+	struct pm_qos_request qos_request;
+	/*
+	 * GPIO info for power/reset management
+	 */
+	int power_gpio;
+	int reset_gpio;
+	/*
+	 * I/O memory stuff.
+	 */
+	void __iomem *mmio;	/* Where the registers live */
+	void __iomem *fbmem;	/* Frame buffer memory */
+	u32 fb_offset;		/* Reserved memory offset (FB) */
+	/*
+	 * Capture buffers and related.	 The controller supports
+	 * up to three, so that's what we have here.  These buffers
+	 * live in frame buffer memory, so we don't call them "DMA".
+	 */
+	unsigned int cb_offsets[3];	/* offsets into fb mem */
+	u8 *cb_addrs[3];		/* Kernel-space addresses */
+	int n_cap_bufs;			/* How many are we using? */
+	int next_buf;
+	struct videobuf_queue vb_queue;
+	struct list_head buffer_queue;	/* prot. by reg_lock */
+	/*
+	 * User tracking.
+	 */
+	int users;
+	struct file *owner;
+	/*
+	 * Video format information.  sensor_format is kept in a form
+	 * that we can use to pass to the sensor.  We always run the
+	 * sensor in VGA resolution, though, and let the controller
+	 * downscale things if need be.  So we keep the "real"
+	 * dimensions separately.
+	 */
+	struct v4l2_pix_format sensor_format;
+	struct v4l2_pix_format user_format;
+	enum v4l2_mbus_pixelcode mbus_code;
+};
+
+/*
+ * Yes, this is a hack, but there's only going to be one of these
+ * on any system we know of.
+ */
+static struct via_camera *via_cam_info;
+
+/*
+ * Flag values, manipulated with bitops
+ */
+#define CF_DMA_ACTIVE	 0	/* A frame is incoming */
+#define CF_CONFIG_NEEDED 1	/* Must configure hardware */
+
+
+/*
+ * Nasty ugly v4l2 boilerplate.
+ */
+#define sensor_call(cam, optype, func, args...) \
+	v4l2_subdev_call(cam->sensor, optype, func, ##args)
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+	dev_err(&(cam)->platdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+	dev_warn(&(cam)->platdev->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+	dev_dbg(&(cam)->platdev->dev, fmt, ##arg);
+
+/*
+ * Format handling.  This is ripped almost directly from Hans's changes
+ * to cafe_ccic.c.  It's a little unfortunate; until this change, we
+ * didn't need to know anything about the format except its byte depth;
+ * now this information must be managed at this level too.
+ */
+static struct via_format {
+	__u8 *desc;
+	__u32 pixelformat;
+	int bpp;   /* Bytes per pixel */
+	enum v4l2_mbus_pixelcode mbus_code;
+} via_formats[] = {
+	{
+		.desc		= "YUYV 4:2:2",
+		.pixelformat	= V4L2_PIX_FMT_YUYV,
+		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
+		.bpp		= 2,
+	},
+	/* RGB444 and Bayer should be doable, but have never been
+	   tested with this driver. RGB565 seems to work at the default
+	   resolution, but results in color corruption when being scaled by
+	   viacam_set_scaled(), and is disabled as a result. */
+};
+#define N_VIA_FMTS ARRAY_SIZE(via_formats)
+
+static struct via_format *via_find_format(u32 pixelformat)
+{
+	unsigned i;
+
+	for (i = 0; i < N_VIA_FMTS; i++)
+		if (via_formats[i].pixelformat == pixelformat)
+			return via_formats + i;
+	/* Not found? Then return the first format. */
+	return via_formats;
+}
+
+
+/*--------------------------------------------------------------------------*/
+/*
+ * Sensor power/reset management.  This piece is OLPC-specific for
+ * sure; other configurations will have things connected differently.
+ */
+static int via_sensor_power_setup(struct via_camera *cam)
+{
+	int ret;
+
+	cam->power_gpio = viafb_gpio_lookup("VGPIO3");
+	cam->reset_gpio = viafb_gpio_lookup("VGPIO2");
+	if (cam->power_gpio < 0 || cam->reset_gpio < 0) {
+		dev_err(&cam->platdev->dev, "Unable to find GPIO lines\n");
+		return -EINVAL;
+	}
+	ret = gpio_request(cam->power_gpio, "viafb-camera");
+	if (ret) {
+		dev_err(&cam->platdev->dev, "Unable to request power GPIO\n");
+		return ret;
+	}
+	ret = gpio_request(cam->reset_gpio, "viafb-camera");
+	if (ret) {
+		dev_err(&cam->platdev->dev, "Unable to request reset GPIO\n");
+		gpio_free(cam->power_gpio);
+		return ret;
+	}
+	gpio_direction_output(cam->power_gpio, 0);
+	gpio_direction_output(cam->reset_gpio, 0);
+	return 0;
+}
+
+/*
+ * Power up the sensor and perform the reset dance.
+ */
+static void via_sensor_power_up(struct via_camera *cam)
+{
+	gpio_set_value(cam->power_gpio, 1);
+	gpio_set_value(cam->reset_gpio, 0);
+	msleep(20);  /* Probably excessive */
+	gpio_set_value(cam->reset_gpio, 1);
+	msleep(20);
+}
+
+static void via_sensor_power_down(struct via_camera *cam)
+{
+	gpio_set_value(cam->power_gpio, 0);
+	gpio_set_value(cam->reset_gpio, 0);
+}
+
+
+static void via_sensor_power_release(struct via_camera *cam)
+{
+	via_sensor_power_down(cam);
+	gpio_free(cam->power_gpio);
+	gpio_free(cam->reset_gpio);
+}
+
+/* --------------------------------------------------------------------------*/
+/* Sensor ops */
+
+/*
+ * Manage the ov7670 "flip" bit, which needs special help.
+ */
+static int viacam_set_flip(struct via_camera *cam)
+{
+	struct v4l2_control ctrl;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.id = V4L2_CID_VFLIP;
+	ctrl.value = flip_image;
+	return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+/*
+ * Configure the sensor.  It's up to the caller to ensure
+ * that the camera is in the correct operating state.
+ */
+static int viacam_configure_sensor(struct via_camera *cam)
+{
+	struct v4l2_mbus_framefmt mbus_fmt;
+	int ret;
+
+	v4l2_fill_mbus_format(&mbus_fmt, &cam->sensor_format, cam->mbus_code);
+	ret = sensor_call(cam, core, init, 0);
+	if (ret == 0)
+		ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+	/*
+	 * OV7670 does weird things if flip is set *before* format...
+	 */
+	if (ret == 0)
+		ret = viacam_set_flip(cam);
+	return ret;
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/*
+ * Some simple register accessors; they assume that the lock is held.
+ *
+ * Should we want to support the second capture engine, we could
+ * hide the register difference by adding 0x1000 to registers in the
+ * 0x300-350 range.
+ */
+static inline void viacam_write_reg(struct via_camera *cam,
+		int reg, int value)
+{
+	iowrite32(value, cam->mmio + reg);
+}
+
+static inline int viacam_read_reg(struct via_camera *cam, int reg)
+{
+	return ioread32(cam->mmio + reg);
+}
+
+static inline void viacam_write_reg_mask(struct via_camera *cam,
+		int reg, int value, int mask)
+{
+	int tmp = viacam_read_reg(cam, reg);
+
+	tmp = (tmp & ~mask) | (value & mask);
+	viacam_write_reg(cam, reg, tmp);
+}
+
+
+/* --------------------------------------------------------------------------*/
+/* Interrupt management and handling */
+
+static irqreturn_t viacam_quick_irq(int irq, void *data)
+{
+	struct via_camera *cam = data;
+	irqreturn_t ret = IRQ_NONE;
+	int icv;
+
+	/*
+	 * All we do here is to clear the interrupts and tell
+	 * the handler thread to wake up.
+	 */
+	spin_lock(&cam->viadev->reg_lock);
+	icv = viacam_read_reg(cam, VCR_INTCTRL);
+	if (icv & VCR_IC_EAV) {
+		icv |= VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL;
+		viacam_write_reg(cam, VCR_INTCTRL, icv);
+		ret = IRQ_WAKE_THREAD;
+	}
+	spin_unlock(&cam->viadev->reg_lock);
+	return ret;
+}
+
+/*
+ * Find the next videobuf buffer which has somebody waiting on it.
+ */
+static struct videobuf_buffer *viacam_next_buffer(struct via_camera *cam)
+{
+	unsigned long flags;
+	struct videobuf_buffer *buf = NULL;
+
+	spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+	if (cam->opstate != S_RUNNING)
+		goto out;
+	if (list_empty(&cam->buffer_queue))
+		goto out;
+	buf = list_entry(cam->buffer_queue.next, struct videobuf_buffer, queue);
+	if (!waitqueue_active(&buf->done)) {/* Nobody waiting */
+		buf = NULL;
+		goto out;
+	}
+	list_del(&buf->queue);
+	buf->state = VIDEOBUF_ACTIVE;
+out:
+	spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+	return buf;
+}
+
+/*
+ * The threaded IRQ handler.
+ */
+static irqreturn_t viacam_irq(int irq, void *data)
+{
+	int bufn;
+	struct videobuf_buffer *vb;
+	struct via_camera *cam = data;
+	struct videobuf_dmabuf *vdma;
+
+	/*
+	 * If there is no place to put the data frame, don't bother
+	 * with anything else.
+	 */
+	vb = viacam_next_buffer(cam);
+	if (vb == NULL)
+		goto done;
+	/*
+	 * Figure out which buffer we just completed.
+	 */
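+	/*
+	 * The ACTBUF field appears to report the buffer the engine has
+	 * moved on to, so the frame that just finished is the previous
+	 * one, wrapping around at zero.
+	 */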
+	bufn = (viacam_read_reg(cam, VCR_INTCTRL) & VCR_IC_ACTBUF) >> 3;
+	bufn -= 1;
+	if (bufn < 0)
+		bufn = cam->n_cap_bufs - 1;
+	/*
+	 * Copy over the data and let any waiters know.
+	 */
+	vdma = videobuf_to_dma(vb);
+	viafb_dma_copy_out_sg(cam->cb_offsets[bufn], vdma->sglist, vdma->sglen);
+	vb->state = VIDEOBUF_DONE;
+	vb->size = cam->user_format.sizeimage;
+	wake_up(&vb->done);
+done:
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * These functions must mess around with the general interrupt
+ * control register, which is relevant to much more than just the
+ * camera.  Nothing else uses interrupts, though, as of this writing.
+ * Should that situation change, we'll have to improve support at
+ * the via-core level.
+ */
+static void viacam_int_enable(struct via_camera *cam)
+{
+	viacam_write_reg(cam, VCR_INTCTRL,
+			VCR_IC_INTEN|VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL);
+	viafb_irq_enable(VDE_I_C0AVEN);
+}
+
+static void viacam_int_disable(struct via_camera *cam)
+{
+	viafb_irq_disable(VDE_I_C0AVEN);
+	viacam_write_reg(cam, VCR_INTCTRL, 0);
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/* Controller operations */
+
+/*
+ * Set up our capture buffers in framebuffer memory.
+ */
+static int viacam_ctlr_cbufs(struct via_camera *cam)
+{
+	int nbuf = cam->viadev->camera_fbmem_size/cam->sensor_format.sizeimage;
+	int i;
+	unsigned int offset;
+
+	/*
+	 * See how many buffers we can work with.
+	 */
+	if (nbuf >= 3) {
+		cam->n_cap_bufs = 3;
+		viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_3BUFS,
+				VCR_CI_3BUFS);
+	} else if (nbuf == 2) {
+		cam->n_cap_bufs = 2;
+		viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_3BUFS);
+	} else {
+		cam_warn(cam, "Insufficient frame buffer memory\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Set them up.
+	 */
+	offset = cam->fb_offset;
+	for (i = 0; i < cam->n_cap_bufs; i++) {
+		cam->cb_offsets[i] = offset;
+		cam->cb_addrs[i] = cam->fbmem + offset;
+		viacam_write_reg(cam, VCR_VBUF1 + i*4, offset & VCR_VBUF_MASK);
+		offset += cam->sensor_format.sizeimage;
+	}
+	return 0;
+}
+
+/*
+ * Set the scaling register for downscaling the image.
+ *
+ * This register works like this...  Vertical scaling is enabled
+ * by bit 26; if that bit is set, downscaling is controlled by the
+ * value in bits 16:25.	 Those bits are divided by 1024 to get
+ * the scaling factor; setting just bit 25 thus cuts the height
+ * in half.
+ *
+ * Horizontal scaling works about the same, but it's enabled by
+ * bit 11, with bits 0:10 giving the numerator of a fraction
+ * (over 2048) for the scaling value.
+ *
+ * This function is naive in that, if the user departs from
+ * the 3x4 VGA scaling factor, the image will distort.	We
+ * could work around that if it really seemed important.
+ */
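+/*
+ * Worked example: a 320x240 user format gives a horizontal factor of
+ * 320*2048/640 = 1024 and a vertical factor of 1024*240/480 = 512,
+ * i.e. avscale = VCR_AVS_HEN | 1024 | VCR_AVS_VEN | (512 << 16),
+ * halving both dimensions.
+ */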
+static void viacam_set_scale(struct via_camera *cam)
+{
+	unsigned int avscale;
+	int sf;
+
+	if (cam->user_format.width == VGA_WIDTH)
+		avscale = 0;
+	else {
+		sf = (cam->user_format.width*2048)/VGA_WIDTH;
+		avscale = VCR_AVS_HEN | sf;
+	}
+	if (cam->user_format.height < VGA_HEIGHT) {
+		sf = (1024*cam->user_format.height)/VGA_HEIGHT;
+		avscale |= VCR_AVS_VEN | (sf << 16);
+	}
+	viacam_write_reg(cam, VCR_AVSCALE, avscale);
+}
+
+
+/*
+ * Configure image-related information into the capture engine.
+ */
+static void viacam_ctlr_image(struct via_camera *cam)
+{
+	int cicreg;
+
+	/*
+	 * Disable clock before messing with stuff - from the via
+	 * sample driver.
+	 */
+	viacam_write_reg(cam, VCR_CAPINTC, ~(VCR_CI_ENABLE|VCR_CI_CLKEN));
+	/*
+	 * Set up the controller for VGA resolution, modulo magic
+	 * offsets from the via sample driver.
+	 */
+	viacam_write_reg(cam, VCR_HORRANGE, 0x06200120);
+	viacam_write_reg(cam, VCR_VERTRANGE, 0x01de0000);
+	viacam_set_scale(cam);
+	/*
+	 * Image size info.
+	 */
+	viacam_write_reg(cam, VCR_MAXDATA,
+			(cam->sensor_format.height << 16) |
+			(cam->sensor_format.bytesperline >> 3));
+	viacam_write_reg(cam, VCR_MAXVBI, 0);
+	viacam_write_reg(cam, VCR_VSTRIDE,
+			cam->user_format.bytesperline & VCR_VS_STRIDE);
+	/*
+	 * Set up the capture interface control register,
+	 * everything but the "go" bit.
+	 *
+	 * The FIFO threshold is a bit of a magic number; 8 is what
+	 * VIA's sample code uses.
+	 */
+	cicreg = VCR_CI_CLKEN |
+		0x08000000 |		/* FIFO threshold */
+		VCR_CI_FLDINV |		/* OLPC-specific? */
+		VCR_CI_VREFINV |	/* OLPC-specific? */
+		VCR_CI_DIBOTH |		/* Capture both fields */
+		VCR_CI_CCIR601_8;
+	if (cam->n_cap_bufs == 3)
+		cicreg |= VCR_CI_3BUFS;
+	/*
+	 * YUV formats need different byte swapping than RGB.
+	 */
+	if (cam->user_format.pixelformat == V4L2_PIX_FMT_YUYV)
+		cicreg |= VCR_CI_YUYV;
+	else
+		cicreg |= VCR_CI_UYVY;
+	viacam_write_reg(cam, VCR_CAPINTC, cicreg);
+}
+
+
+static int viacam_config_controller(struct via_camera *cam)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+	ret = viacam_ctlr_cbufs(cam);
+	if (!ret)
+		viacam_ctlr_image(cam);
+	spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+	clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+	return ret;
+}
+
+/*
+ * Make it start grabbing data.
+ */
+static void viacam_start_engine(struct via_camera *cam)
+{
+	spin_lock_irq(&cam->viadev->reg_lock);
+	cam->next_buf = 0;
+	viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_ENABLE, VCR_CI_ENABLE);
+	viacam_int_enable(cam);
+	(void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+	cam->opstate = S_RUNNING;
+	spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+static void viacam_stop_engine(struct via_camera *cam)
+{
+	spin_lock_irq(&cam->viadev->reg_lock);
+	viacam_int_disable(cam);
+	viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_ENABLE);
+	(void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+	cam->opstate = S_IDLE;
+	spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+/* --------------------------------------------------------------------------*/
+/* Videobuf callback ops */
+
+/*
+ * buffer_setup.  The purpose of this one would appear to be to tell
+ * videobuf how big a single image is.	It's also evidently up to us
+ * to put some sort of limit on the maximum number of buffers allowed.
+ */
+static int viacam_vb_buf_setup(struct videobuf_queue *q,
+		unsigned int *count, unsigned int *size)
+{
+	struct via_camera *cam = q->priv_data;
+
+	*size = cam->user_format.sizeimage;
+	if (*count == 0 || *count > 6)	/* Arbitrary number */
+		*count = 6;
+	return 0;
+}
+
+/*
+ * Prepare a buffer.
+ */
+static int viacam_vb_buf_prepare(struct videobuf_queue *q,
+		struct videobuf_buffer *vb, enum v4l2_field field)
+{
+	struct via_camera *cam = q->priv_data;
+
+	vb->size = cam->user_format.sizeimage;
+	vb->width = cam->user_format.width; /* bytesperline???? */
+	vb->height = cam->user_format.height;
+	vb->field = field;
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		int ret = videobuf_iolock(q, vb, NULL);
+		if (ret)
+			return ret;
+	}
+	vb->state = VIDEOBUF_PREPARED;
+	return 0;
+}
+
+/*
+ * We've got a buffer to put data into.
+ *
+ * FIXME: check for a running engine and valid buffers?
+ */
+static void viacam_vb_buf_queue(struct videobuf_queue *q,
+		struct videobuf_buffer *vb)
+{
+	struct via_camera *cam = q->priv_data;
+
+	/*
+	 * Note that videobuf holds the lock when it calls
+	 * us, so we need not (indeed, cannot) take it here.
+	 */
+	vb->state = VIDEOBUF_QUEUED;
+	list_add_tail(&vb->queue, &cam->buffer_queue);
+}
+
+/*
+ * Free a buffer.
+ */
+static void viacam_vb_buf_release(struct videobuf_queue *q,
+		struct videobuf_buffer *vb)
+{
+	struct via_camera *cam = q->priv_data;
+
+	videobuf_dma_unmap(&cam->platdev->dev, videobuf_to_dma(vb));
+	videobuf_dma_free(videobuf_to_dma(vb));
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static const struct videobuf_queue_ops viacam_vb_ops = {
+	.buf_setup	= viacam_vb_buf_setup,
+	.buf_prepare	= viacam_vb_buf_prepare,
+	.buf_queue	= viacam_vb_buf_queue,
+	.buf_release	= viacam_vb_buf_release,
+};
+
+/* --------------------------------------------------------------------------*/
+/* File operations */
+
+static int viacam_open(struct file *filp)
+{
+	struct via_camera *cam = video_drvdata(filp);
+
+	filp->private_data = cam;
+	/*
+	 * Note the new user.  If this is the first one, we'll also
+	 * need to power up the sensor.
+	 */
+	mutex_lock(&cam->lock);
+	if (cam->users == 0) {
+		int ret = viafb_request_dma();
+
+		if (ret) {
+			mutex_unlock(&cam->lock);
+			return ret;
+		}
+		via_sensor_power_up(cam);
+		set_bit(CF_CONFIG_NEEDED, &cam->flags);
+		/*
+		 * Hook into videobuf.	Evidently this cannot fail.
+		 */
+		videobuf_queue_sg_init(&cam->vb_queue, &viacam_vb_ops,
+				&cam->platdev->dev, &cam->viadev->reg_lock,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+				sizeof(struct videobuf_buffer), cam, NULL);
+	}
+	(cam->users)++;
+	mutex_unlock(&cam->lock);
+	return 0;
+}
+
+static int viacam_release(struct file *filp)
+{
+	struct via_camera *cam = video_drvdata(filp);
+
+	mutex_lock(&cam->lock);
+	(cam->users)--;
+	/*
+	 * If the "owner" is closing, shut down any ongoing
+	 * operations.
+	 */
+	if (filp == cam->owner) {
+		videobuf_stop(&cam->vb_queue);
+		/*
+		 * We don't hold the spinlock here, but, if release()
+		 * is being called by the owner, nobody else will
+		 * be changing the state.  And an extra stop would
+		 * not hurt anyway.
+		 */
+		if (cam->opstate != S_IDLE)
+			viacam_stop_engine(cam);
+		cam->owner = NULL;
+	}
+	/*
+	 * Last one out needs to turn out the lights.
+	 */
+	if (cam->users == 0) {
+		videobuf_mmap_free(&cam->vb_queue);
+		via_sensor_power_down(cam);
+		viafb_release_dma();
+	}
+	mutex_unlock(&cam->lock);
+	return 0;
+}
+
+/*
+ * Read a frame from the device.
+ */
+static ssize_t viacam_read(struct file *filp, char __user *buffer,
+		size_t len, loff_t *pos)
+{
+	struct via_camera *cam = video_drvdata(filp);
+	int ret;
+
+	mutex_lock(&cam->lock);
+	/*
+	 * Enforce the V4L2 "only one owner gets to read data" rule.
+	 */
+	if (cam->owner && cam->owner != filp) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	cam->owner = filp;
+	/*
+	 * Do we need to configure the hardware?
+	 */
+	if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+		ret = viacam_configure_sensor(cam);
+		if (!ret)
+			ret = viacam_config_controller(cam);
+		if (ret)
+			goto out_unlock;
+	}
+	/*
+	 * Fire up the capture engine, then have videobuf do
+	 * the heavy lifting.  Someday it would be good to avoid
+	 * stopping and restarting the engine each time.
+	 */
+	INIT_LIST_HEAD(&cam->buffer_queue);
+	viacam_start_engine(cam);
+	ret = videobuf_read_stream(&cam->vb_queue, buffer, len, pos, 0,
+			filp->f_flags & O_NONBLOCK);
+	viacam_stop_engine(cam);
+	/* videobuf_stop() ?? */
+
+out_unlock:
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+
+static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+{
+	struct via_camera *cam = video_drvdata(filp);
+
+	return videobuf_poll_stream(filp, &cam->vb_queue, pt);
+}
+
+
+static int viacam_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct via_camera *cam = video_drvdata(filp);
+
+	return videobuf_mmap_mapper(&cam->vb_queue, vma);
+}
+
+
+
+static const struct v4l2_file_operations viacam_fops = {
+	.owner		= THIS_MODULE,
+	.open		= viacam_open,
+	.release	= viacam_release,
+	.read		= viacam_read,
+	.poll		= viacam_poll,
+	.mmap		= viacam_mmap,
+	.unlocked_ioctl	= video_ioctl2,
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * The long list of v4l2 ioctl ops
+ */
+
+static int viacam_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *ident)
+{
+	struct via_camera *cam = priv;
+
+	ident->ident = V4L2_IDENT_NONE;
+	ident->revision = 0;
+	if (v4l2_chip_match_host(&ident->match)) {
+		ident->ident = V4L2_IDENT_VIA_VX855;
+		return 0;
+	}
+	return sensor_call(cam, core, g_chip_ident, ident);
+}
+
+/*
+ * Control ops are passed through to the sensor.
+ */
+static int viacam_queryctrl(struct file *filp, void *priv,
+		struct v4l2_queryctrl *qc)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = sensor_call(cam, core, queryctrl, qc);
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+
+static int viacam_g_ctrl(struct file *filp, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = sensor_call(cam, core, g_ctrl, ctrl);
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+
+static int viacam_s_ctrl(struct file *filp, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = sensor_call(cam, core, s_ctrl, ctrl);
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+/*
+ * Only one input.
+ */
+static int viacam_enum_input(struct file *filp, void *priv,
+		struct v4l2_input *input)
+{
+	if (input->index != 0)
+		return -EINVAL;
+
+	input->type = V4L2_INPUT_TYPE_CAMERA;
+	input->std = V4L2_STD_ALL; /* Not sure what should go here */
+	strcpy(input->name, "Camera");
+	return 0;
+}
+
+static int viacam_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+static int viacam_s_input(struct file *filp, void *priv, unsigned int i)
+{
+	if (i != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static int viacam_s_std(struct file *filp, void *priv, v4l2_std_id *std)
+{
+	return 0;
+}
+
+/*
+ * Video format stuff.	Here is our default format until
+ * user space messes with things.
+ */
+static const struct v4l2_pix_format viacam_def_pix_format = {
+	.width		= VGA_WIDTH,
+	.height		= VGA_HEIGHT,
+	.pixelformat	= V4L2_PIX_FMT_YUYV,
+	.field		= V4L2_FIELD_NONE,
+	.bytesperline	= VGA_WIDTH * 2,
+	.sizeimage	= VGA_WIDTH * VGA_HEIGHT * 2,
+};
+
+static const enum v4l2_mbus_pixelcode via_def_mbus_code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_fmtdesc *fmt)
+{
+	if (fmt->index >= N_VIA_FMTS)
+		return -EINVAL;
+	strlcpy(fmt->description, via_formats[fmt->index].desc,
+			sizeof(fmt->description));
+	fmt->pixelformat = via_formats[fmt->index].pixelformat;
+	return 0;
+}
+
+/*
+ * Figure out proper image dimensions, but always force the
+ * sensor to VGA.
+ */
+static void viacam_fmt_pre(struct v4l2_pix_format *userfmt,
+		struct v4l2_pix_format *sensorfmt)
+{
+	*sensorfmt = *userfmt;
+	if (userfmt->width < QCIF_WIDTH || userfmt->height < QCIF_HEIGHT) {
+		userfmt->width = QCIF_WIDTH;
+		userfmt->height = QCIF_HEIGHT;
+	}
+	if (userfmt->width > VGA_WIDTH || userfmt->height > VGA_HEIGHT) {
+		userfmt->width = VGA_WIDTH;
+		userfmt->height = VGA_HEIGHT;
+	}
+	sensorfmt->width = VGA_WIDTH;
+	sensorfmt->height = VGA_HEIGHT;
+}
+
+static void viacam_fmt_post(struct v4l2_pix_format *userfmt,
+		struct v4l2_pix_format *sensorfmt)
+{
+	struct via_format *f = via_find_format(userfmt->pixelformat);
+
+	sensorfmt->bytesperline = sensorfmt->width * f->bpp;
+	sensorfmt->sizeimage = sensorfmt->height * sensorfmt->bytesperline;
+	userfmt->pixelformat = sensorfmt->pixelformat;
+	userfmt->field = sensorfmt->field;
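+	/* every format in via_formats is currently 2 bytes per pixel */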
+	userfmt->bytesperline = 2 * userfmt->width;
+	userfmt->sizeimage = userfmt->bytesperline * userfmt->height;
+}
+
+
+/*
+ * The real work of figuring out a workable format.
+ */
+static int viacam_do_try_fmt(struct via_camera *cam,
+		struct v4l2_pix_format *upix, struct v4l2_pix_format *spix)
+{
+	int ret;
+	struct v4l2_mbus_framefmt mbus_fmt;
+	struct via_format *f = via_find_format(upix->pixelformat);
+
+	upix->pixelformat = f->pixelformat;
+	viacam_fmt_pre(upix, spix);
+	v4l2_fill_mbus_format(&mbus_fmt, spix, f->mbus_code);
+	ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+	v4l2_fill_pix_format(spix, &mbus_fmt);
+	viacam_fmt_post(upix, spix);
+	return ret;
+}
+
+
+
+static int viacam_try_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_format *fmt)
+{
+	struct via_camera *cam = priv;
+	struct v4l2_format sfmt;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+
+static int viacam_g_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_format *fmt)
+{
+	struct via_camera *cam = priv;
+
+	mutex_lock(&cam->lock);
+	fmt->fmt.pix = cam->user_format;
+	mutex_unlock(&cam->lock);
+	return 0;
+}
+
+static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
+		struct v4l2_format *fmt)
+{
+	struct via_camera *cam = priv;
+	int ret;
+	struct v4l2_format sfmt;
+	struct via_format *f = via_find_format(fmt->fmt.pix.pixelformat);
+
+	/*
+	 * Camera must be idle or we can't mess with the
+	 * video setup.
+	 */
+	mutex_lock(&cam->lock);
+	if (cam->opstate != S_IDLE) {
+		ret = -EBUSY;
+		goto out;
+	}
+	/*
+	 * Let the sensor code look over and tweak the
+	 * requested formatting.
+	 */
+	ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+	if (ret)
+		goto out;
+	/*
+	 * OK, let's commit to the new format.
+	 */
+	cam->user_format = fmt->fmt.pix;
+	cam->sensor_format = sfmt.fmt.pix;
+	cam->mbus_code = f->mbus_code;
+	ret = viacam_configure_sensor(cam);
+	if (!ret)
+		ret = viacam_config_controller(cam);
+out:
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+static int viacam_querycap(struct file *filp, void *priv,
+		struct v4l2_capability *cap)
+{
+	strcpy(cap->driver, "via-camera");
+	strcpy(cap->card, "via-camera");
+	cap->version = 1;
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+		V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+/*
+ * Streaming operations - pure videobuf stuff.
+ */
+static int viacam_reqbufs(struct file *filp, void *priv,
+		struct v4l2_requestbuffers *rb)
+{
+	struct via_camera *cam = priv;
+
+	return videobuf_reqbufs(&cam->vb_queue, rb);
+}
+
+static int viacam_querybuf(struct file *filp, void *priv,
+		struct v4l2_buffer *buf)
+{
+	struct via_camera *cam = priv;
+
+	return videobuf_querybuf(&cam->vb_queue, buf);
+}
+
+static int viacam_qbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+	struct via_camera *cam = priv;
+
+	return videobuf_qbuf(&cam->vb_queue, buf);
+}
+
+static int viacam_dqbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+	struct via_camera *cam = priv;
+
+	return videobuf_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+}
+
+static int viacam_streamon(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+	struct via_camera *cam = priv;
+	int ret = 0;
+
+	if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	mutex_lock(&cam->lock);
+	if (cam->opstate != S_IDLE) {
+		ret = -EBUSY;
+		goto out;
+	}
+	/*
+	 * Enforce the V4L2 "only one owner gets to read data" rule.
+	 */
+	if (cam->owner && cam->owner != filp) {
+		ret = -EBUSY;
+		goto out;
+	}
+	cam->owner = filp;
+	/*
+	 * Configure things if need be.
+	 */
+	if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+		ret = viacam_configure_sensor(cam);
+		if (ret)
+			goto out;
+		ret = viacam_config_controller(cam);
+		if (ret)
+			goto out;
+	}
+	/*
+	 * If the CPU goes into C3, the DMA transfer gets corrupted and
+	 * users start filing unsightly bug reports.  Put in a "latency"
+	 * requirement which will keep the CPU out of the deeper sleep
+	 * states.
+	 */
+	pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+	/*
+	 * Fire things up.
+	 */
+	INIT_LIST_HEAD(&cam->buffer_queue);
+	ret = videobuf_streamon(&cam->vb_queue);
+	if (!ret)
+		viacam_start_engine(cam);
+out:
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+static int viacam_streamoff(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	mutex_lock(&cam->lock);
+	if (cam->opstate != S_RUNNING) {
+		ret = -EINVAL;
+		goto out;
+	}
+	pm_qos_remove_request(&cam->qos_request);
+	viacam_stop_engine(cam);
+	/*
+	 * Videobuf will recycle all of the outstanding buffers, but
+	 * we should be sure we don't retain any references to
+	 * any of them.
+	 */
+	ret = videobuf_streamoff(&cam->vb_queue);
+	INIT_LIST_HEAD(&cam->buffer_queue);
+out:
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+/* G/S_PARM */
+
+static int viacam_g_parm(struct file *filp, void *priv,
+		struct v4l2_streamparm *parm)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = sensor_call(cam, video, g_parm, parm);
+	mutex_unlock(&cam->lock);
+	parm->parm.capture.readbuffers = cam->n_cap_bufs;
+	return ret;
+}
+
+static int viacam_s_parm(struct file *filp, void *priv,
+		struct v4l2_streamparm *parm)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = sensor_call(cam, video, s_parm, parm);
+	mutex_unlock(&cam->lock);
+	parm->parm.capture.readbuffers = cam->n_cap_bufs;
+	return ret;
+}
+
+static int viacam_enum_framesizes(struct file *filp, void *priv,
+		struct v4l2_frmsizeenum *sizes)
+{
+	if (sizes->index != 0)
+		return -EINVAL;
+	sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+	sizes->stepwise.min_width = QCIF_WIDTH;
+	sizes->stepwise.min_height = QCIF_HEIGHT;
+	sizes->stepwise.max_width = VGA_WIDTH;
+	sizes->stepwise.max_height = VGA_HEIGHT;
+	sizes->stepwise.step_width = sizes->stepwise.step_height = 1;
+	return 0;
+}
+
+static int viacam_enum_frameintervals(struct file *filp, void *priv,
+		struct v4l2_frmivalenum *interval)
+{
+	struct via_camera *cam = priv;
+	int ret;
+
+	mutex_lock(&cam->lock);
+	ret = sensor_call(cam, video, enum_frameintervals, interval);
+	mutex_unlock(&cam->lock);
+	return ret;
+}
+
+
+
+static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
+	.vidioc_g_chip_ident	= viacam_g_chip_ident,
+	.vidioc_queryctrl	= viacam_queryctrl,
+	.vidioc_g_ctrl		= viacam_g_ctrl,
+	.vidioc_s_ctrl		= viacam_s_ctrl,
+	.vidioc_enum_input	= viacam_enum_input,
+	.vidioc_g_input		= viacam_g_input,
+	.vidioc_s_input		= viacam_s_input,
+	.vidioc_s_std		= viacam_s_std,
+	.vidioc_enum_fmt_vid_cap = viacam_enum_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap = viacam_try_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	= viacam_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap	= viacam_s_fmt_vid_cap,
+	.vidioc_querycap	= viacam_querycap,
+	.vidioc_reqbufs		= viacam_reqbufs,
+	.vidioc_querybuf	= viacam_querybuf,
+	.vidioc_qbuf		= viacam_qbuf,
+	.vidioc_dqbuf		= viacam_dqbuf,
+	.vidioc_streamon	= viacam_streamon,
+	.vidioc_streamoff	= viacam_streamoff,
+	.vidioc_g_parm		= viacam_g_parm,
+	.vidioc_s_parm		= viacam_s_parm,
+	.vidioc_enum_framesizes = viacam_enum_framesizes,
+	.vidioc_enum_frameintervals = viacam_enum_frameintervals,
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Power management.
+ */
+#ifdef CONFIG_PM
+
+static int viacam_suspend(void *priv)
+{
+	struct via_camera *cam = priv;
+	enum viacam_opstate state = cam->opstate;
+
+	if (cam->opstate != S_IDLE) {
+		viacam_stop_engine(cam);
+		cam->opstate = state; /* So resume restarts */
+	}
+
+	return 0;
+}
+
+static int viacam_resume(void *priv)
+{
+	struct via_camera *cam = priv;
+	int ret = 0;
+
+	/*
+	 * Get back to a reasonable operating state.
+	 */
+	via_write_reg_mask(VIASR, 0x78, 0, 0x80);
+	via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
+	viacam_int_disable(cam);
+	set_bit(CF_CONFIG_NEEDED, &cam->flags);
+	/*
+	 * Make sure the sensor's power state is correct
+	 */
+	if (cam->users > 0)
+		via_sensor_power_up(cam);
+	else
+		via_sensor_power_down(cam);
+	/*
+	 * If it was operating, try to restart it.
+	 */
+	if (cam->opstate != S_IDLE) {
+		mutex_lock(&cam->lock);
+		ret = viacam_configure_sensor(cam);
+		if (!ret)
+			ret = viacam_config_controller(cam);
+		mutex_unlock(&cam->lock);
+		if (!ret)
+			viacam_start_engine(cam);
+	}
+
+	return ret;
+}
+
+static struct viafb_pm_hooks viacam_pm_hooks = {
+	.suspend = viacam_suspend,
+	.resume = viacam_resume
+};
+
+#endif /* CONFIG_PM */
+
+/*
+ * Setup stuff.
+ */
+
+static struct video_device viacam_v4l_template = {
+	.name		= "via-camera",
+	.minor		= -1,
+	.tvnorms	= V4L2_STD_NTSC_M,
+	.current_norm	= V4L2_STD_NTSC_M,
+	.fops		= &viacam_fops,
+	.ioctl_ops	= &viacam_ioctl_ops,
+	.release	= video_device_release_empty, /* Check this */
+};
+
+/*
+ * The OLPC folks put the serial port on the same pin as
+ * the camera.	They also get grumpy if we break the
+ * serial port and keep them from using it.  So we have
+ * to check the serial enable bit and not step on it.
+ */
+#define VIACAM_SERIAL_DEVFN 0x88
+#define VIACAM_SERIAL_CREG 0x46
+#define VIACAM_SERIAL_BIT 0x40
+
+static __devinit bool viacam_serial_is_enabled(void)
+{
+	struct pci_bus *pbus = pci_find_bus(0, 0);
+	u8 cbyte;
+
+	if (!pbus)
+		return false;
+	pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+			VIACAM_SERIAL_CREG, &cbyte);
+	if ((cbyte & VIACAM_SERIAL_BIT) == 0)
+		return false; /* Not enabled */
+	if (override_serial == 0) {
+		printk(KERN_NOTICE "Via camera: serial port is enabled, " \
+				"refusing to load.\n");
+		printk(KERN_NOTICE "Specify override_serial=1 to force " \
+				"module loading.\n");
+		return true;
+	}
+	printk(KERN_NOTICE "Via camera: overriding serial port\n");
+	pci_bus_write_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+			VIACAM_SERIAL_CREG, cbyte & ~VIACAM_SERIAL_BIT);
+	return false;
+}
+
+static struct ov7670_config sensor_cfg = {
+	/* The XO-1.5 (only known user) clocks the camera at 90MHz. */
+	.clock_speed = 90,
+};
+
+static __devinit int viacam_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct i2c_adapter *sensor_adapter;
+	struct viafb_dev *viadev = pdev->dev.platform_data;
+	struct i2c_board_info ov7670_info = {
+		.type = "ov7670",
+		.addr = 0x42 >> 1,
+		.platform_data = &sensor_cfg,
+	};
+
+	/*
+	 * Note that there are actually two capture channels on
+	 * the device.	We only deal with one for now.	That
+	 * is encoded here; nothing else assumes it's dealing with
+	 * a unique capture device.
+	 */
+	struct via_camera *cam;
+
+	/*
+	 * Ensure that frame buffer memory has been set aside for
+	 * this purpose.  As an arbitrary limit, refuse to work
+	 * with less than two frames of VGA 16-bit data.
+	 *
+	 * If we ever support the second port, we'll need to set
+	 * aside more memory.
+	 */
+	if (viadev->camera_fbmem_size < (VGA_HEIGHT*VGA_WIDTH*4)) {
+		printk(KERN_ERR "viacam: insufficient FB memory reserved\n");
+		return -ENOMEM;
+	}
+	if (viadev->engine_mmio == NULL) {
+		printk(KERN_ERR "viacam: No I/O memory, so no pictures\n");
+		return -ENOMEM;
+	}
+
+	if (machine_is_olpc() && viacam_serial_is_enabled())
+		return -EBUSY;
+
+	/*
+	 * Basic structure initialization.
+	 */
+	cam = kzalloc(sizeof(struct via_camera), GFP_KERNEL);
+	if (cam == NULL)
+		return -ENOMEM;
+	via_cam_info = cam;
+	cam->platdev = pdev;
+	cam->viadev = viadev;
+	cam->users = 0;
+	cam->owner = NULL;
+	cam->opstate = S_IDLE;
+	cam->user_format = cam->sensor_format = viacam_def_pix_format;
+	mutex_init(&cam->lock);
+	INIT_LIST_HEAD(&cam->buffer_queue);
+	cam->mmio = viadev->engine_mmio;
+	cam->fbmem = viadev->fbmem;
+	cam->fb_offset = viadev->camera_fbmem_offset;
+	cam->flags = 1 << CF_CONFIG_NEEDED;
+	cam->mbus_code = via_def_mbus_code;
+	/*
+	 * Tell V4L that we exist.
+	 */
+	ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register v4l2 device\n");
+		return ret;
+	}
+	/*
+	 * Convince the system that we can do DMA.
+	 */
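+	/*
+	 * The camera platform device has no DMA mask of its own, so borrow
+	 * the parent PCI device's and allow full 32-bit addressing.
+	 */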
+	pdev->dev.dma_mask = &viadev->pdev->dma_mask;
+	dma_set_mask(&pdev->dev, 0xffffffff);
+	/*
+	 * Fire up the capture port.  The write to 0x78 looks purely
+	 * OLPCish; any system will need to tweak 0x1e.
+	 */
+	via_write_reg_mask(VIASR, 0x78, 0, 0x80);
+	via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
+	/*
+	 * Get the sensor powered up.
+	 */
+	ret = via_sensor_power_setup(cam);
+	if (ret)
+		goto out_unregister;
+	via_sensor_power_up(cam);
+
+	/*
+	 * See if we can find the sensor on the bus.  The VIA_PORT_31
+	 * assumption is OLPC-specific; the 0x42 address is ov7670-specific.
+	 */
+	sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
+	cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, sensor_adapter,
+			&ov7670_info, NULL);
+	if (cam->sensor == NULL) {
+		dev_err(&pdev->dev, "Unable to find the sensor!\n");
+		ret = -ENODEV;
+		goto out_power_down;
+	}
+	/*
+	 * Get the IRQ.
+	 */
+	viacam_int_disable(cam);
+	ret = request_threaded_irq(viadev->pdev->irq, viacam_quick_irq,
+			viacam_irq, IRQF_SHARED, "via-camera", cam);
+	if (ret)
+		goto out_power_down;
+	/*
+	 * Tell V4L2 that we exist.
+	 */
+	cam->vdev = viacam_v4l_template;
+	cam->vdev.v4l2_dev = &cam->v4l2_dev;
+	ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto out_irq;
+	video_set_drvdata(&cam->vdev, cam);
+
+#ifdef CONFIG_PM
+	/*
+	 * Hook into PM events
+	 */
+	viacam_pm_hooks.private = cam;
+	viafb_pm_register(&viacam_pm_hooks);
+#endif
+
+	/* Power the sensor down until somebody opens the device */
+	via_sensor_power_down(cam);
+	return 0;
+
+out_irq:
+	free_irq(viadev->pdev->irq, cam);
+out_power_down:
+	via_sensor_power_release(cam);
+out_unregister:
+	v4l2_device_unregister(&cam->v4l2_dev);
+	return ret;
+}
+
+static __devexit int viacam_remove(struct platform_device *pdev)
+{
+	struct via_camera *cam = via_cam_info;
+	struct viafb_dev *viadev = pdev->dev.platform_data;
+
+	video_unregister_device(&cam->vdev);
+	v4l2_device_unregister(&cam->v4l2_dev);
+	free_irq(viadev->pdev->irq, cam);
+	via_sensor_power_release(cam);
+	via_cam_info = NULL;
+	return 0;
+}
+
+static struct platform_driver viacam_driver = {
+	.driver = {
+		.name = "viafb-camera",
+	},
+	.probe = viacam_probe,
+	.remove = viacam_remove,
+};
+
+module_platform_driver(viacam_driver);
diff --git a/drivers/media/platform/via-camera.h b/drivers/media/platform/via-camera.h
new file mode 100644
index 0000000..b12a4b3
--- /dev/null
+++ b/drivers/media/platform/via-camera.h
@@ -0,0 +1,93 @@
+/*
+ * VIA Camera register definitions.
+ */
+#define VCR_INTCTRL	0x300	/* Capture interrupt control */
+#define   VCR_IC_EAV	  0x0001   /* End of active video status */
+#define	  VCR_IC_EVBI	  0x0002   /* End of VBI status */
+#define   VCR_IC_FBOTFLD  0x0004   /* "flipping" Bottom field is active */
+#define   VCR_IC_ACTBUF	  0x0018   /* Active video buffer  */
+#define   VCR_IC_VSYNC	  0x0020   /* 0 = VB, 1 = active video */
+#define   VCR_IC_BOTFLD	  0x0040   /* Bottom field is active */
+#define   VCR_IC_FFULL	  0x0080   /* FIFO full */
+#define   VCR_IC_INTEN	  0x0100   /* End of active video int. enable */
+#define   VCR_IC_VBIINT	  0x0200   /* End of VBI int enable */
+#define   VCR_IC_VBIBUF	  0x0400   /* Current VBI buffer */
+
+#define VCR_TSC		0x308	/* Transport stream control */
+#define VCR_TSC_ENABLE    0x000001   /* Transport stream input enable */
+#define VCR_TSC_DROPERR   0x000002   /* Drop error packets */
+#define VCR_TSC_METHOD    0x00000c   /* DMA method (non-functional) */
+#define VCR_TSC_COUNT     0x07fff0   /* KByte or packet count */
+#define VCR_TSC_CBMODE	  0x080000   /* Change buffer by byte count */
+#define VCR_TSC_PSSIG	  0x100000   /* Packet starting signal disable */
+#define VCR_TSC_BE	  0x200000   /* MSB first (serial mode) */
+#define VCR_TSC_SERIAL	  0x400000   /* Serial input (0 = parallel) */
+
+#define VCR_CAPINTC	0x310	/* Capture interface control */
+#define   VCR_CI_ENABLE   0x00000001  /* Capture enable */
+#define   VCR_CI_BSS	  0x00000002  /* WTF "bit stream selection" */
+#define   VCR_CI_3BUFS	  0x00000004  /* 1 = 3 buffers, 0 = 2 buffers */
+#define   VCR_CI_VIPEN	  0x00000008  /* VIP enable */
+#define   VCR_CI_CCIR601_8  0	        /* CCIR601 input stream, 8 bit */
+#define   VCR_CI_CCIR656_8  0x00000010  /* ... CCIR656, 8 bit */
+#define   VCR_CI_CCIR601_16 0x00000020  /* ... CCIR601, 16 bit */
+#define   VCR_CI_CCIR656_16 0x00000030  /* ... CCIR656, 16 bit */
+#define   VCR_CI_HDMODE   0x00000040  /* CCIR656-16 hdr decode mode; 1=16b */
+#define   VCR_CI_BSWAP    0x00000080  /* Swap bytes (16-bit) */
+#define   VCR_CI_YUYV	  0	      /* Byte order 0123 */
+#define   VCR_CI_UYVY	  0x00000100  /* Byte order 1032 */
+#define   VCR_CI_YVYU	  0x00000200  /* Byte order 0321 */
+#define   VCR_CI_VYUY	  0x00000300  /* Byte order 3012 */
+#define   VCR_CI_VIPTYPE  0x00000400  /* VIP type */
+#define   VCR_CI_IFSEN    0x00000800  /* Input field signal enable */
+#define   VCR_CI_DIODD	  0	      /* De-interlace odd, 30fps */
+#define   VCR_CI_DIEVEN   0x00001000  /*    ...even field, 30fps */
+#define   VCR_CI_DIBOTH   0x00002000  /*    ...both fields, 60fps */
+#define   VCR_CI_DIBOTH30 0x00003000  /*    ...both fields, 30fps interlace */
+#define   VCR_CI_CONVTYPE 0x00004000  /* 4:2:2 to 4:4:4; 1 = interpolate */
+#define   VCR_CI_CFC	  0x00008000  /* Capture flipping control */
+#define   VCR_CI_FILTER   0x00070000  /* Horiz filter mode select
+					 000 = none
+					 001 = 2 tap
+					 010 = 3 tap
+					 011 = 4 tap
+					 100 = 5 tap */
+#define   VCR_CI_CLKINV   0x00080000  /* Input CLK inverted */
+#define   VCR_CI_VREFINV  0x00100000  /* VREF inverted */
+#define   VCR_CI_HREFINV  0x00200000  /* HREF inverted */
+#define   VCR_CI_FLDINV   0x00400000  /* Field inverted */
+#define   VCR_CI_CLKPIN	  0x00800000  /* Capture clock pin */
+#define   VCR_CI_THRESH   0x0f000000  /* Capture fifo threshold */
+#define   VCR_CI_HRLE     0x10000000  /* Positive edge of HREF */
+#define   VCR_CI_VRLE     0x20000000  /* Positive edge of VREF */
+#define   VCR_CI_OFLDINV  0x40000000  /* Field output inverted */
+#define   VCR_CI_CLKEN    0x80000000  /* Capture clock enable */
+
+#define VCR_HORRANGE	0x314	/* Active video horizontal range */
+#define VCR_VERTRANGE	0x318	/* Active video vertical range */
+#define VCR_AVSCALE	0x31c	/* Active video scaling control */
+#define   VCR_AVS_HEN	  0x00000800   /* Horizontal scale enable */
+#define   VCR_AVS_VEN	  0x04000000   /* Vertical enable */
+#define VCR_VBIHOR	0x320	/* VBI Data horizontal range */
+#define VCR_VBIVERT	0x324	/* VBI data vertical range */
+#define VCR_VBIBUF1	0x328	/* First VBI buffer */
+#define VCR_VBISTRIDE	0x32c	/* VBI stride */
+#define VCR_ANCDATACNT	0x330	/* Ancillary data count setting */
+#define VCR_MAXDATA	0x334	/* Active data count of active video */
+#define VCR_MAXVBI	0x338	/* Maximum data count of VBI */
+#define VCR_CAPDATA	0x33c	/* Capture data count */
+#define VCR_VBUF1	0x340	/* First video buffer */
+#define VCR_VBUF2	0x344	/* Second video buffer */
+#define VCR_VBUF3	0x348	/* Third video buffer */
+#define VCR_VBUF_MASK	0x1ffffff0	/* Bits 28:4 */
+#define VCR_VBIBUF2	0x34c	/* Second VBI buffer */
+#define VCR_VSTRIDE	0x350	/* Stride of video + coring control */
+#define   VCR_VS_STRIDE_SHIFT 4
+#define   VCR_VS_STRIDE   0x00001ff0  /* Stride (8-byte units) */
+#define   VCR_VS_CCD	  0x007f0000  /* Coring compare data */
+#define   VCR_VS_COREEN   0x00800000  /* Coring enable */
+#define VCR_TS0ERR	0x354	/* TS buffer 0 error indicator */
+#define VCR_TS1ERR	0x358	/* TS buffer 1 error indicator */
+#define VCR_TS2ERR	0x35c	/* TS buffer 2 error indicator */
+
+/* Add 0x1000 for the second capture engine registers */
diff --git a/drivers/media/platform/vino.c b/drivers/media/platform/vino.c
new file mode 100644
index 0000000..aae1720
--- /dev/null
+++ b/drivers/media/platform/vino.c
@@ -0,0 +1,4349 @@
+/*
+ * Driver for the VINO (Video In No Out) system found in SGI Indys.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
+ *
+ * Based on the previous version of the driver for 2.4 kernels by:
+ * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
+ *
+ * v4l2_device/v4l2_subdev conversion by:
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * Note: this conversion is untested! Please contact the linux-media
+ * mailing list with your test results if you are able to test it.
+ */
+
+/*
+ * TODO:
+ * - remove "mark pages reserved-hacks" from memory allocation code
+ *   and implement fault()
+ * - check decimation, calculating and reporting image size when
+ *   using decimation
+ * - implement read(), user mode buffers and overlay (?)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/kmod.h>
+
+#include <linux/i2c.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/mutex.h>
+
+#include <asm/paccess.h>
+#include <asm/io.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgi/mc.h>
+
+#include "vino.h"
+#include "saa7191.h"
+#include "indycam.h"
+
+/* Uncomment the following lines to get lots and lots of (mostly useless)
+ * debug info.
+ * Note that the debug output also slows the driver down significantly. */
+// #define VINO_DEBUG
+// #define VINO_DEBUG_INT
+
+#define VINO_MODULE_VERSION "0.0.7"
+
+MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
+MODULE_VERSION(VINO_MODULE_VERSION);
+MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
+MODULE_LICENSE("GPL");
+
+#ifdef VINO_DEBUG
+#define dprintk(x...) printk("VINO: " x);
+#else
+#define dprintk(x...)
+#endif
+
+#define VINO_NO_CHANNEL			0
+#define VINO_CHANNEL_A			1
+#define VINO_CHANNEL_B			2
+
+#define VINO_PAL_WIDTH			768
+#define VINO_PAL_HEIGHT			576
+#define VINO_NTSC_WIDTH			640
+#define VINO_NTSC_HEIGHT		480
+
+#define VINO_MIN_WIDTH			32
+#define VINO_MIN_HEIGHT			32
+
+#define VINO_CLIPPING_START_ODD_D1	1
+#define VINO_CLIPPING_START_ODD_PAL	15
+#define VINO_CLIPPING_START_ODD_NTSC	12
+
+#define VINO_CLIPPING_START_EVEN_D1	2
+#define VINO_CLIPPING_START_EVEN_PAL	15
+#define VINO_CLIPPING_START_EVEN_NTSC	12
+
+#define VINO_INPUT_CHANNEL_COUNT	3
+
+/* the number is the index for vino_inputs */
+#define VINO_INPUT_NONE			-1
+#define VINO_INPUT_COMPOSITE		0
+#define VINO_INPUT_SVIDEO		1
+#define VINO_INPUT_D1			2
+
+#define VINO_PAGE_RATIO			(PAGE_SIZE / VINO_PAGE_SIZE)
+
+#define VINO_FIFO_THRESHOLD_DEFAULT	16
+
+#define VINO_FRAMEBUFFER_SIZE		((VINO_PAL_WIDTH \
+					  * VINO_PAL_HEIGHT * 4 \
+					  + 3 * PAGE_SIZE) & ~(PAGE_SIZE - 1))
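+/* A worked example of the size calculation above (assuming 4 KiB pages,
+ * illustrative only): a full PAL RGB32 frame is 768 * 576 * 4 = 1769472
+ * bytes; adding 3 * 4096 bytes of slack and rounding down to a page
+ * boundary gives 1781760 bytes (435 pages) per framebuffer. */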
+
+#define VINO_FRAMEBUFFER_COUNT_MAX	8
+
+#define VINO_FRAMEBUFFER_UNUSED		0
+#define VINO_FRAMEBUFFER_IN_USE		1
+#define VINO_FRAMEBUFFER_READY		2
+
+#define VINO_QUEUE_ERROR		-1
+#define VINO_QUEUE_MAGIC		0x20050125
+
+#define VINO_MEMORY_NONE		0
+#define VINO_MEMORY_MMAP		1
+#define VINO_MEMORY_USERPTR		2
+
+#define VINO_DUMMY_DESC_COUNT		4
+#define VINO_DESC_FETCH_DELAY		5	/* microseconds */
+
+#define VINO_MAX_FRAME_SKIP_COUNT	128
+
+/* the number is the index for vino_data_formats */
+#define VINO_DATA_FMT_NONE		-1
+#define VINO_DATA_FMT_GREY		0
+#define VINO_DATA_FMT_RGB332		1
+#define VINO_DATA_FMT_RGB32		2
+#define VINO_DATA_FMT_YUV		3
+
+#define VINO_DATA_FMT_COUNT		4
+
+/* the number is the index for vino_data_norms */
+#define VINO_DATA_NORM_NONE		-1
+#define VINO_DATA_NORM_NTSC		0
+#define VINO_DATA_NORM_PAL		1
+#define VINO_DATA_NORM_SECAM		2
+#define VINO_DATA_NORM_D1		3
+
+#define VINO_DATA_NORM_COUNT		4
+
+/* I2C controller flags */
+#define SGI_I2C_FORCE_IDLE		(0 << 0)
+#define SGI_I2C_NOT_IDLE		(1 << 0)
+#define SGI_I2C_WRITE			(0 << 1)
+#define SGI_I2C_READ			(1 << 1)
+#define SGI_I2C_RELEASE_BUS		(0 << 2)
+#define SGI_I2C_HOLD_BUS		(1 << 2)
+#define SGI_I2C_XFER_DONE		(0 << 4)
+#define SGI_I2C_XFER_BUSY		(1 << 4)
+#define SGI_I2C_ACK			(0 << 5)
+#define SGI_I2C_NACK			(1 << 5)
+#define SGI_I2C_BUS_OK			(0 << 7)
+#define SGI_I2C_BUS_ERR			(1 << 7)
+
+/* Internal data structure definitions */
+
+struct vino_input {
+	char *name;
+	v4l2_std_id std;
+};
+
+struct vino_clipping {
+	unsigned int left, right, top, bottom;
+};
+
+struct vino_data_format {
+	/* the description */
+	char *description;
+	/* bytes per pixel */
+	unsigned int bpp;
+	/* V4L2 fourcc code */
+	__u32 pixelformat;
+	/* V4L2 colorspace (duh!) */
+	enum v4l2_colorspace colorspace;
+};
+
+struct vino_data_norm {
+	char *description;
+	unsigned int width, height;
+	struct vino_clipping odd;
+	struct vino_clipping even;
+
+	v4l2_std_id std;
+	unsigned int fps_min, fps_max;
+	__u32 framelines;
+};
+
+struct vino_descriptor_table {
+	/* the number of PAGE_SIZE sized pages in the buffer */
+	unsigned int page_count;
+	/* kernel virtual addresses of the data pages
+	 * (in PAGE_SIZE chunks, used with mmap streaming) */
+	unsigned long *virtual;
+
+	/* cpu address for the VINO descriptor table
+	 * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
+	unsigned long *dma_cpu;
+	/* dma address for the VINO descriptor table
+	 * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
+	dma_addr_t dma;
+};
+
+struct vino_framebuffer {
+	/* identifier number */
+	unsigned int id;
+	/* the length of the whole buffer */
+	unsigned int size;
+	/* the length of actual data in buffer */
+	unsigned int data_size;
+	/* the data format */
+	unsigned int data_format;
+	/* the state of buffer data */
+	unsigned int state;
+	/* is the buffer mapped in user space? */
+	unsigned int map_count;
+	/* memory offset for mmap() */
+	unsigned int offset;
+	/* frame counter */
+	unsigned int frame_counter;
+	/* timestamp (written when image capture finishes) */
+	struct timeval timestamp;
+
+	struct vino_descriptor_table desc_table;
+
+	spinlock_t state_lock;
+};
+
+struct vino_framebuffer_fifo {
+	unsigned int length;
+
+	unsigned int used;
+	unsigned int head;
+	unsigned int tail;
+
+	unsigned int data[VINO_FRAMEBUFFER_COUNT_MAX];
+};
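+/* The fifo above is a plain ring of buffer ids: enqueue writes at 'tail',
+ * dequeue reads from 'head', both wrap modulo 'length', and 'used' tracks
+ * the fill level. Illustrative example (not from the original source):
+ * with length = 4 and ids 2, 0 and 1 enqueued in that order, data[] holds
+ * { 2, 0, 1, - }, head = 0, tail = 3 and used = 3. */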
+
+struct vino_framebuffer_queue {
+	unsigned int magic;
+
+	/* VINO_MEMORY_NONE, VINO_MEMORY_MMAP or VINO_MEMORY_USERPTR */
+	unsigned int type;
+	unsigned int length;
+
+	/* data field of in and out contain index numbers for buffer */
+	struct vino_framebuffer_fifo in;
+	struct vino_framebuffer_fifo out;
+
+	struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_COUNT_MAX];
+
+	spinlock_t queue_lock;
+	struct mutex queue_mutex;
+	wait_queue_head_t frame_wait_queue;
+};
+
+struct vino_interrupt_data {
+	struct timeval timestamp;
+	unsigned int frame_counter;
+	unsigned int skip_count;
+	unsigned int skip;
+};
+
+struct vino_channel_settings {
+	unsigned int channel;
+
+	int input;
+	unsigned int data_format;
+	unsigned int data_norm;
+	struct vino_clipping clipping;
+	unsigned int decimation;
+	unsigned int line_size;
+	unsigned int alpha;
+	unsigned int fps;
+	unsigned int framert_reg;
+
+	unsigned int fifo_threshold;
+
+	struct vino_framebuffer_queue fb_queue;
+
+	/* number of the current field */
+	unsigned int field;
+
+	/* read in progress */
+	int reading;
+	/* streaming is active */
+	int streaming;
+	/* the driver is currently processing the queue */
+	int capturing;
+
+	struct mutex mutex;
+	spinlock_t capture_lock;
+
+	unsigned int users;
+
+	struct vino_interrupt_data int_data;
+
+	/* V4L support */
+	struct video_device *vdev;
+};
+
+struct vino_settings {
+	struct v4l2_device v4l2_dev;
+	struct vino_channel_settings a;
+	struct vino_channel_settings b;
+
+	/* the channel which owns this client:
+	 * VINO_NO_CHANNEL, VINO_CHANNEL_A or VINO_CHANNEL_B */
+	unsigned int decoder_owner;
+	struct v4l2_subdev *decoder;
+	unsigned int camera_owner;
+	struct v4l2_subdev *camera;
+
+	/* a lock for vino register access */
+	spinlock_t vino_lock;
+	/* a lock for channel input changes */
+	spinlock_t input_lock;
+
+	unsigned long dummy_page;
+	struct vino_descriptor_table dummy_desc_table;
+};
+
+/* Module parameters */
+
+/*
+ * When vino_pixel_conversion is enabled, the ABGR32-format pixels
+ * supplied by the VINO chip are converted to more common formats
+ * such as RGBA32 (and possibly RGB24 in the future). This way the
+ * data handed out can be described correctly with the V4L2
+ * definitions.
+ *
+ * When no conversion is used, the pixel format is still reported
+ * as RGBA32.
+ *
+ * Note that this only affects the 32-bit pixel depth.
+ *
+ * Use a non-zero value to enable conversion.
+ */
+static int vino_pixel_conversion;
+
+module_param_named(pixelconv, vino_pixel_conversion, int, 0);
+
+MODULE_PARM_DESC(pixelconv,
+		 "enable pixel conversion (non-zero value enables)");
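+/* Illustrative usage (not part of the original source): the conversion
+ * can be enabled at load time with e.g. "modprobe vino pixelconv=1", or,
+ * for a built-in driver, with "vino.pixelconv=1" on the kernel command
+ * line. */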
+
+/* Internal data structures */
+
+static struct sgi_vino *vino;
+
+static struct vino_settings *vino_drvdata;
+
+#define camera_call(o, f, args...) \
+	v4l2_subdev_call(vino_drvdata->camera, o, f, ##args)
+#define decoder_call(o, f, args...) \
+	v4l2_subdev_call(vino_drvdata->decoder, o, f, ##args)
+
+static const char *vino_driver_name = "vino";
+static const char *vino_driver_description = "SGI VINO";
+static const char *vino_bus_name = "GIO64 bus";
+static const char *vino_vdev_name_a = "SGI VINO Channel A";
+static const char *vino_vdev_name_b = "SGI VINO Channel B";
+
+static void vino_capture_tasklet(unsigned long channel);
+
+DECLARE_TASKLET(vino_tasklet_a, vino_capture_tasklet, VINO_CHANNEL_A);
+DECLARE_TASKLET(vino_tasklet_b, vino_capture_tasklet, VINO_CHANNEL_B);
+
+static const struct vino_input vino_inputs[] = {
+	{
+		.name		= "Composite",
+		.std		= V4L2_STD_NTSC | V4L2_STD_PAL
+		| V4L2_STD_SECAM,
+	}, {
+		.name		= "S-Video",
+		.std		= V4L2_STD_NTSC | V4L2_STD_PAL
+		| V4L2_STD_SECAM,
+	}, {
+		.name		= "D1/IndyCam",
+		.std		= V4L2_STD_NTSC,
+	}
+};
+
+static const struct vino_data_format vino_data_formats[] = {
+	{
+		.description	= "8-bit greyscale",
+		.bpp		= 1,
+		.pixelformat	= V4L2_PIX_FMT_GREY,
+		.colorspace	= V4L2_COLORSPACE_SMPTE170M,
+	}, {
+		.description	= "8-bit dithered RGB 3-3-2",
+		.bpp		= 1,
+		.pixelformat	= V4L2_PIX_FMT_RGB332,
+		.colorspace	= V4L2_COLORSPACE_SRGB,
+	}, {
+		.description	= "32-bit RGB",
+		.bpp		= 4,
+		.pixelformat	= V4L2_PIX_FMT_RGB32,
+		.colorspace	= V4L2_COLORSPACE_SRGB,
+	}, {
+		.description	= "YUV 4:2:2",
+		.bpp		= 2,
+		.pixelformat	= V4L2_PIX_FMT_YUYV, // XXX: swapped?
+		.colorspace	= V4L2_COLORSPACE_SMPTE170M,
+	}
+};
+
+static const struct vino_data_norm vino_data_norms[] = {
+	{
+		.description	= "NTSC",
+		.std		= V4L2_STD_NTSC,
+		.fps_min	= 6,
+		.fps_max	= 30,
+		.framelines	= 525,
+		.width		= VINO_NTSC_WIDTH,
+		.height		= VINO_NTSC_HEIGHT,
+		.odd		= {
+			.top	= VINO_CLIPPING_START_ODD_NTSC,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_ODD_NTSC
+			+ VINO_NTSC_HEIGHT / 2 - 1,
+			.right	= VINO_NTSC_WIDTH,
+		},
+		.even		= {
+			.top	= VINO_CLIPPING_START_EVEN_NTSC,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_EVEN_NTSC
+			+ VINO_NTSC_HEIGHT / 2 - 1,
+			.right	= VINO_NTSC_WIDTH,
+		},
+	}, {
+		.description	= "PAL",
+		.std		= V4L2_STD_PAL,
+		.fps_min	= 5,
+		.fps_max	= 25,
+		.framelines	= 625,
+		.width		= VINO_PAL_WIDTH,
+		.height		= VINO_PAL_HEIGHT,
+		.odd		= {
+			.top	= VINO_CLIPPING_START_ODD_PAL,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_ODD_PAL
+			+ VINO_PAL_HEIGHT / 2 - 1,
+			.right	= VINO_PAL_WIDTH,
+		},
+		.even		= {
+			.top	= VINO_CLIPPING_START_EVEN_PAL,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_EVEN_PAL
+			+ VINO_PAL_HEIGHT / 2 - 1,
+			.right	= VINO_PAL_WIDTH,
+		},
+	}, {
+		.description	= "SECAM",
+		.std		= V4L2_STD_SECAM,
+		.fps_min	= 5,
+		.fps_max	= 25,
+		.framelines	= 625,
+		.width		= VINO_PAL_WIDTH,
+		.height		= VINO_PAL_HEIGHT,
+		.odd		= {
+			.top	= VINO_CLIPPING_START_ODD_PAL,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_ODD_PAL
+			+ VINO_PAL_HEIGHT / 2 - 1,
+			.right	= VINO_PAL_WIDTH,
+		},
+		.even		= {
+			.top	= VINO_CLIPPING_START_EVEN_PAL,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_EVEN_PAL
+			+ VINO_PAL_HEIGHT / 2 - 1,
+			.right	= VINO_PAL_WIDTH,
+		},
+	}, {
+		.description	= "NTSC/D1",
+		.std		= V4L2_STD_NTSC,
+		.fps_min	= 6,
+		.fps_max	= 30,
+		.framelines	= 525,
+		.width		= VINO_NTSC_WIDTH,
+		.height		= VINO_NTSC_HEIGHT,
+		.odd		= {
+			.top	= VINO_CLIPPING_START_ODD_D1,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_ODD_D1
+			+ VINO_NTSC_HEIGHT / 2 - 1,
+			.right	= VINO_NTSC_WIDTH,
+		},
+		.even		= {
+			.top	= VINO_CLIPPING_START_EVEN_D1,
+			.left	= 0,
+			.bottom	= VINO_CLIPPING_START_EVEN_D1
+			+ VINO_NTSC_HEIGHT / 2 - 1,
+			.right	= VINO_NTSC_WIDTH,
+		},
+	}
+};
+
+#define VINO_INDYCAM_V4L2_CONTROL_COUNT		9
+
+struct v4l2_queryctrl vino_indycam_v4l2_controls[] = {
+	{
+		.id = V4L2_CID_AUTOGAIN,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Automatic Gain Control",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = INDYCAM_AGC_DEFAULT,
+	}, {
+		.id = V4L2_CID_AUTO_WHITE_BALANCE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Automatic White Balance",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = INDYCAM_AWB_DEFAULT,
+	}, {
+		.id = V4L2_CID_GAIN,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Gain",
+		.minimum = INDYCAM_GAIN_MIN,
+		.maximum = INDYCAM_GAIN_MAX,
+		.step = 1,
+		.default_value = INDYCAM_GAIN_DEFAULT,
+	}, {
+		.id = INDYCAM_CONTROL_RED_SATURATION,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Red Saturation",
+		.minimum = INDYCAM_RED_SATURATION_MIN,
+		.maximum = INDYCAM_RED_SATURATION_MAX,
+		.step = 1,
+		.default_value = INDYCAM_RED_SATURATION_DEFAULT,
+	}, {
+		.id = INDYCAM_CONTROL_BLUE_SATURATION,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Blue Saturation",
+		.minimum = INDYCAM_BLUE_SATURATION_MIN,
+		.maximum = INDYCAM_BLUE_SATURATION_MAX,
+		.step = 1,
+		.default_value = INDYCAM_BLUE_SATURATION_DEFAULT,
+	}, {
+		.id = V4L2_CID_RED_BALANCE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Red Balance",
+		.minimum = INDYCAM_RED_BALANCE_MIN,
+		.maximum = INDYCAM_RED_BALANCE_MAX,
+		.step = 1,
+		.default_value = INDYCAM_RED_BALANCE_DEFAULT,
+	}, {
+		.id = V4L2_CID_BLUE_BALANCE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Blue Balance",
+		.minimum = INDYCAM_BLUE_BALANCE_MIN,
+		.maximum = INDYCAM_BLUE_BALANCE_MAX,
+		.step = 1,
+		.default_value = INDYCAM_BLUE_BALANCE_DEFAULT,
+	}, {
+		.id = V4L2_CID_EXPOSURE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Shutter Control",
+		.minimum = INDYCAM_SHUTTER_MIN,
+		.maximum = INDYCAM_SHUTTER_MAX,
+		.step = 1,
+		.default_value = INDYCAM_SHUTTER_DEFAULT,
+	}, {
+		.id = V4L2_CID_GAMMA,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Gamma",
+		.minimum = INDYCAM_GAMMA_MIN,
+		.maximum = INDYCAM_GAMMA_MAX,
+		.step = 1,
+		.default_value = INDYCAM_GAMMA_DEFAULT,
+	}
+};
+
+#define VINO_SAA7191_V4L2_CONTROL_COUNT		9
+
+struct v4l2_queryctrl vino_saa7191_v4l2_controls[] = {
+	{
+		.id = V4L2_CID_HUE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Hue",
+		.minimum = SAA7191_HUE_MIN,
+		.maximum = SAA7191_HUE_MAX,
+		.step = 1,
+		.default_value = SAA7191_HUE_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_BANDPASS,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Luminance Bandpass",
+		.minimum = SAA7191_BANDPASS_MIN,
+		.maximum = SAA7191_BANDPASS_MAX,
+		.step = 1,
+		.default_value = SAA7191_BANDPASS_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_BANDPASS_WEIGHT,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Luminance Bandpass Weight",
+		.minimum = SAA7191_BANDPASS_WEIGHT_MIN,
+		.maximum = SAA7191_BANDPASS_WEIGHT_MAX,
+		.step = 1,
+		.default_value = SAA7191_BANDPASS_WEIGHT_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_CORING,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "HF Luminance Coring",
+		.minimum = SAA7191_CORING_MIN,
+		.maximum = SAA7191_CORING_MAX,
+		.step = 1,
+		.default_value = SAA7191_CORING_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_FORCE_COLOUR,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Force Colour",
+		.minimum = SAA7191_FORCE_COLOUR_MIN,
+		.maximum = SAA7191_FORCE_COLOUR_MAX,
+		.step = 1,
+		.default_value = SAA7191_FORCE_COLOUR_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_CHROMA_GAIN,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Chrominance Gain Control",
+		.minimum = SAA7191_CHROMA_GAIN_MIN,
+		.maximum = SAA7191_CHROMA_GAIN_MAX,
+		.step = 1,
+		.default_value = SAA7191_CHROMA_GAIN_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_VTRC,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "VTR Time Constant",
+		.minimum = SAA7191_VTRC_MIN,
+		.maximum = SAA7191_VTRC_MAX,
+		.step = 1,
+		.default_value = SAA7191_VTRC_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_LUMA_DELAY,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Luminance Delay Compensation",
+		.minimum = SAA7191_LUMA_DELAY_MIN,
+		.maximum = SAA7191_LUMA_DELAY_MAX,
+		.step = 1,
+		.default_value = SAA7191_LUMA_DELAY_DEFAULT,
+	}, {
+		.id = SAA7191_CONTROL_VNR,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Vertical Noise Reduction",
+		.minimum = SAA7191_VNR_MIN,
+		.maximum = SAA7191_VNR_MAX,
+		.step = 1,
+		.default_value = SAA7191_VNR_DEFAULT,
+	}
+};
+
+/* VINO framebuffer/DMA descriptor management */
+
+static void vino_free_buffer_with_count(struct vino_framebuffer *fb,
+					       unsigned int count)
+{
+	unsigned int i;
+
+	dprintk("vino_free_buffer_with_count(): count = %d\n", count);
+
+	for (i = 0; i < count; i++) {
+		ClearPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
+		dma_unmap_single(NULL,
+				 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		free_page(fb->desc_table.virtual[i]);
+	}
+
+	dma_free_coherent(NULL,
+			  VINO_PAGE_RATIO * (fb->desc_table.page_count + 4) *
+			  sizeof(dma_addr_t), (void *)fb->desc_table.dma_cpu,
+			  fb->desc_table.dma);
+	kfree(fb->desc_table.virtual);
+
+	memset(fb, 0, sizeof(struct vino_framebuffer));
+}
+
+static void vino_free_buffer(struct vino_framebuffer *fb)
+{
+	vino_free_buffer_with_count(fb, fb->desc_table.page_count);
+}
+
+static int vino_allocate_buffer(struct vino_framebuffer *fb,
+				unsigned int size)
+{
+	unsigned int count, i, j;
+	int ret = 0;
+
+	dprintk("vino_allocate_buffer():\n");
+
+	if (size < 1)
+		return -EINVAL;
+
+	memset(fb, 0, sizeof(struct vino_framebuffer));
+
+	count = ((size / PAGE_SIZE) + 4) & ~3;
+
+	dprintk("vino_allocate_buffer(): size = %d, count = %d\n",
+		size, count);
+
+	/* allocate memory for table with virtual (page) addresses */
+	fb->desc_table.virtual =
+		kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
+	if (!fb->desc_table.virtual)
+		return -ENOMEM;
+
+	/* allocate memory for table with dma addresses
+	 * (has space for four extra descriptors) */
+	fb->desc_table.dma_cpu =
+		dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
+				   sizeof(dma_addr_t), &fb->desc_table.dma,
+				   GFP_KERNEL | GFP_DMA);
+	if (!fb->desc_table.dma_cpu) {
+		ret = -ENOMEM;
+		goto out_free_virtual;
+	}
+
+	/* allocate pages for the buffer and acquire the according
+	 * dma addresses */
+	for (i = 0; i < count; i++) {
+		dma_addr_t dma_data_addr;
+
+		fb->desc_table.virtual[i] =
+			get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!fb->desc_table.virtual[i]) {
+			ret = -ENOBUFS;
+			break;
+		}
+
+		dma_data_addr =
+			dma_map_single(NULL,
+				       (void *)fb->desc_table.virtual[i],
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+
+		for (j = 0; j < VINO_PAGE_RATIO; j++) {
+			fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
+				dma_data_addr + VINO_PAGE_SIZE * j;
+		}
+
+		SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
+	}
+
+	/* page_count needs to be set anyway, because the descriptor table has
+	 * been allocated according to this number */
+	fb->desc_table.page_count = count;
+
+	if (ret) {
+		/* the descriptor with index i doesn't contain
+		 * a valid address yet */
+		vino_free_buffer_with_count(fb, i);
+		return ret;
+	}
+
+	//fb->size = size;
+	fb->size = count * PAGE_SIZE;
+	fb->data_format = VINO_DATA_FMT_NONE;
+
+	/* set the dma stop-bit for the last (count+1)th descriptor */
+	fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
+	return 0;
+
+ out_free_virtual:
+	kfree(fb->desc_table.virtual);
+	return ret;
+}
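+/* Sketch of the resulting descriptor layout (assuming PAGE_SIZE is an
+ * integer multiple of VINO_PAGE_SIZE): each kernel page contributes
+ * VINO_PAGE_RATIO consecutive entries to dma_cpu[], each pointing at one
+ * VINO_PAGE_SIZE chunk of that page, and the entry right after the last
+ * real descriptor carries VINO_DESC_STOP so the hardware stops there. */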
+
+#if 0
+/* user buffers not fully implemented yet */
+static int vino_prepare_user_buffer(struct vino_framebuffer *fb,
+				     void *user,
+				     unsigned int size)
+{
+	unsigned int count, i, j;
+	int ret = 0;
+
+	dprintk("vino_prepare_user_buffer():\n");
+
+	if (size < 1)
+		return -EINVAL;
+
+	memset(fb, 0, sizeof(struct vino_framebuffer));
+
+	count = ((size / PAGE_SIZE)) & ~3;
+
+	dprintk("vino_prepare_user_buffer(): size = %d, count = %d\n",
+		size, count);
+
+	/* allocate memory for table with virtual (page) addresses */
+	fb->desc_table.virtual = (unsigned long *)
+		kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
+	if (!fb->desc_table.virtual)
+		return -ENOMEM;
+
+	/* allocate memory for table with dma addresses
+	 * (has space for four extra descriptors) */
+	fb->desc_table.dma_cpu =
+		dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
+				   sizeof(dma_addr_t), &fb->desc_table.dma,
+				   GFP_KERNEL | GFP_DMA);
+	if (!fb->desc_table.dma_cpu) {
+		ret = -ENOMEM;
+		goto out_free_virtual;
+	}
+
+	/* allocate pages for the buffer and acquire the according
+	 * dma addresses */
+	for (i = 0; i < count; i++) {
+		dma_addr_t dma_data_addr;
+
+		fb->desc_table.virtual[i] =
+			get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!fb->desc_table.virtual[i]) {
+			ret = -ENOBUFS;
+			break;
+		}
+
+		dma_data_addr =
+			dma_map_single(NULL,
+				       (void *)fb->desc_table.virtual[i],
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+
+		for (j = 0; j < VINO_PAGE_RATIO; j++) {
+			fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
+				dma_data_addr + VINO_PAGE_SIZE * j;
+		}
+
+		SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
+	}
+
+	/* page_count needs to be set anyway, because the descriptor table has
+	 * been allocated according to this number */
+	fb->desc_table.page_count = count;
+
+	if (ret) {
+		/* the descriptor with index i doesn't contain
+		 * a valid address yet */
+		vino_free_buffer_with_count(fb, i);
+		return ret;
+	}
+
+	//fb->size = size;
+	fb->size = count * PAGE_SIZE;
+
+	/* set the dma stop-bit for the last (count+1)th descriptor */
+	fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
+	return 0;
+
+ out_free_virtual:
+	kfree(fb->desc_table.virtual);
+	return ret;
+}
+#endif
+
+static void vino_sync_buffer(struct vino_framebuffer *fb)
+{
+	int i;
+
+	dprintk("vino_sync_buffer():\n");
+
+	for (i = 0; i < fb->desc_table.page_count; i++)
+		dma_sync_single_for_cpu(NULL,
+					fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
+					PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
+/* Framebuffer fifo functions (need to be locked externally) */
+
+static inline void vino_fifo_init(struct vino_framebuffer_fifo *f,
+			   unsigned int length)
+{
+	f->length = 0;
+	f->used = 0;
+	f->head = 0;
+	f->tail = 0;
+
+	if (length > VINO_FRAMEBUFFER_COUNT_MAX)
+		length = VINO_FRAMEBUFFER_COUNT_MAX;
+
+	f->length = length;
+}
+
+/* returns true/false */
+static inline int vino_fifo_has_id(struct vino_framebuffer_fifo *f,
+				   unsigned int id)
+{
+	unsigned int i;
+
+	/* walk the used entries from head to tail */
+	for (i = 0; i < f->used; i++) {
+		if (f->data[(f->head + i) % f->length] == id)
+			return 1;
+	}
+
+	return 0;
+}
+
+#if 0
+/* returns true/false */
+static inline int vino_fifo_full(struct vino_framebuffer_fifo *f)
+{
+	return (f->used == f->length);
+}
+#endif
+
+static inline unsigned int vino_fifo_get_used(struct vino_framebuffer_fifo *f)
+{
+	return f->used;
+}
+
+static int vino_fifo_enqueue(struct vino_framebuffer_fifo *f, unsigned int id)
+{
+	if (id >= f->length) {
+		return VINO_QUEUE_ERROR;
+	}
+
+	if (vino_fifo_has_id(f, id)) {
+		return VINO_QUEUE_ERROR;
+	}
+
+	if (f->used < f->length) {
+		f->data[f->tail] = id;
+		f->tail = (f->tail + 1) % f->length;
+		f->used++;
+	} else {
+		return VINO_QUEUE_ERROR;
+	}
+
+	return 0;
+}
+
+static int vino_fifo_peek(struct vino_framebuffer_fifo *f, unsigned int *id)
+{
+	if (f->used > 0) {
+		*id = f->data[f->head];
+	} else {
+		return VINO_QUEUE_ERROR;
+	}
+
+	return 0;
+}
+
+static int vino_fifo_dequeue(struct vino_framebuffer_fifo *f, unsigned int *id)
+{
+	if (f->used > 0) {
+		*id = f->data[f->head];
+		f->head = (f->head + 1) % f->length;
+		f->used--;
+	} else {
+		return VINO_QUEUE_ERROR;
+	}
+
+	return 0;
+}
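+/* How the two fifos are used by the framebuffer queue below (summary,
+ * not part of the original source): buffer ids queued by userspace first
+ * enter the "in" fifo via vino_queue_add(); when a frame completes,
+ * vino_frame_done() calls vino_queue_transfer() to move the id from "in"
+ * to "out"; the finished buffer is finally handed back by
+ * vino_queue_remove(), which pops it from "out". */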
+
+/* Framebuffer queue functions */
+
+/* execute with queue_lock locked */
+static void vino_queue_free_with_count(struct vino_framebuffer_queue *q,
+				       unsigned int length)
+{
+	unsigned int i;
+
+	q->length = 0;
+	memset(&q->in, 0, sizeof(struct vino_framebuffer_fifo));
+	memset(&q->out, 0, sizeof(struct vino_framebuffer_fifo));
+	for (i = 0; i < length; i++) {
+		dprintk("vino_queue_free_with_count(): freeing buffer %d\n",
+			i);
+		vino_free_buffer(q->buffer[i]);
+		kfree(q->buffer[i]);
+	}
+
+	q->type = VINO_MEMORY_NONE;
+	q->magic = 0;
+}
+
+static void vino_queue_free(struct vino_framebuffer_queue *q)
+{
+	dprintk("vino_queue_free():\n");
+
+	if (q->magic != VINO_QUEUE_MAGIC)
+		return;
+	if (q->type != VINO_MEMORY_MMAP)
+		return;
+
+	mutex_lock(&q->queue_mutex);
+
+	vino_queue_free_with_count(q, q->length);
+
+	mutex_unlock(&q->queue_mutex);
+}
+
+static int vino_queue_init(struct vino_framebuffer_queue *q,
+			   unsigned int *length)
+{
+	unsigned int i;
+	int ret = 0;
+
+	dprintk("vino_queue_init(): length = %d\n", *length);
+
+	if (q->magic == VINO_QUEUE_MAGIC) {
+		dprintk("vino_queue_init(): queue already initialized!\n");
+		return -EINVAL;
+	}
+
+	if (q->type != VINO_MEMORY_NONE) {
+		dprintk("vino_queue_init(): queue already initialized!\n");
+		return -EINVAL;
+	}
+
+	if (*length < 1)
+		return -EINVAL;
+
+	mutex_lock(&q->queue_mutex);
+
+	if (*length > VINO_FRAMEBUFFER_COUNT_MAX)
+		*length = VINO_FRAMEBUFFER_COUNT_MAX;
+
+	q->length = 0;
+
+	for (i = 0; i < *length; i++) {
+		dprintk("vino_queue_init(): allocating buffer %d\n", i);
+		q->buffer[i] = kmalloc(sizeof(struct vino_framebuffer),
+				       GFP_KERNEL);
+		if (!q->buffer[i]) {
+			dprintk("vino_queue_init(): kmalloc() failed\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = vino_allocate_buffer(q->buffer[i],
+					   VINO_FRAMEBUFFER_SIZE);
+		if (ret) {
+			kfree(q->buffer[i]);
+			dprintk("vino_queue_init(): "
+				"vino_allocate_buffer() failed\n");
+			break;
+		}
+
+		q->buffer[i]->id = i;
+		if (i > 0) {
+			q->buffer[i]->offset = q->buffer[i - 1]->offset +
+				q->buffer[i - 1]->size;
+		} else {
+			q->buffer[i]->offset = 0;
+		}
+
+		spin_lock_init(&q->buffer[i]->state_lock);
+
+		dprintk("vino_queue_init(): buffer = %d, offset = %d, "
+			"size = %d\n", i, q->buffer[i]->offset,
+			q->buffer[i]->size);
+	}
+
+	if (ret) {
+		vino_queue_free_with_count(q, i);
+		*length = 0;
+	} else {
+		q->length = *length;
+		vino_fifo_init(&q->in, q->length);
+		vino_fifo_init(&q->out, q->length);
+		q->type = VINO_MEMORY_MMAP;
+		q->magic = VINO_QUEUE_MAGIC;
+	}
+
+	mutex_unlock(&q->queue_mutex);
+
+	return ret;
+}
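+/* Note (not part of the original source): the offsets assigned above
+ * stack the buffers back to back, so buffer i starts at the sum of the
+ * sizes of buffers 0..i-1; userspace is expected to pass that offset to
+ * mmap() to map a particular buffer. */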
+
+static struct vino_framebuffer *vino_queue_add(struct
+					       vino_framebuffer_queue *q,
+					       unsigned int id)
+{
+	struct vino_framebuffer *ret = NULL;
+	unsigned int total;
+	unsigned long flags;
+
+	dprintk("vino_queue_add(): id = %d\n", id);
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	if (id >= q->length)
+		goto out;
+
+	/* not needed?: if (vino_fifo_full(&q->out)) {
+		goto out;
+		}*/
+	/* check that outgoing queue isn't already full
+	 * (or that it won't become full) */
+	total = vino_fifo_get_used(&q->in) +
+		vino_fifo_get_used(&q->out);
+	if (total >= q->length)
+		goto out;
+
+	if (vino_fifo_enqueue(&q->in, id))
+		goto out;
+
+	ret = q->buffer[id];
+
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+static struct vino_framebuffer *vino_queue_transfer(struct
+						    vino_framebuffer_queue *q)
+{
+	struct vino_framebuffer *ret = NULL;
+	struct vino_framebuffer *fb;
+	unsigned int id;
+	unsigned long flags;
+
+	dprintk("vino_queue_transfer():\n");
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	// now this actually removes an entry from the incoming queue
+	if (vino_fifo_dequeue(&q->in, &id)) {
+		goto out;
+	}
+
+	dprintk("vino_queue_transfer(): id = %d\n", id);
+	fb = q->buffer[id];
+
+	// we have already checked that the outgoing queue is not full, but...
+	if (vino_fifo_enqueue(&q->out, id)) {
+		printk(KERN_ERR "vino_queue_transfer(): "
+		       "outgoing queue is full, this shouldn't happen!\n");
+		goto out;
+	}
+
+	ret = fb;
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+/* returns true/false */
+static int vino_queue_incoming_contains(struct vino_framebuffer_queue *q,
+					unsigned int id)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	ret = vino_fifo_has_id(&q->in, id);
+
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+/* returns true/false */
+static int vino_queue_outgoing_contains(struct vino_framebuffer_queue *q,
+					unsigned int id)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	ret = vino_fifo_has_id(&q->out, id);
+
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+static int vino_queue_get_incoming(struct vino_framebuffer_queue *q,
+				   unsigned int *used)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return VINO_QUEUE_ERROR;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0) {
+		ret = VINO_QUEUE_ERROR;
+		goto out;
+	}
+
+	*used = vino_fifo_get_used(&q->in);
+
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+static int vino_queue_get_outgoing(struct vino_framebuffer_queue *q,
+				   unsigned int *used)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return VINO_QUEUE_ERROR;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0) {
+		ret = VINO_QUEUE_ERROR;
+		goto out;
+	}
+
+	*used = vino_fifo_get_used(&q->out);
+
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+#if 0
+static int vino_queue_get_total(struct vino_framebuffer_queue *q,
+				unsigned int *total)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return VINO_QUEUE_ERROR;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0) {
+		ret = VINO_QUEUE_ERROR;
+		goto out;
+	}
+
+	*total = vino_fifo_get_used(&q->in) +
+		vino_fifo_get_used(&q->out);
+
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+#endif
+
+static struct vino_framebuffer *vino_queue_peek(struct
+						vino_framebuffer_queue *q,
+						unsigned int *id)
+{
+	struct vino_framebuffer *ret = NULL;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	if (vino_fifo_peek(&q->in, id)) {
+		goto out;
+	}
+
+	ret = q->buffer[*id];
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+static struct vino_framebuffer *vino_queue_remove(struct
+						  vino_framebuffer_queue *q,
+						  unsigned int *id)
+{
+	struct vino_framebuffer *ret = NULL;
+	unsigned long flags;
+	dprintk("vino_queue_remove():\n");
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	if (vino_fifo_dequeue(&q->out, id)) {
+		goto out;
+	}
+
+	dprintk("vino_queue_remove(): id = %d\n", *id);
+	ret = q->buffer[*id];
+out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+static struct
+vino_framebuffer *vino_queue_get_buffer(struct vino_framebuffer_queue *q,
+					unsigned int id)
+{
+	struct vino_framebuffer *ret = NULL;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+
+	if (q->length == 0)
+		goto out;
+
+	if (id >= q->length)
+		goto out;
+
+	ret = q->buffer[id];
+ out:
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+static unsigned int vino_queue_get_length(struct vino_framebuffer_queue *q)
+{
+	unsigned int length = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return length;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+	length = q->length;
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return length;
+}
+
+static int vino_queue_has_mapped_buffers(struct vino_framebuffer_queue *q)
+{
+	unsigned int i;
+	int ret = 0;
+	unsigned long flags;
+
+	if (q->magic != VINO_QUEUE_MAGIC) {
+		return ret;
+	}
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+	for (i = 0; i < q->length; i++) {
+		if (q->buffer[i]->map_count > 0) {
+			ret = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&q->queue_lock, flags);
+
+	return ret;
+}
+
+/* VINO functions */
+
+/* execute with input_lock locked */
+static void vino_update_line_size(struct vino_channel_settings *vcs)
+{
+	unsigned int w = vcs->clipping.right - vcs->clipping.left;
+	unsigned int d = vcs->decimation;
+	unsigned int bpp = vino_data_formats[vcs->data_format].bpp;
+	unsigned int lsize;
+
+	dprintk("update_line_size(): before: w = %d, d = %d, "
+		"line_size = %d\n", w, d, vcs->line_size);
+
+	/* line size must be multiple of 8 bytes */
+	lsize = (bpp * (w / d)) & ~7;
+	w = (lsize / bpp) * d;
+
+	vcs->clipping.right = vcs->clipping.left + w;
+	vcs->line_size = lsize;
+
+	dprintk("update_line_size(): after: w = %d, d = %d, "
+		"line_size = %d\n", w, d, vcs->line_size);
+}
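+/* Worked example of the rounding above (illustrative only): for 8-bit
+ * greyscale (bpp = 1), decimation d = 1 and a requested width of 642
+ * pixels, lsize = 642 & ~7 = 640, so the clip width is trimmed back to
+ * 640 and clipping.right moves in by two pixels. */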
+
+/* execute with input_lock locked */
+static void vino_set_clipping(struct vino_channel_settings *vcs,
+			      unsigned int x, unsigned int y,
+			      unsigned int w, unsigned int h)
+{
+	unsigned int maxwidth, maxheight;
+	unsigned int d;
+
+	maxwidth = vino_data_norms[vcs->data_norm].width;
+	maxheight = vino_data_norms[vcs->data_norm].height;
+	d = vcs->decimation;
+
+	y &= ~1;	/* odd/even fields */
+
+	if (x > maxwidth) {
+		x = 0;
+	}
+	if (y > maxheight) {
+		y = 0;
+	}
+
+	if (((w / d) < VINO_MIN_WIDTH)
+	    || ((h / d) < VINO_MIN_HEIGHT)) {
+		w = VINO_MIN_WIDTH * d;
+		h = VINO_MIN_HEIGHT * d;
+	}
+
+	if ((x + w) > maxwidth) {
+		w = maxwidth - x;
+		if ((w / d) < VINO_MIN_WIDTH)
+			x = maxwidth - VINO_MIN_WIDTH * d;
+	}
+	if ((y + h) > maxheight) {
+		h = maxheight - y;
+		if ((h / d) < VINO_MIN_HEIGHT)
+			y = maxheight - VINO_MIN_HEIGHT * d;
+	}
+
+	vcs->clipping.left = x;
+	vcs->clipping.top = y;
+	vcs->clipping.right = x + w;
+	vcs->clipping.bottom = y + h;
+
+	vino_update_line_size(vcs);
+
+	dprintk("clipping %d, %d, %d, %d / %d - %d\n",
+		vcs->clipping.left, vcs->clipping.top, vcs->clipping.right,
+		vcs->clipping.bottom, vcs->decimation, vcs->line_size);
+}
+
+/* execute with input_lock locked */
+static inline void vino_set_default_clipping(struct vino_channel_settings *vcs)
+{
+	vino_set_clipping(vcs, 0, 0, vino_data_norms[vcs->data_norm].width,
+			  vino_data_norms[vcs->data_norm].height);
+}
+
+/* execute with input_lock locked */
+static void vino_set_scaling(struct vino_channel_settings *vcs,
+			     unsigned int w, unsigned int h)
+{
+	unsigned int x, y, curw, curh, d;
+
+	x = vcs->clipping.left;
+	y = vcs->clipping.top;
+	curw = vcs->clipping.right - vcs->clipping.left;
+	curh = vcs->clipping.bottom - vcs->clipping.top;
+
+	d = max(curw / w, curh / h);
+
+	dprintk("scaling w: %d, h: %d, curw: %d, curh: %d, d: %d\n",
+		w, h, curw, curh, d);
+
+	if (d < 1) {
+		d = 1;
+	} else if (d > 8) {
+		d = 8;
+	}
+
+	vcs->decimation = d;
+	vino_set_clipping(vcs, x, y, w * d, h * d);
+
+	dprintk("scaling %d, %d, %d, %d / %d - %d\n", vcs->clipping.left,
+		vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom,
+		vcs->decimation, vcs->line_size);
+}
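+/* Worked example (illustrative only): requesting 320x240 from a full
+ * 640x480 NTSC clip gives d = max(640 / 320, 480 / 240) = 2, so the
+ * hardware decimates by two and the clipping window stays at 640x480
+ * (2 * 320 by 2 * 240). */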
+
+/* execute with input_lock locked */
+static inline void vino_set_default_scaling(struct vino_channel_settings *vcs)
+{
+	vino_set_scaling(vcs, vcs->clipping.right - vcs->clipping.left,
+			 vcs->clipping.bottom - vcs->clipping.top);
+}
+
+/* execute with input_lock locked */
+static void vino_set_framerate(struct vino_channel_settings *vcs,
+			       unsigned int fps)
+{
+	unsigned int mask;
+
+	switch (vcs->data_norm) {
+	case VINO_DATA_NORM_NTSC:
+	case VINO_DATA_NORM_D1:
+		fps = (unsigned int)(fps / 6) * 6; // FIXME: round!
+
+		if (fps < vino_data_norms[vcs->data_norm].fps_min)
+			fps = vino_data_norms[vcs->data_norm].fps_min;
+		if (fps > vino_data_norms[vcs->data_norm].fps_max)
+			fps = vino_data_norms[vcs->data_norm].fps_max;
+
+		switch (fps) {
+		case 6:
+			mask = 0x003;
+			break;
+		case 12:
+			mask = 0x0c3;
+			break;
+		case 18:
+			mask = 0x333;
+			break;
+		case 24:
+			mask = 0x3ff;
+			break;
+		case 30:
+			mask = 0xfff;
+			break;
+		default:
+			mask = VINO_FRAMERT_FULL;
+		}
+		vcs->framert_reg = VINO_FRAMERT_RT(mask);
+		break;
+	case VINO_DATA_NORM_PAL:
+	case VINO_DATA_NORM_SECAM:
+		fps = (unsigned int)(fps / 5) * 5; // FIXME: round!
+
+		if (fps < vino_data_norms[vcs->data_norm].fps_min)
+			fps = vino_data_norms[vcs->data_norm].fps_min;
+		if (fps > vino_data_norms[vcs->data_norm].fps_max)
+			fps = vino_data_norms[vcs->data_norm].fps_max;
+
+		switch (fps) {
+		case 5:
+			mask = 0x003;
+			break;
+		case 10:
+			mask = 0x0c3;
+			break;
+		case 15:
+			mask = 0x333;
+			break;
+		case 20:
+			mask = 0x0ff;
+			break;
+		case 25:
+			mask = 0x3ff;
+			break;
+		default:
+			mask = VINO_FRAMERT_FULL;
+		}
+		vcs->framert_reg = VINO_FRAMERT_RT(mask) | VINO_FRAMERT_PAL;
+		break;
+	}
+
+	vcs->fps = fps;
+}
+
+/* execute with input_lock locked */
+static inline void vino_set_default_framerate(struct
+					      vino_channel_settings *vcs)
+{
+	vino_set_framerate(vcs, vino_data_norms[vcs->data_norm].fps_max);
+}
+
+/* VINO I2C bus functions */
+
+struct i2c_algo_sgi_data {
+	void *data;	/* private data for lowlevel routines */
+	unsigned (*getctrl)(void *data);
+	void (*setctrl)(void *data, unsigned val);
+	unsigned (*rdata)(void *data);
+	void (*wdata)(void *data, unsigned val);
+
+	int xfer_timeout;
+	int ack_timeout;
+};
+
+static int wait_xfer_done(struct i2c_algo_sgi_data *adap)
+{
+	int i;
+
+	for (i = 0; i < adap->xfer_timeout; i++) {
+		if ((adap->getctrl(adap->data) & SGI_I2C_XFER_BUSY) == 0)
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int wait_ack(struct i2c_algo_sgi_data *adap)
+{
+	int i;
+
+	if (wait_xfer_done(adap))
+		return -ETIMEDOUT;
+	for (i = 0; i < adap->ack_timeout; i++) {
+		if ((adap->getctrl(adap->data) & SGI_I2C_NACK) == 0)
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int force_idle(struct i2c_algo_sgi_data *adap)
+{
+	int i;
+
+	adap->setctrl(adap->data, SGI_I2C_FORCE_IDLE);
+	for (i = 0; i < adap->xfer_timeout; i++) {
+		if ((adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE) == 0)
+			goto out;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+out:
+	if (adap->getctrl(adap->data) & SGI_I2C_BUS_ERR)
+		return -EIO;
+	return 0;
+}
+
+static int do_address(struct i2c_algo_sgi_data *adap, unsigned int addr,
+		      int rd)
+{
+	if (rd)
+		adap->setctrl(adap->data, SGI_I2C_NOT_IDLE);
+	/* Check if the bus is idle; if not, force it idle */
+	if (adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE)
+		if (force_idle(adap))
+			return -EIO;
+	/* Write out the i2c chip address and specify operation */
+	adap->setctrl(adap->data,
+		      SGI_I2C_HOLD_BUS | SGI_I2C_WRITE | SGI_I2C_NOT_IDLE);
+	if (rd)
+		addr |= 1;
+	adap->wdata(adap->data, addr);
+	if (wait_ack(adap))
+		return -EIO;
+	return 0;
+}
+
+static int i2c_read(struct i2c_algo_sgi_data *adap, unsigned char *buf,
+		    unsigned int len)
+{
+	int i;
+
+	adap->setctrl(adap->data,
+		      SGI_I2C_HOLD_BUS | SGI_I2C_READ | SGI_I2C_NOT_IDLE);
+	for (i = 0; i < len; i++) {
+		if (wait_xfer_done(adap))
+			return -EIO;
+		buf[i] = adap->rdata(adap->data);
+	}
+	adap->setctrl(adap->data, SGI_I2C_RELEASE_BUS | SGI_I2C_FORCE_IDLE);
+
+	return 0;
+
+}
+
+static int i2c_write(struct i2c_algo_sgi_data *adap, unsigned char *buf,
+		     unsigned int len)
+{
+	int i;
+
+	/* We are already in write state */
+	for (i = 0; i < len; i++) {
+		adap->wdata(adap->data, buf[i]);
+		if (wait_ack(adap))
+			return -EIO;
+	}
+	return 0;
+}
+
+static int sgi_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+		    int num)
+{
+	struct i2c_algo_sgi_data *adap = i2c_adap->algo_data;
+	struct i2c_msg *p;
+	int i, err = 0;
+
+	for (i = 0; !err && i < num; i++) {
+		p = &msgs[i];
+		err = do_address(adap, p->addr, p->flags & I2C_M_RD);
+		if (err || !p->len)
+			continue;
+		if (p->flags & I2C_M_RD)
+			err = i2c_read(adap, p->buf, p->len);
+		else
+			err = i2c_write(adap, p->buf, p->len);
+	}
+
+	return (err < 0) ? err : i;
+}
+
+static u32 sgi_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm sgi_algo = {
+	.master_xfer	= sgi_xfer,
+	.functionality	= sgi_func,
+};
+
+static unsigned i2c_vino_getctrl(void *data)
+{
+	return vino->i2c_control;
+}
+
+static void i2c_vino_setctrl(void *data, unsigned val)
+{
+	vino->i2c_control = val;
+}
+
+static unsigned i2c_vino_rdata(void *data)
+{
+	return vino->i2c_data;
+}
+
+static void i2c_vino_wdata(void *data, unsigned val)
+{
+	vino->i2c_data = val;
+}
+
+static struct i2c_algo_sgi_data i2c_sgi_vino_data = {
+	.getctrl = &i2c_vino_getctrl,
+	.setctrl = &i2c_vino_setctrl,
+	.rdata   = &i2c_vino_rdata,
+	.wdata   = &i2c_vino_wdata,
+	.xfer_timeout = 200,
+	.ack_timeout  = 1000,
+};
+
+static struct i2c_adapter vino_i2c_adapter = {
+	.name			= "VINO I2C bus",
+	.algo			= &sgi_algo,
+	.algo_data		= &i2c_sgi_vino_data,
+	.owner 			= THIS_MODULE,
+};
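+/* Summary of the algorithm above (not part of the original source): for
+ * each i2c_msg, sgi_xfer() first addresses the chip with do_address(),
+ * forcing the bus idle if needed, then moves p->len bytes with i2c_read()
+ * or i2c_write() depending on I2C_M_RD, polling the VINO i2c_control
+ * register through the getctrl/setctrl hooks. */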
+
+/*
+ * Prepare VINO for DMA transfer...
+ * (execute only with vino_lock and input_lock locked)
+ */
+static int vino_dma_setup(struct vino_channel_settings *vcs,
+			  struct vino_framebuffer *fb)
+{
+	u32 ctrl, intr;
+	struct sgi_vino_channel *ch;
+	const struct vino_data_norm *norm;
+
+	dprintk("vino_dma_setup():\n");
+
+	vcs->field = 0;
+	fb->frame_counter = 0;
+
+	ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
+	norm = &vino_data_norms[vcs->data_norm];
+
+	ch->page_index = 0;
+	ch->line_count = 0;
+
+	/* the VINO line size register holds the actual line size minus 8 bytes */
+	ch->line_size = vcs->line_size - 8;
+
+	/* let VINO know where to transfer data */
+	ch->start_desc_tbl = fb->desc_table.dma;
+	ch->next_4_desc = fb->desc_table.dma;
+
+	/* give vino time to fetch the first four descriptors, 5 usec
+	 * should be more than enough time */
+	udelay(VINO_DESC_FETCH_DELAY);
+
+	dprintk("vino_dma_setup(): start desc = %08x, next 4 desc = %08x\n",
+		ch->start_desc_tbl, ch->next_4_desc);
+
+	/* set the alpha register */
+	ch->alpha = vcs->alpha;
+
+	/* set clipping registers */
+	ch->clip_start = VINO_CLIP_ODD(norm->odd.top + vcs->clipping.top / 2) |
+		VINO_CLIP_EVEN(norm->even.top +
+			       vcs->clipping.top / 2) |
+		VINO_CLIP_X(vcs->clipping.left);
+	ch->clip_end = VINO_CLIP_ODD(norm->odd.top +
+				     vcs->clipping.bottom / 2 - 1) |
+		VINO_CLIP_EVEN(norm->even.top +
+			       vcs->clipping.bottom / 2 - 1) |
+		VINO_CLIP_X(vcs->clipping.right);
+
+	/* set the size of actual content in the buffer (DECIMATION !) */
+	fb->data_size = ((vcs->clipping.right - vcs->clipping.left) /
+			 vcs->decimation) *
+		((vcs->clipping.bottom - vcs->clipping.top) /
+		 vcs->decimation) *
+		vino_data_formats[vcs->data_format].bpp;
+
+	ch->frame_rate = vcs->framert_reg;
+
+	ctrl = vino->control;
+	intr = vino->intr_status;
+
+	if (vcs->channel == VINO_CHANNEL_A) {
+		/* all interrupt conditions for this channel were cleared,
+		 * so clear the interrupt status register and enable
+		 * interrupts */
+		intr &=	~VINO_INTSTAT_A;
+		ctrl |= VINO_CTRL_A_INT;
+
+		/* enable synchronization */
+		ctrl |= VINO_CTRL_A_SYNC_ENBL;
+
+		/* enable frame assembly */
+		ctrl |= VINO_CTRL_A_INTERLEAVE_ENBL;
+
+		/* set decimation used */
+		if (vcs->decimation < 2)
+			ctrl &= ~VINO_CTRL_A_DEC_ENBL;
+		else {
+			ctrl |= VINO_CTRL_A_DEC_ENBL;
+			ctrl &= ~VINO_CTRL_A_DEC_SCALE_MASK;
+			ctrl |= (vcs->decimation - 1) <<
+				VINO_CTRL_A_DEC_SCALE_SHIFT;
+		}
+
+		/* select input interface */
+		if (vcs->input == VINO_INPUT_D1)
+			ctrl |= VINO_CTRL_A_SELECT;
+		else
+			ctrl &= ~VINO_CTRL_A_SELECT;
+
+		/* palette */
+		ctrl &= ~(VINO_CTRL_A_LUMA_ONLY | VINO_CTRL_A_RGB |
+			  VINO_CTRL_A_DITHER);
+	} else {
+		intr &= ~VINO_INTSTAT_B;
+		ctrl |= VINO_CTRL_B_INT;
+
+		ctrl |= VINO_CTRL_B_SYNC_ENBL;
+		ctrl |= VINO_CTRL_B_INTERLEAVE_ENBL;
+
+		if (vcs->decimation < 2)
+			ctrl &= ~VINO_CTRL_B_DEC_ENBL;
+		else {
+			ctrl |= VINO_CTRL_B_DEC_ENBL;
+			ctrl &= ~VINO_CTRL_B_DEC_SCALE_MASK;
+			ctrl |= (vcs->decimation - 1) <<
+				VINO_CTRL_B_DEC_SCALE_SHIFT;
+
+		}
+		if (vcs->input == VINO_INPUT_D1)
+			ctrl |= VINO_CTRL_B_SELECT;
+		else
+			ctrl &= ~VINO_CTRL_B_SELECT;
+
+		ctrl &= ~(VINO_CTRL_B_LUMA_ONLY | VINO_CTRL_B_RGB |
+			  VINO_CTRL_B_DITHER);
+	}
+
+	/* set palette */
+	fb->data_format = vcs->data_format;
+
+	switch (vcs->data_format) {
+		case VINO_DATA_FMT_GREY:
+			ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
+				VINO_CTRL_A_LUMA_ONLY : VINO_CTRL_B_LUMA_ONLY;
+			break;
+		case VINO_DATA_FMT_RGB32:
+			ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
+				VINO_CTRL_A_RGB : VINO_CTRL_B_RGB;
+			break;
+		case VINO_DATA_FMT_YUV:
+			/* nothing needs to be done */
+			break;
+		case VINO_DATA_FMT_RGB332:
+			ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
+				VINO_CTRL_A_RGB | VINO_CTRL_A_DITHER :
+				VINO_CTRL_B_RGB | VINO_CTRL_B_DITHER;
+			break;
+	}
+
+	vino->intr_status = intr;
+	vino->control = ctrl;
+
+	return 0;
+}
+
+/* (execute only with vino_lock locked) */
+static inline void vino_dma_start(struct vino_channel_settings *vcs)
+{
+	u32 ctrl = vino->control;
+
+	dprintk("vino_dma_start():\n");
+	ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
+		VINO_CTRL_A_DMA_ENBL : VINO_CTRL_B_DMA_ENBL;
+	vino->control = ctrl;
+}
+
+/* (execute only with vino_lock locked) */
+static inline void vino_dma_stop(struct vino_channel_settings *vcs)
+{
+	u32 ctrl = vino->control;
+
+	ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
+		~VINO_CTRL_A_DMA_ENBL : ~VINO_CTRL_B_DMA_ENBL;
+	ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
+		~VINO_CTRL_A_INT : ~VINO_CTRL_B_INT;
+	vino->control = ctrl;
+	dprintk("vino_dma_stop():\n");
+}
+
+/*
+ * Load the dummy page into the descriptor registers. This prevents the
+ * generation of spurious interrupts. (execute only with vino_lock locked)
+ */
+static void vino_clear_interrupt(struct vino_channel_settings *vcs)
+{
+	struct sgi_vino_channel *ch;
+
+	ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
+
+	ch->page_index = 0;
+	ch->line_count = 0;
+
+	ch->start_desc_tbl = vino_drvdata->dummy_desc_table.dma;
+	ch->next_4_desc = vino_drvdata->dummy_desc_table.dma;
+
+	udelay(VINO_DESC_FETCH_DELAY);
+	dprintk("channel %c clear interrupt condition\n",
+	       (vcs->channel == VINO_CHANNEL_A) ? 'A':'B');
+}
+
+static int vino_capture(struct vino_channel_settings *vcs,
+			struct vino_framebuffer *fb)
+{
+	int err = 0;
+	unsigned long flags, flags2;
+
+	spin_lock_irqsave(&fb->state_lock, flags);
+
+	if (fb->state == VINO_FRAMEBUFFER_IN_USE)
+		err = -EBUSY;
+	fb->state = VINO_FRAMEBUFFER_IN_USE;
+
+	spin_unlock_irqrestore(&fb->state_lock, flags);
+
+	if (err)
+		return err;
+
+	spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags2);
+
+	vino_dma_setup(vcs, fb);
+	vino_dma_start(vcs);
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags2);
+	spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
+
+	return err;
+}
+
+static
+struct vino_framebuffer *vino_capture_enqueue(struct
+					      vino_channel_settings *vcs,
+					      unsigned int index)
+{
+	struct vino_framebuffer *fb;
+	unsigned long flags;
+
+	dprintk("vino_capture_enqueue():\n");
+
+	spin_lock_irqsave(&vcs->capture_lock, flags);
+
+	fb = vino_queue_add(&vcs->fb_queue, index);
+	if (fb == NULL) {
+		dprintk("vino_capture_enqueue(): vino_queue_add() failed, "
+			"queue full?\n");
+		goto out;
+	}
+out:
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+
+	return fb;
+}
+
+static int vino_capture_next(struct vino_channel_settings *vcs, int start)
+{
+	struct vino_framebuffer *fb;
+	unsigned int incoming, id;
+	int err = 0;
+	unsigned long flags;
+
+	dprintk("vino_capture_next():\n");
+
+	spin_lock_irqsave(&vcs->capture_lock, flags);
+
+	if (start) {
+		/* start capture only if capture isn't in progress already */
+		if (vcs->capturing) {
+			spin_unlock_irqrestore(&vcs->capture_lock, flags);
+			return 0;
+		}
+
+	} else {
+		/* capture next frame:
+		 * stop capture if capturing is not set */
+		if (!vcs->capturing) {
+			spin_unlock_irqrestore(&vcs->capture_lock, flags);
+			return 0;
+		}
+	}
+
+	err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
+	if (err) {
+		dprintk("vino_capture_next(): vino_queue_get_incoming() "
+			"failed\n");
+		err = -EINVAL;
+		goto out;
+	}
+	if (incoming == 0) {
+		dprintk("vino_capture_next(): no buffers available\n");
+		goto out;
+	}
+
+	fb = vino_queue_peek(&vcs->fb_queue, &id);
+	if (fb == NULL) {
+		dprintk("vino_capture_next(): vino_queue_peek() failed\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (start) {
+		vcs->capturing = 1;
+	}
+
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+
+	err = vino_capture(vcs, fb);
+
+	return err;
+
+out:
+	vcs->capturing = 0;
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+
+	return err;
+}
+
+static inline int vino_is_capturing(struct vino_channel_settings *vcs)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vcs->capture_lock, flags);
+
+	ret = vcs->capturing;
+
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+
+	return ret;
+}
+
+/* waits until a frame is captured */
+static int vino_wait_for_frame(struct vino_channel_settings *vcs)
+{
+	wait_queue_t wait;
+	int err = 0;
+
+	dprintk("vino_wait_for_frame():\n");
+
+	init_waitqueue_entry(&wait, current);
+	/* add ourselves into wait queue */
+	add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
+
+	/* to ensure that schedule_timeout will return immediately
+	 * if VINO interrupt was triggered meanwhile */
+	schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+	if (signal_pending(current))
+		err = -EINTR;
+
+	remove_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
+
+	dprintk("vino_wait_for_frame(): waiting for frame %s\n",
+		err ? "failed" : "ok");
+
+	return err;
+}
+
+/* the function assumes that PAGE_SIZE % 4 == 0 */
+static void vino_convert_to_rgba(struct vino_framebuffer *fb)
+{
+	unsigned char *pageptr;
+	unsigned int page, i;
+	unsigned char a, b;
+
+	for (page = 0; page < fb->desc_table.page_count; page++) {
+		pageptr = (unsigned char *)fb->desc_table.virtual[page];
+
+		for (i = 0; i < PAGE_SIZE; i += 4) {
+			/* reverse the byte order: ABGR -> RGBA */
+			a = pageptr[0];
+			b = pageptr[1];
+			pageptr[0] = pageptr[3];
+			pageptr[1] = pageptr[2];
+			pageptr[2] = b;
+			pageptr[3] = a;
+			pageptr += 4;
+		}
+	}
+}
+
+/* checks if the buffer is in correct state and syncs data */
+static int vino_check_buffer(struct vino_channel_settings *vcs,
+			     struct vino_framebuffer *fb)
+{
+	int err = 0;
+	unsigned long flags;
+
+	dprintk("vino_check_buffer():\n");
+
+	spin_lock_irqsave(&fb->state_lock, flags);
+	switch (fb->state) {
+	case VINO_FRAMEBUFFER_IN_USE:
+		err = -EIO;
+		break;
+	case VINO_FRAMEBUFFER_READY:
+		vino_sync_buffer(fb);
+		fb->state = VINO_FRAMEBUFFER_UNUSED;
+		break;
+	default:
+		err = -EINVAL;
+	}
+	spin_unlock_irqrestore(&fb->state_lock, flags);
+
+	if (!err) {
+		if (vino_pixel_conversion
+		    && (fb->data_format == VINO_DATA_FMT_RGB32)) {
+			vino_convert_to_rgba(fb);
+		}
+	} else if (err && (err != -EINVAL)) {
+		dprintk("vino_check_buffer(): buffer not ready\n");
+
+		spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
+		vino_dma_stop(vcs);
+		vino_clear_interrupt(vcs);
+		spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
+	}
+
+	return err;
+}
+
+/* forcefully terminates capture */
+static void vino_capture_stop(struct vino_channel_settings *vcs)
+{
+	unsigned int incoming = 0, outgoing = 0, id;
+	unsigned long flags, flags2;
+
+	dprintk("vino_capture_stop():\n");
+
+	spin_lock_irqsave(&vcs->capture_lock, flags);
+
+	/* unset capturing to stop queue processing */
+	vcs->capturing = 0;
+
+	spin_lock_irqsave(&vino_drvdata->vino_lock, flags2);
+
+	vino_dma_stop(vcs);
+	vino_clear_interrupt(vcs);
+
+	spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags2);
+
+	/* remove all items from the queue */
+	if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
+		dprintk("vino_capture_stop(): "
+			"vino_queue_get_incoming() failed\n");
+		goto out;
+	}
+	while (incoming > 0) {
+		vino_queue_transfer(&vcs->fb_queue);
+
+		if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
+			dprintk("vino_capture_stop(): "
+				"vino_queue_get_incoming() failed\n");
+			goto out;
+		}
+	}
+
+	if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
+		dprintk("vino_capture_stop(): "
+			"vino_queue_get_outgoing() failed\n");
+		goto out;
+	}
+	while (outgoing > 0) {
+		vino_queue_remove(&vcs->fb_queue, &id);
+
+		if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
+			dprintk("vino_capture_stop(): "
+				"vino_queue_get_outgoing() failed\n");
+			goto out;
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+}
+
+#if 0
+static int vino_capture_failed(struct vino_channel_settings *vcs)
+{
+	struct vino_framebuffer *fb;
+	unsigned long flags;
+	unsigned int i;
+	int ret;
+
+	dprintk("vino_capture_failed():\n");
+
+	spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
+
+	vino_dma_stop(vcs);
+	vino_clear_interrupt(vcs);
+
+	spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
+
+	ret = vino_queue_get_incoming(&vcs->fb_queue, &i);
+	if (ret == VINO_QUEUE_ERROR) {
+		dprintk("vino_queue_get_incoming() failed\n");
+		return -EINVAL;
+	}
+	if (i == 0) {
+		/* no buffers to process */
+		return 0;
+	}
+
+	fb = vino_queue_peek(&vcs->fb_queue, &i);
+	if (fb == NULL) {
+		dprintk("vino_queue_peek() failed\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&fb->state_lock, flags);
+	if (fb->state == VINO_FRAMEBUFFER_IN_USE) {
+		fb->state = VINO_FRAMEBUFFER_UNUSED;
+		vino_queue_transfer(&vcs->fb_queue);
+		vino_queue_remove(&vcs->fb_queue, &i);
+		/* we should actually discard the newest frame,
+		 * but who cares ... */
+	}
+	spin_unlock_irqrestore(&fb->state_lock, flags);
+
+	return 0;
+}
+#endif
+
+static void vino_skip_frame(struct vino_channel_settings *vcs)
+{
+	struct vino_framebuffer *fb;
+	unsigned long flags;
+	unsigned int id;
+
+	spin_lock_irqsave(&vcs->capture_lock, flags);
+	fb = vino_queue_peek(&vcs->fb_queue, &id);
+	if (!fb) {
+		spin_unlock_irqrestore(&vcs->capture_lock, flags);
+		dprintk("vino_skip_frame(): vino_queue_peek() failed!\n");
+		return;
+	}
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+
+	spin_lock_irqsave(&fb->state_lock, flags);
+	fb->state = VINO_FRAMEBUFFER_UNUSED;
+	spin_unlock_irqrestore(&fb->state_lock, flags);
+
+	vino_capture_next(vcs, 0);
+}
+
+static void vino_frame_done(struct vino_channel_settings *vcs)
+{
+	struct vino_framebuffer *fb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vcs->capture_lock, flags);
+	fb = vino_queue_transfer(&vcs->fb_queue);
+	if (!fb) {
+		spin_unlock_irqrestore(&vcs->capture_lock, flags);
+		dprintk("vino_frame_done(): vino_queue_transfer() failed!\n");
+		return;
+	}
+	spin_unlock_irqrestore(&vcs->capture_lock, flags);
+
+	fb->frame_counter = vcs->int_data.frame_counter;
+	memcpy(&fb->timestamp, &vcs->int_data.timestamp,
+	       sizeof(struct timeval));
+
+	spin_lock_irqsave(&fb->state_lock, flags);
+	if (fb->state == VINO_FRAMEBUFFER_IN_USE)
+		fb->state = VINO_FRAMEBUFFER_READY;
+	spin_unlock_irqrestore(&fb->state_lock, flags);
+
+	wake_up(&vcs->fb_queue.frame_wait_queue);
+
+	vino_capture_next(vcs, 0);
+}
+
+static void vino_capture_tasklet(unsigned long channel)
+{
+	struct vino_channel_settings *vcs;
+
+	vcs = (channel == VINO_CHANNEL_A)
+		? &vino_drvdata->a : &vino_drvdata->b;
+
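+	/* frames flagged by the interrupt handler are skipped, but one
+	 * is delivered anyway after VINO_MAX_FRAME_SKIP_COUNT consecutive
+	 * skips so that capture cannot stall indefinitely */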
+	if (vcs->int_data.skip)
+		vcs->int_data.skip_count++;
+
+	if (vcs->int_data.skip && (vcs->int_data.skip_count
+				   <= VINO_MAX_FRAME_SKIP_COUNT)) {
+		vino_skip_frame(vcs);
+	} else {
+		vcs->int_data.skip_count = 0;
+		vino_frame_done(vcs);
+	}
+}
+
+static irqreturn_t vino_interrupt(int irq, void *dev_id)
+{
+	u32 ctrl, intr;
+	unsigned int fc_a, fc_b;
+	int handled_a = 0, skip_a = 0, done_a = 0;
+	int handled_b = 0, skip_b = 0, done_b = 0;
+
+#ifdef VINO_DEBUG_INT
+	int loop = 0;
+	unsigned int line_count = vino->a.line_count,
+		page_index = vino->a.page_index,
+		field_counter = vino->a.field_counter,
+		start_desc_tbl = vino->a.start_desc_tbl,
+		next_4_desc = vino->a.next_4_desc;
+	unsigned int line_count_2,
+		page_index_2,
+		field_counter_2,
+		start_desc_tbl_2,
+		next_4_desc_2;
+#endif
+
+	spin_lock(&vino_drvdata->vino_lock);
+
+	while ((intr = vino->intr_status)) {
+		fc_a = vino->a.field_counter >> 1;
+		fc_b = vino->b.field_counter >> 1;
+
+		/* error interrupts are handled by skipping the
+		 * current frame */
+		if (intr & VINO_INTSTAT_A) {
+			if (intr & VINO_INTSTAT_A_EOF) {
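+				/* a frame consists of two fields; the
+				 * frame is complete only after the second
+				 * end-of-field interrupt */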
+				vino_drvdata->a.field++;
+				if (vino_drvdata->a.field > 1) {
+					vino_dma_stop(&vino_drvdata->a);
+					vino_clear_interrupt(&vino_drvdata->a);
+					vino_drvdata->a.field = 0;
+					done_a = 1;
+				} else {
+					if (vino->a.page_index
+					    != vino_drvdata->a.line_size) {
+						vino->a.line_count = 0;
+						vino->a.page_index =
+							vino_drvdata->
+							a.line_size;
+						vino->a.next_4_desc =
+							vino->a.start_desc_tbl;
+					}
+				}
+				dprintk("channel A end-of-field "
+					"interrupt: %04x\n", intr);
+			} else {
+				vino_dma_stop(&vino_drvdata->a);
+				vino_clear_interrupt(&vino_drvdata->a);
+				vino_drvdata->a.field = 0;
+				skip_a = 1;
+				dprintk("channel A error interrupt: %04x\n",
+					intr);
+			}
+
+#ifdef VINO_DEBUG_INT
+			line_count_2 = vino->a.line_count;
+			page_index_2 = vino->a.page_index;
+			field_counter_2 = vino->a.field_counter;
+			start_desc_tbl_2 = vino->a.start_desc_tbl;
+			next_4_desc_2 = vino->a.next_4_desc;
+
+			printk("intr = %04x, loop = %d, field = %d\n",
+			       intr, loop, vino_drvdata->a.field);
+			printk("1- line count = %04d, page index = %04d, "
+			       "start = %08x, next = %08x\n"
+			       "   fieldc = %d, framec = %d\n",
+			       line_count, page_index, start_desc_tbl,
+			       next_4_desc, field_counter, fc_a);
+			printk("2- line count = %04d, page index = %04d, "
+			       "   start = %08x, next = %08x\n",
+			       line_count_2, page_index_2, start_desc_tbl_2,
+			       next_4_desc_2);
+
+			if (done_a)
+				printk("\n");
+#endif
+		}
+
+		if (intr & VINO_INTSTAT_B) {
+			if (intr & VINO_INTSTAT_B_EOF) {
+				vino_drvdata->b.field++;
+				if (vino_drvdata->b.field > 1) {
+					vino_dma_stop(&vino_drvdata->b);
+					vino_clear_interrupt(&vino_drvdata->b);
+					vino_drvdata->b.field = 0;
+					done_b = 1;
+				}
+				dprintk("channel B end-of-field "
+					"interrupt: %04x\n", intr);
+			} else {
+				vino_dma_stop(&vino_drvdata->b);
+				vino_clear_interrupt(&vino_drvdata->b);
+				vino_drvdata->b.field = 0;
+				skip_b = 1;
+				dprintk("channel B error interrupt: %04x\n",
+					intr);
+			}
+		}
+
+		/* Always remember to clear interrupt status.
+		 * Disable VINO interrupts while we do this. */
+		ctrl = vino->control;
+		vino->control = ctrl & ~(VINO_CTRL_A_INT | VINO_CTRL_B_INT);
+		vino->intr_status = ~intr;
+		vino->control = ctrl;
+
+		spin_unlock(&vino_drvdata->vino_lock);
+
+		if ((!handled_a) && (done_a || skip_a)) {
+			if (!skip_a) {
+				do_gettimeofday(&vino_drvdata->
+						a.int_data.timestamp);
+				vino_drvdata->a.int_data.frame_counter = fc_a;
+			}
+			vino_drvdata->a.int_data.skip = skip_a;
+
+			dprintk("channel A %s, interrupt: %d\n",
+				skip_a ? "skipping frame" : "frame done",
+				intr);
+			tasklet_hi_schedule(&vino_tasklet_a);
+			handled_a = 1;
+		}
+
+		if ((!handled_b) && (done_b || skip_b)) {
+			if (!skip_b) {
+				do_gettimeofday(&vino_drvdata->
+						b.int_data.timestamp);
+				vino_drvdata->b.int_data.frame_counter = fc_b;
+			}
+			vino_drvdata->b.int_data.skip = skip_b;
+
+			dprintk("channel B %s, interrupt: %d\n",
+				skip_b ? "skipping frame" : "frame done",
+				intr);
+			tasklet_hi_schedule(&vino_tasklet_b);
+			handled_b = 1;
+		}
+
+#ifdef VINO_DEBUG_INT
+		loop++;
+#endif
+		spin_lock(&vino_drvdata->vino_lock);
+	}
+
+	spin_unlock(&vino_drvdata->vino_lock);
+
+	return IRQ_HANDLED;
+}
+
+/* VINO video input management */
+
+static int vino_get_saa7191_input(int input)
+{
+	switch (input) {
+	case VINO_INPUT_COMPOSITE:
+		return SAA7191_INPUT_COMPOSITE;
+	case VINO_INPUT_SVIDEO:
+		return SAA7191_INPUT_SVIDEO;
+	default:
+		printk(KERN_ERR "VINO: vino_get_saa7191_input(): "
+		       "invalid input!\n");
+		return -1;
+	}
+}
+
+/* execute with input_lock locked */
+static int vino_is_input_owner(struct vino_channel_settings *vcs)
+{
+	switch(vcs->input) {
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO:
+		return vino_drvdata->decoder_owner == vcs->channel;
+	case VINO_INPUT_D1:
+		return vino_drvdata->camera_owner == vcs->channel;
+	default:
+		return 0;
+	}
+}
+
+static int vino_acquire_input(struct vino_channel_settings *vcs)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	dprintk("vino_acquire_input():\n");
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	/* First try D1 and then SAA7191 */
+	if (vino_drvdata->camera
+	    && (vino_drvdata->camera_owner == VINO_NO_CHANNEL)) {
+		vino_drvdata->camera_owner = vcs->channel;
+		vcs->input = VINO_INPUT_D1;
+		vcs->data_norm = VINO_DATA_NORM_D1;
+	} else if (vino_drvdata->decoder
+		   && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
+		int input;
+		int data_norm = 0;
+		v4l2_std_id norm;
+
+		input = VINO_INPUT_COMPOSITE;
+
+		ret = decoder_call(video, s_routing,
+				vino_get_saa7191_input(input), 0, 0);
+		if (ret) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+		/* Don't hold spinlocks while auto-detecting norm
+		 * as it may take a while... */
+
+		ret = decoder_call(video, querystd, &norm);
+		if (!ret) {
+			for (data_norm = 0; data_norm < 3; data_norm++) {
+				if (vino_data_norms[data_norm].std & norm)
+					break;
+			}
+			if (data_norm == 3)
+				data_norm = VINO_DATA_NORM_PAL;
+			ret = decoder_call(core, s_std, norm);
+		}
+
+		spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+		if (ret) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		vino_drvdata->decoder_owner = vcs->channel;
+
+		vcs->input = input;
+		vcs->data_norm = data_norm;
+	} else {
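+		/* no free input is available: fall back to the input
+		 * and norm currently used by the other channel */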
+		vcs->input = (vcs->channel == VINO_CHANNEL_A) ?
+			vino_drvdata->b.input : vino_drvdata->a.input;
+		vcs->data_norm = (vcs->channel == VINO_CHANNEL_A) ?
+			vino_drvdata->b.data_norm : vino_drvdata->a.data_norm;
+	}
+
+	if (vcs->input == VINO_INPUT_NONE) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	vino_set_default_clipping(vcs);
+	vino_set_default_scaling(vcs);
+	vino_set_default_framerate(vcs);
+
+	dprintk("vino_acquire_input(): %s\n", vino_inputs[vcs->input].name);
+
+out:
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return ret;
+}
+
+static int vino_set_input(struct vino_channel_settings *vcs, int input)
+{
+	struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
+		&vino_drvdata->b : &vino_drvdata->a;
+	unsigned long flags;
+	int ret = 0;
+
+	dprintk("vino_set_input():\n");
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	if (vcs->input == input)
+		goto out;
+
+	switch (input) {
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO:
+		if (!vino_drvdata->decoder) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (vino_drvdata->decoder_owner == VINO_NO_CHANNEL) {
+			vino_drvdata->decoder_owner = vcs->channel;
+		}
+
+		if (vino_drvdata->decoder_owner == vcs->channel) {
+			int data_norm = 0;
+			v4l2_std_id norm;
+
+			ret = decoder_call(video, s_routing,
+					vino_get_saa7191_input(input), 0, 0);
+			if (ret) {
+				vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
+				ret = -EINVAL;
+				goto out;
+			}
+
+			spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+			/* Don't hold spinlocks while auto-detecting norm
+			 * as it may take a while... */
+
+			ret = decoder_call(video, querystd, &norm);
+			if (!ret) {
+				for (data_norm = 0; data_norm < 3; data_norm++) {
+					if (vino_data_norms[data_norm].std & norm)
+						break;
+				}
+				if (data_norm == 3)
+					data_norm = VINO_DATA_NORM_PAL;
+				ret = decoder_call(core, s_std, norm);
+			}
+
+			spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+			if (ret) {
+				vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
+				ret = -EINVAL;
+				goto out;
+			}
+
+			vcs->input = input;
+			vcs->data_norm = data_norm;
+		} else {
+			if (input != vcs2->input) {
+				ret = -EBUSY;
+				goto out;
+			}
+
+			vcs->input = input;
+			vcs->data_norm = vcs2->data_norm;
+		}
+
+		if (vino_drvdata->camera_owner == vcs->channel) {
+			/* Transfer the ownership or release the input */
+			if (vcs2->input == VINO_INPUT_D1) {
+				vino_drvdata->camera_owner = vcs2->channel;
+			} else {
+				vino_drvdata->camera_owner = VINO_NO_CHANNEL;
+			}
+		}
+		break;
+	case VINO_INPUT_D1:
+		if (!vino_drvdata->camera) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (vino_drvdata->camera_owner == VINO_NO_CHANNEL)
+			vino_drvdata->camera_owner = vcs->channel;
+
+		if (vino_drvdata->decoder_owner == vcs->channel) {
+			/* Transfer the ownership or release the input */
+			if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
+				 (vcs2->input == VINO_INPUT_SVIDEO)) {
+				vino_drvdata->decoder_owner = vcs2->channel;
+			} else {
+				vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
+			}
+		}
+
+		vcs->input = input;
+		vcs->data_norm = VINO_DATA_NORM_D1;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vino_set_default_clipping(vcs);
+	vino_set_default_scaling(vcs);
+	vino_set_default_framerate(vcs);
+
+	dprintk("vino_set_input(): %s\n", vino_inputs[vcs->input].name);
+
+out:
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return ret;
+}
+
+static void vino_release_input(struct vino_channel_settings *vcs)
+{
+	struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
+		&vino_drvdata->b : &vino_drvdata->a;
+	unsigned long flags;
+
+	dprintk("vino_release_input():\n");
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	/* Release ownership of the channel
+	 * and if the other channel takes input from
+	 * the same source, transfer the ownership */
+	if (vino_drvdata->camera_owner == vcs->channel) {
+		if (vcs2->input == VINO_INPUT_D1) {
+			vino_drvdata->camera_owner = vcs2->channel;
+		} else {
+			vino_drvdata->camera_owner = VINO_NO_CHANNEL;
+		}
+	} else if (vino_drvdata->decoder_owner == vcs->channel) {
+		if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
+			 (vcs2->input == VINO_INPUT_SVIDEO)) {
+			vino_drvdata->decoder_owner = vcs2->channel;
+		} else {
+			vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
+		}
+	}
+	vcs->input = VINO_INPUT_NONE;
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+}
+
+/* execute with input_lock locked */
+static int vino_set_data_norm(struct vino_channel_settings *vcs,
+			      unsigned int data_norm,
+			      unsigned long *flags)
+{
+	int err = 0;
+
+	if (data_norm == vcs->data_norm)
+		return 0;
+
+	switch (vcs->input) {
+	case VINO_INPUT_D1:
+		/* only one "norm" supported */
+		if (data_norm != VINO_DATA_NORM_D1)
+			return -EINVAL;
+		break;
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO: {
+		v4l2_std_id norm;
+
+		if ((data_norm != VINO_DATA_NORM_PAL)
+		    && (data_norm != VINO_DATA_NORM_NTSC)
+		    && (data_norm != VINO_DATA_NORM_SECAM))
+			return -EINVAL;
+
+		spin_unlock_irqrestore(&vino_drvdata->input_lock, *flags);
+
+		/* Don't hold spinlocks while setting norm
+		 * as it may take a while... */
+
+		norm = vino_data_norms[data_norm].std;
+		err = decoder_call(core, s_std, norm);
+
+		spin_lock_irqsave(&vino_drvdata->input_lock, *flags);
+
+		if (err)
+			goto out;
+
+		vcs->data_norm = data_norm;
+
+		vino_set_default_clipping(vcs);
+		vino_set_default_scaling(vcs);
+		vino_set_default_framerate(vcs);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
+out:
+	return err;
+}
+
+/* V4L2 helper functions */
+
+static int vino_find_data_format(__u32 pixelformat)
+{
+	int i;
+
+	for (i = 0; i < VINO_DATA_FMT_COUNT; i++) {
+		if (vino_data_formats[i].pixelformat == pixelformat)
+			return i;
+	}
+
+	return VINO_DATA_FMT_NONE;
+}
+
+static int vino_int_enum_input(struct vino_channel_settings *vcs, __u32 index)
+{
+	int input = VINO_INPUT_NONE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+	if (vino_drvdata->decoder && vino_drvdata->camera) {
+		switch (index) {
+		case 0:
+			input = VINO_INPUT_COMPOSITE;
+			break;
+		case 1:
+			input = VINO_INPUT_SVIDEO;
+			break;
+		case 2:
+			input = VINO_INPUT_D1;
+			break;
+		}
+	} else if (vino_drvdata->decoder) {
+		switch (index) {
+		case 0:
+			input = VINO_INPUT_COMPOSITE;
+			break;
+		case 1:
+			input = VINO_INPUT_SVIDEO;
+			break;
+		}
+	} else if (vino_drvdata->camera) {
+		switch (index) {
+		case 0:
+			input = VINO_INPUT_D1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return input;
+}
+
+/* execute with input_lock locked */
+static __u32 vino_find_input_index(struct vino_channel_settings *vcs)
+{
+	__u32 index = 0;
+	// FIXME: detect when no inputs available
+
+	if (vino_drvdata->decoder && vino_drvdata->camera) {
+		switch (vcs->input) {
+		case VINO_INPUT_COMPOSITE:
+			index = 0;
+			break;
+		case VINO_INPUT_SVIDEO:
+			index = 1;
+			break;
+		case VINO_INPUT_D1:
+			index = 2;
+			break;
+		}
+	} else if (vino_drvdata->decoder) {
+		switch (vcs->input) {
+		case VINO_INPUT_COMPOSITE:
+			index = 0;
+			break;
+		case VINO_INPUT_SVIDEO:
+			index = 1;
+			break;
+		}
+	} else if (vino_drvdata->camera) {
+		switch (vcs->input) {
+		case VINO_INPUT_D1:
+			index = 0;
+			break;
+		}
+	}
+
+	return index;
+}
+
+/* V4L2 ioctls */
+
+static int vino_querycap(struct file *file, void *__fh,
+		struct v4l2_capability *cap)
+{
+	memset(cap, 0, sizeof(struct v4l2_capability));
+
+	strcpy(cap->driver, vino_driver_name);
+	strcpy(cap->card, vino_driver_description);
+	strcpy(cap->bus_info, vino_bus_name);
+	cap->capabilities =
+		V4L2_CAP_VIDEO_CAPTURE |
+		V4L2_CAP_STREAMING;
+	// V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
+	return 0;
+}
+
+static int vino_enum_input(struct file *file, void *__fh,
+			       struct v4l2_input *i)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	__u32 index = i->index;
+	int input;
+	dprintk("requested index = %d\n", index);
+
+	input = vino_int_enum_input(vcs, index);
+	if (input == VINO_INPUT_NONE)
+		return -EINVAL;
+
+	i->type = V4L2_INPUT_TYPE_CAMERA;
+	i->std = vino_inputs[input].std;
+	strcpy(i->name, vino_inputs[input].name);
+
+	if (input == VINO_INPUT_COMPOSITE || input == VINO_INPUT_SVIDEO)
+		decoder_call(video, g_input_status, &i->status);
+	return 0;
+}
+
+static int vino_g_input(struct file *file, void *__fh,
+			     unsigned int *i)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	__u32 index;
+	int input;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+	input = vcs->input;
+	index = vino_find_input_index(vcs);
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	dprintk("input = %d\n", input);
+
+	if (input == VINO_INPUT_NONE) {
+		return -EINVAL;
+	}
+
+	*i = index;
+
+	return 0;
+}
+
+static int vino_s_input(struct file *file, void *__fh,
+			     unsigned int i)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	int input;
+	dprintk("requested input = %d\n", i);
+
+	input = vino_int_enum_input(vcs, i);
+	if (input == VINO_INPUT_NONE)
+		return -EINVAL;
+
+	return vino_set_input(vcs, input);
+}
+
+static int vino_querystd(struct file *file, void *__fh,
+			      v4l2_std_id *std)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	switch (vcs->input) {
+	case VINO_INPUT_D1:
+		*std = vino_inputs[vcs->input].std;
+		break;
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO: {
+		decoder_call(video, querystd, std);
+		break;
+	}
+	default:
+		err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return err;
+}
+
+static int vino_g_std(struct file *file, void *__fh,
+			   v4l2_std_id *std)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	*std = vino_data_norms[vcs->data_norm].std;
+	dprintk("current standard = %d\n", vcs->data_norm);
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return 0;
+}
+
+static int vino_s_std(struct file *file, void *__fh,
+			   v4l2_std_id *std)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	if (!vino_is_input_owner(vcs)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* check if the standard is valid for the current input */
+	if ((*std) & vino_inputs[vcs->input].std) {
+		dprintk("standard accepted\n");
+
+		/* change the video norm for SAA7191
+		 * and accept NTSC for D1 (do nothing) */
+
+		if (vcs->input == VINO_INPUT_D1)
+			goto out;
+
+		if ((*std) & V4L2_STD_PAL) {
+			ret = vino_set_data_norm(vcs, VINO_DATA_NORM_PAL,
+						 &flags);
+		} else if ((*std) & V4L2_STD_NTSC) {
+			ret = vino_set_data_norm(vcs, VINO_DATA_NORM_NTSC,
+						 &flags);
+		} else if ((*std) & V4L2_STD_SECAM) {
+			ret = vino_set_data_norm(vcs, VINO_DATA_NORM_SECAM,
+						 &flags);
+		} else {
+			ret = -EINVAL;
+		}
+
+		if (ret) {
+			ret = -EINVAL;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+out:
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return ret;
+}
+
+static int vino_enum_fmt_vid_cap(struct file *file, void *__fh,
+			      struct v4l2_fmtdesc *fd)
+{
+	dprintk("format index = %d\n", fd->index);
+
+	if (fd->index >= VINO_DATA_FMT_COUNT)
+		return -EINVAL;
+	dprintk("format name = %s\n", vino_data_formats[fd->index].description);
+
+	fd->pixelformat = vino_data_formats[fd->index].pixelformat;
+	strcpy(fd->description, vino_data_formats[fd->index].description);
+	return 0;
+}
+
+static int vino_try_fmt_vid_cap(struct file *file, void *__fh,
+			     struct v4l2_format *f)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	struct vino_channel_settings tempvcs;
+	unsigned long flags;
+	struct v4l2_pix_format *pf = &f->fmt.pix;
+
+	dprintk("requested: w = %d, h = %d\n",
+			pf->width, pf->height);
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+	memcpy(&tempvcs, vcs, sizeof(struct vino_channel_settings));
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	tempvcs.data_format = vino_find_data_format(pf->pixelformat);
+	if (tempvcs.data_format == VINO_DATA_FMT_NONE) {
+		tempvcs.data_format = VINO_DATA_FMT_GREY;
+		pf->pixelformat =
+			vino_data_formats[tempvcs.data_format].
+			pixelformat;
+	}
+
+	/* data format must be set before clipping/scaling */
+	vino_set_scaling(&tempvcs, pf->width, pf->height);
+
+	dprintk("data format = %s\n",
+			vino_data_formats[tempvcs.data_format].description);
+
+	pf->width = (tempvcs.clipping.right - tempvcs.clipping.left) /
+		tempvcs.decimation;
+	pf->height = (tempvcs.clipping.bottom - tempvcs.clipping.top) /
+		tempvcs.decimation;
+
+	pf->field = V4L2_FIELD_INTERLACED;
+	pf->bytesperline = tempvcs.line_size;
+	pf->sizeimage = tempvcs.line_size *
+		(tempvcs.clipping.bottom - tempvcs.clipping.top) /
+		tempvcs.decimation;
+	pf->colorspace =
+		vino_data_formats[tempvcs.data_format].colorspace;
+
+	pf->priv = 0;
+	return 0;
+}
+
+static int vino_g_fmt_vid_cap(struct file *file, void *__fh,
+			   struct v4l2_format *f)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	struct v4l2_pix_format *pf = &f->fmt.pix;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	pf->width = (vcs->clipping.right - vcs->clipping.left) /
+		vcs->decimation;
+	pf->height = (vcs->clipping.bottom - vcs->clipping.top) /
+		vcs->decimation;
+	pf->pixelformat =
+		vino_data_formats[vcs->data_format].pixelformat;
+
+	pf->field = V4L2_FIELD_INTERLACED;
+	pf->bytesperline = vcs->line_size;
+	pf->sizeimage = vcs->line_size *
+		(vcs->clipping.bottom - vcs->clipping.top) /
+		vcs->decimation;
+	pf->colorspace =
+		vino_data_formats[vcs->data_format].colorspace;
+
+	pf->priv = 0;
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+	return 0;
+}
+
+static int vino_s_fmt_vid_cap(struct file *file, void *__fh,
+			   struct v4l2_format *f)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	int data_format;
+	unsigned long flags;
+	struct v4l2_pix_format *pf = &f->fmt.pix;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	data_format = vino_find_data_format(pf->pixelformat);
+
+	if (data_format == VINO_DATA_FMT_NONE) {
+		vcs->data_format = VINO_DATA_FMT_GREY;
+		pf->pixelformat =
+			vino_data_formats[vcs->data_format].
+			pixelformat;
+	} else {
+		vcs->data_format = data_format;
+	}
+
+	/* data format must be set before clipping/scaling */
+	vino_set_scaling(vcs, pf->width, pf->height);
+
+	dprintk("data format = %s\n",
+	       vino_data_formats[vcs->data_format].description);
+
+	pf->width = vcs->clipping.right - vcs->clipping.left;
+	pf->height = vcs->clipping.bottom - vcs->clipping.top;
+
+	pf->field = V4L2_FIELD_INTERLACED;
+	pf->bytesperline = vcs->line_size;
+	pf->sizeimage = vcs->line_size *
+		(vcs->clipping.bottom - vcs->clipping.top) /
+		vcs->decimation;
+	pf->colorspace =
+		vino_data_formats[vcs->data_format].colorspace;
+
+	pf->priv = 0;
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+	return 0;
+}
+
+static int vino_cropcap(struct file *file, void *__fh,
+			     struct v4l2_cropcap *ccap)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	const struct vino_data_norm *norm;
+	unsigned long flags;
+
+	switch (ccap->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+		norm = &vino_data_norms[vcs->data_norm];
+
+		spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+		ccap->bounds.left = 0;
+		ccap->bounds.top = 0;
+		ccap->bounds.width = norm->width;
+		ccap->bounds.height = norm->height;
+		memcpy(&ccap->defrect, &ccap->bounds,
+		       sizeof(struct v4l2_rect));
+
+		ccap->pixelaspect.numerator = 1;
+		ccap->pixelaspect.denominator = 1;
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vino_g_crop(struct file *file, void *__fh,
+			    struct v4l2_crop *c)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+
+	switch (c->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+		c->c.left = vcs->clipping.left;
+		c->c.top = vcs->clipping.top;
+		c->c.width = vcs->clipping.right - vcs->clipping.left;
+		c->c.height = vcs->clipping.bottom - vcs->clipping.top;
+
+		spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vino_s_crop(struct file *file, void *__fh,
+			    struct v4l2_crop *c)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+
+	switch (c->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+		vino_set_clipping(vcs, c->c.left, c->c.top,
+				  c->c.width, c->c.height);
+
+		spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vino_g_parm(struct file *file, void *__fh,
+			    struct v4l2_streamparm *sp)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	struct v4l2_captureparm *cp = &sp->parm.capture;
+
+	cp->capability = V4L2_CAP_TIMEPERFRAME;
+	cp->timeperframe.numerator = 1;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	cp->timeperframe.denominator = vcs->fps;
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	/* TODO: cp->readbuffers = xxx; */
+
+	return 0;
+}
+
+static int vino_s_parm(struct file *file, void *__fh,
+			    struct v4l2_streamparm *sp)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	struct v4l2_captureparm *cp = &sp->parm.capture;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	if ((cp->timeperframe.numerator == 0) ||
+	    (cp->timeperframe.denominator == 0)) {
+		/* reset framerate */
+		vino_set_default_framerate(vcs);
+	} else {
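+		/* timeperframe is given in seconds per frame; invert
+		 * the fraction to get the requested frame rate */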
+		vino_set_framerate(vcs, cp->timeperframe.denominator /
+				   cp->timeperframe.numerator);
+	}
+
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return 0;
+}
+
+static int vino_reqbufs(struct file *file, void *__fh,
+			     struct v4l2_requestbuffers *rb)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+
+	if (vcs->reading)
+		return -EBUSY;
+
+	/* TODO: check queue type */
+	if (rb->memory != V4L2_MEMORY_MMAP) {
+		dprintk("type not mmap\n");
+		return -EINVAL;
+	}
+
+	dprintk("count = %d\n", rb->count);
+	if (rb->count > 0) {
+		if (vino_is_capturing(vcs)) {
+			dprintk("busy, capturing\n");
+			return -EBUSY;
+		}
+
+		if (vino_queue_has_mapped_buffers(&vcs->fb_queue)) {
+			dprintk("busy, buffers still mapped\n");
+			return -EBUSY;
+		} else {
+			vcs->streaming = 0;
+			vino_queue_free(&vcs->fb_queue);
+			vino_queue_init(&vcs->fb_queue, &rb->count);
+		}
+	} else {
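+		/* a buffer count of zero releases all buffers, as
+		 * defined for VIDIOC_REQBUFS */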
+		vcs->streaming = 0;
+		vino_capture_stop(vcs);
+		vino_queue_free(&vcs->fb_queue);
+	}
+
+	return 0;
+}
+
+static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
+					struct vino_framebuffer *fb,
+					struct v4l2_buffer *b)
+{
+	if (vino_queue_outgoing_contains(&vcs->fb_queue,
+					 fb->id)) {
+		b->flags &= ~V4L2_BUF_FLAG_QUEUED;
+		b->flags |= V4L2_BUF_FLAG_DONE;
+	} else if (vino_queue_incoming_contains(&vcs->fb_queue,
+				       fb->id)) {
+		b->flags &= ~V4L2_BUF_FLAG_DONE;
+		b->flags |= V4L2_BUF_FLAG_QUEUED;
+	} else {
+		b->flags &= ~(V4L2_BUF_FLAG_DONE |
+			      V4L2_BUF_FLAG_QUEUED);
+	}
+
+	b->flags &= ~(V4L2_BUF_FLAG_TIMECODE);
+
+	if (fb->map_count > 0)
+		b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+	b->index = fb->id;
+	b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
+		V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
+	b->m.offset = fb->offset;
+	b->bytesused = fb->data_size;
+	b->length = fb->size;
+	b->field = V4L2_FIELD_INTERLACED;
+	b->sequence = fb->frame_counter;
+	memcpy(&b->timestamp, &fb->timestamp,
+	       sizeof(struct timeval));
+	// b->input ?
+
+	dprintk("buffer %d: length = %d, bytesused = %d, offset = %d\n",
+		fb->id, fb->size, fb->data_size, fb->offset);
+}
+
+static int vino_querybuf(struct file *file, void *__fh,
+			      struct v4l2_buffer *b)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	struct vino_framebuffer *fb;
+
+	if (vcs->reading)
+		return -EBUSY;
+
+	/* TODO: check queue type */
+	if (b->index >= vino_queue_get_length(&vcs->fb_queue)) {
+		dprintk("invalid index = %d\n",
+		       b->index);
+		return -EINVAL;
+	}
+
+	fb = vino_queue_get_buffer(&vcs->fb_queue,
+				   b->index);
+	if (fb == NULL) {
+		dprintk("vino_queue_get_buffer() failed");
+		return -EINVAL;
+	}
+
+	vino_v4l2_get_buffer_status(vcs, fb, b);
+
+	return 0;
+}
+
+static int vino_qbuf(struct file *file, void *__fh,
+			  struct v4l2_buffer *b)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	struct vino_framebuffer *fb;
+	int ret;
+
+	if (vcs->reading)
+		return -EBUSY;
+
+	/* TODO: check queue type */
+	if (b->memory != V4L2_MEMORY_MMAP) {
+		dprintk("type not mmap\n");
+		return -EINVAL;
+	}
+
+	fb = vino_capture_enqueue(vcs, b->index);
+	if (fb == NULL)
+		return -EINVAL;
+
+	vino_v4l2_get_buffer_status(vcs, fb, b);
+
+	if (vcs->streaming) {
+		ret = vino_capture_next(vcs, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int vino_dqbuf(struct file *file, void *__fh,
+			   struct v4l2_buffer *b)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned int nonblocking = file->f_flags & O_NONBLOCK;
+	struct vino_framebuffer *fb;
+	unsigned int incoming, outgoing;
+	int err;
+
+	if (vcs->reading)
+		return -EBUSY;
+
+	/* TODO: check queue type */
+
+	err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
+	if (err) {
+		dprintk("vino_queue_get_incoming() failed\n");
+		return -EINVAL;
+	}
+	err = vino_queue_get_outgoing(&vcs->fb_queue, &outgoing);
+	if (err) {
+		dprintk("vino_queue_get_outgoing() failed\n");
+		return -EINVAL;
+	}
+
+	dprintk("incoming = %d, outgoing = %d\n", incoming, outgoing);
+
+	if (outgoing == 0) {
+		if (incoming == 0) {
+			dprintk("no incoming or outgoing buffers\n");
+			return -EINVAL;
+		}
+		if (nonblocking) {
+			dprintk("non-blocking I/O was selected and "
+				"there are no buffers to dequeue\n");
+			return -EAGAIN;
+		}
+
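+		/* block until a frame is captured; the wait is retried
+		 * once, as it may time out while frames are skipped */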
+		err = vino_wait_for_frame(vcs);
+		if (err) {
+			err = vino_wait_for_frame(vcs);
+			if (err) {
+				/* interrupted or no frames captured because of
+				 * frame skipping */
+				/* vino_capture_failed(vcs); */
+				return -EIO;
+			}
+		}
+	}
+
+	fb = vino_queue_remove(&vcs->fb_queue, &b->index);
+	if (fb == NULL) {
+		dprintk("vino_queue_remove() failed\n");
+		return -EINVAL;
+	}
+
+	err = vino_check_buffer(vcs, fb);
+
+	vino_v4l2_get_buffer_status(vcs, fb, b);
+
+	if (err)
+		return -EIO;
+
+	return 0;
+}
+
+static int vino_streamon(struct file *file, void *__fh,
+		enum v4l2_buf_type i)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned int incoming;
+	int ret;
+	if (vcs->reading)
+		return -EBUSY;
+
+	if (vcs->streaming)
+		return 0;
+
+	// TODO: check queue type
+
+	if (vino_queue_get_length(&vcs->fb_queue) < 1) {
+		dprintk("no buffers allocated\n");
+		return -EINVAL;
+	}
+
+	ret = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
+	if (ret) {
+		dprintk("vino_queue_get_incoming() failed\n");
+		return -EINVAL;
+	}
+
+	vcs->streaming = 1;
+
+	if (incoming > 0) {
+		ret = vino_capture_next(vcs, 1);
+		if (ret) {
+			vcs->streaming = 0;
+
+			dprintk("couldn't start capture\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int vino_streamoff(struct file *file, void *__fh,
+		enum v4l2_buf_type i)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	if (vcs->reading)
+		return -EBUSY;
+
+	if (!vcs->streaming)
+		return 0;
+
+	vcs->streaming = 0;
+	vino_capture_stop(vcs);
+
+	return 0;
+}
+
+static int vino_queryctrl(struct file *file, void *__fh,
+			       struct v4l2_queryctrl *queryctrl)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	int i;
+	int err = 0;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	switch (vcs->input) {
+	case VINO_INPUT_D1:
+		for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
+			if (vino_indycam_v4l2_controls[i].id ==
+			    queryctrl->id) {
+				memcpy(queryctrl,
+				       &vino_indycam_v4l2_controls[i],
+				       sizeof(struct v4l2_queryctrl));
+				queryctrl->reserved[0] = 0;
+				goto found;
+			}
+		}
+
+		err = -EINVAL;
+		break;
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO:
+		for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
+			if (vino_saa7191_v4l2_controls[i].id ==
+			    queryctrl->id) {
+				memcpy(queryctrl,
+				       &vino_saa7191_v4l2_controls[i],
+				       sizeof(struct v4l2_queryctrl));
+				queryctrl->reserved[0] = 0;
+				goto found;
+			}
+		}
+
+		err = -EINVAL;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+ found:
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return err;
+}
+
+static int vino_g_ctrl(struct file *file, void *__fh,
+			    struct v4l2_control *control)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	int i;
+	int err = 0;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	switch (vcs->input) {
+	case VINO_INPUT_D1: {
+		err = -EINVAL;
+		for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
+			if (vino_indycam_v4l2_controls[i].id == control->id) {
+				err = 0;
+				break;
+			}
+		}
+
+		if (err)
+			goto out;
+
+		err = camera_call(core, g_ctrl, control);
+		if (err)
+			err = -EINVAL;
+		break;
+	}
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO: {
+		err = -EINVAL;
+		for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
+			if (vino_saa7191_v4l2_controls[i].id == control->id) {
+				err = 0;
+				break;
+			}
+		}
+
+		if (err)
+			goto out;
+
+		err = decoder_call(core, g_ctrl, control);
+		if (err)
+			err = -EINVAL;
+		break;
+	}
+	default:
+		err = -EINVAL;
+	}
+
+out:
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return err;
+}
+
+static int vino_s_ctrl(struct file *file, void *__fh,
+			    struct v4l2_control *control)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned long flags;
+	int i;
+	int err = 0;
+
+	spin_lock_irqsave(&vino_drvdata->input_lock, flags);
+
+	if (!vino_is_input_owner(vcs)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	switch (vcs->input) {
+	case VINO_INPUT_D1: {
+		err = -EINVAL;
+		for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
+			if (vino_indycam_v4l2_controls[i].id == control->id) {
+				err = 0;
+				break;
+			}
+		}
+		if (err)
+			goto out;
+		if (control->value < vino_indycam_v4l2_controls[i].minimum ||
+		    control->value > vino_indycam_v4l2_controls[i].maximum) {
+			err = -ERANGE;
+			goto out;
+		}
+		err = camera_call(core, s_ctrl, control);
+		if (err)
+			err = -EINVAL;
+		break;
+	}
+	case VINO_INPUT_COMPOSITE:
+	case VINO_INPUT_SVIDEO: {
+		err = -EINVAL;
+		for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
+			if (vino_saa7191_v4l2_controls[i].id == control->id) {
+				err = 0;
+				break;
+			}
+		}
+		if (err)
+			goto out;
+		if (control->value < vino_saa7191_v4l2_controls[i].minimum ||
+		    control->value > vino_saa7191_v4l2_controls[i].maximum) {
+			err = -ERANGE;
+			goto out;
+		}
+
+		err = decoder_call(core, s_ctrl, control);
+		if (err)
+			err = -EINVAL;
+		break;
+	}
+	default:
+		err = -EINVAL;
+	}
+
+out:
+	spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
+
+	return err;
+}
+
+/* File operations */
+
+static int vino_open(struct file *file)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	int ret = 0;
+	dprintk("open(): channel = %c\n",
+	       (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B');
+
+	mutex_lock(&vcs->mutex);
+
+	if (vcs->users) {
+		dprintk("open(): driver busy\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = vino_acquire_input(vcs);
+	if (ret) {
+		dprintk("open(): vino_acquire_input() failed\n");
+		goto out;
+	}
+
+	vcs->users++;
+
+ out:
+	mutex_unlock(&vcs->mutex);
+
+	dprintk("open(): %s!\n", ret ? "failed" : "complete");
+
+	return ret;
+}
+
+static int vino_close(struct file *file)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	dprintk("close():\n");
+
+	mutex_lock(&vcs->mutex);
+
+	vcs->users--;
+
+	if (!vcs->users) {
+		vino_release_input(vcs);
+
+		/* stop DMA and free buffers */
+		vino_capture_stop(vcs);
+		vino_queue_free(&vcs->fb_queue);
+	}
+
+	mutex_unlock(&vcs->mutex);
+
+	return 0;
+}
+
+static void vino_vm_open(struct vm_area_struct *vma)
+{
+	struct vino_framebuffer *fb = vma->vm_private_data;
+
+	fb->map_count++;
+	dprintk("vino_vm_open(): count = %d\n", fb->map_count);
+}
+
+static void vino_vm_close(struct vm_area_struct *vma)
+{
+	struct vino_framebuffer *fb = vma->vm_private_data;
+
+	fb->map_count--;
+	dprintk("vino_vm_close(): count = %d\n", fb->map_count);
+}
+
+static const struct vm_operations_struct vino_vm_ops = {
+	.open	= vino_vm_open,
+	.close	= vino_vm_close,
+};
+
+static int vino_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+
+	unsigned long start = vma->vm_start;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+	struct vino_framebuffer *fb = NULL;
+	unsigned int i, length;
+	int ret = 0;
+
+	dprintk("mmap():\n");
+
+	// TODO: reject mmap if already mapped
+
+	if (mutex_lock_interruptible(&vcs->mutex))
+		return -EINTR;
+
+	if (vcs->reading) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	// TODO: check queue type
+
+	if (!(vma->vm_flags & VM_WRITE)) {
+		dprintk("mmap(): app bug: PROT_WRITE please\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!(vma->vm_flags & VM_SHARED)) {
+		dprintk("mmap(): app bug: MAP_SHARED please\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* find the correct buffer using offset */
+	length = vino_queue_get_length(&vcs->fb_queue);
+	if (length == 0) {
+		dprintk("mmap(): queue not initialized\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < length; i++) {
+		fb = vino_queue_get_buffer(&vcs->fb_queue, i);
+		if (fb == NULL) {
+			dprintk("mmap(): vino_queue_get_buffer() failed\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (fb->offset == offset)
+			goto found;
+	}
+
+	dprintk("mmap(): invalid offset = %lu\n", offset);
+	ret = -EINVAL;
+	goto out;
+
+found:
+	dprintk("mmap(): buffer = %d\n", i);
+
+	if (size > (fb->desc_table.page_count * PAGE_SIZE)) {
+		dprintk("mmap(): failed: size = %lu > %lu\n",
+			size, fb->desc_table.page_count * PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
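+	/* the buffer pages are not necessarily physically contiguous,
+	 * so each page is remapped into the VMA separately */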
+	for (i = 0; i < fb->desc_table.page_count; i++) {
+		unsigned long pfn =
+			virt_to_phys((void *)fb->desc_table.virtual[i]) >>
+			PAGE_SHIFT;
+
+		if (size < PAGE_SIZE)
+			break;
+
+		// protection was: PAGE_READONLY
+		if (remap_pfn_range(vma, start, pfn, PAGE_SIZE,
+				    vma->vm_page_prot)) {
+			dprintk("mmap(): remap_pfn_range() failed\n");
+			ret = -EAGAIN;
+			goto out;
+		}
+
+		start += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	fb->map_count = 1;
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+	vma->vm_flags &= ~VM_IO;
+	vma->vm_private_data = fb;
+	vma->vm_file = file;
+	vma->vm_ops = &vino_vm_ops;
+
+out:
+	mutex_unlock(&vcs->mutex);
+
+	return ret;
+}
+
+static unsigned int vino_poll(struct file *file, poll_table *pt)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	unsigned int outgoing;
+	unsigned int ret = 0;
+
+	// lock mutex (?)
+	// TODO: this has to be corrected for different read modes
+
+	dprintk("poll():\n");
+
+	if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
+		dprintk("poll(): vino_queue_get_outgoing() failed\n");
+		ret = POLLERR;
+		goto error;
+	}
+	if (outgoing > 0)
+		goto over;
+
+	poll_wait(file, &vcs->fb_queue.frame_wait_queue, pt);
+
+	if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
+		dprintk("poll(): vino_queue_get_outgoing() failed\n");
+		ret = POLLERR;
+		goto error;
+	}
+
+over:
+	dprintk("poll(): data %savailable\n",
+		(outgoing > 0) ? "" : "not ");
+
+	if (outgoing > 0)
+		ret = POLLIN | POLLRDNORM;
+
+error:
+	return ret;
+}
+
+static long vino_ioctl(struct file *file,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct vino_channel_settings *vcs = video_drvdata(file);
+	long ret;
+
+	if (mutex_lock_interruptible(&vcs->mutex))
+		return -EINTR;
+
+	ret = video_ioctl2(file, cmd, arg);
+
+	mutex_unlock(&vcs->mutex);
+
+	return ret;
+}
+
+/* Initialization and cleanup */
+
+/* __initdata */
+static int vino_init_stage;
+
+const struct v4l2_ioctl_ops vino_ioctl_ops = {
+	.vidioc_enum_fmt_vid_cap     = vino_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap 	     = vino_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap  	     = vino_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap	     = vino_try_fmt_vid_cap,
+	.vidioc_querycap    	     = vino_querycap,
+	.vidioc_enum_input   	     = vino_enum_input,
+	.vidioc_g_input      	     = vino_g_input,
+	.vidioc_s_input      	     = vino_s_input,
+	.vidioc_g_std 		     = vino_g_std,
+	.vidioc_s_std 		     = vino_s_std,
+	.vidioc_querystd             = vino_querystd,
+	.vidioc_cropcap      	     = vino_cropcap,
+	.vidioc_s_crop       	     = vino_s_crop,
+	.vidioc_g_crop       	     = vino_g_crop,
+	.vidioc_s_parm 		     = vino_s_parm,
+	.vidioc_g_parm 		     = vino_g_parm,
+	.vidioc_reqbufs              = vino_reqbufs,
+	.vidioc_querybuf             = vino_querybuf,
+	.vidioc_qbuf                 = vino_qbuf,
+	.vidioc_dqbuf                = vino_dqbuf,
+	.vidioc_streamon             = vino_streamon,
+	.vidioc_streamoff            = vino_streamoff,
+	.vidioc_queryctrl            = vino_queryctrl,
+	.vidioc_g_ctrl               = vino_g_ctrl,
+	.vidioc_s_ctrl               = vino_s_ctrl,
+};
+
+static const struct v4l2_file_operations vino_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vino_open,
+	.release	= vino_close,
+	.unlocked_ioctl	= vino_ioctl,
+	.mmap		= vino_mmap,
+	.poll		= vino_poll,
+};
+
+static struct video_device vdev_template = {
+	.name		= "NOT SET",
+	.fops		= &vino_fops,
+	.ioctl_ops 	= &vino_ioctl_ops,
+	.tvnorms 	= V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+};
+
+static void vino_module_cleanup(int stage)
+{
+	switch(stage) {
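+	/* each case intentionally falls through to the next one, undoing
+	 * the completed initialization stages in reverse order */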
+	case 11:
+		video_unregister_device(vino_drvdata->b.vdev);
+		vino_drvdata->b.vdev = NULL;
+	case 10:
+		video_unregister_device(vino_drvdata->a.vdev);
+		vino_drvdata->a.vdev = NULL;
+	case 9:
+		i2c_del_adapter(&vino_i2c_adapter);
+	case 8:
+		free_irq(SGI_VINO_IRQ, NULL);
+	case 7:
+		if (vino_drvdata->b.vdev) {
+			video_device_release(vino_drvdata->b.vdev);
+			vino_drvdata->b.vdev = NULL;
+		}
+	case 6:
+		if (vino_drvdata->a.vdev) {
+			video_device_release(vino_drvdata->a.vdev);
+			vino_drvdata->a.vdev = NULL;
+		}
+	case 5:
+		/* all entries in dma_cpu dummy table have the same address */
+		dma_unmap_single(NULL,
+				 vino_drvdata->dummy_desc_table.dma_cpu[0],
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_free_coherent(NULL, VINO_DUMMY_DESC_COUNT
+				  * sizeof(dma_addr_t),
+				  (void *)vino_drvdata->
+				  dummy_desc_table.dma_cpu,
+				  vino_drvdata->dummy_desc_table.dma);
+	case 4:
+		free_page(vino_drvdata->dummy_page);
+	case 3:
+		v4l2_device_unregister(&vino_drvdata->v4l2_dev);
+	case 2:
+		kfree(vino_drvdata);
+	case 1:
+		iounmap(vino);
+	case 0:
+		break;
+	default:
+		dprintk("vino_module_cleanup(): invalid cleanup stage = %d\n",
+			stage);
+	}
+}
+
+static int vino_probe(void)
+{
+	unsigned long rev_id;
+
+	if (ip22_is_fullhouse()) {
+		printk(KERN_ERR "VINO doesn't exist in IP22 Fullhouse\n");
+		return -ENODEV;
+	}
+
+	if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
+		printk(KERN_ERR "VINO is not found (EISA BUS not present)\n");
+		return -ENODEV;
+	}
+
+	vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino));
+	if (!vino) {
+		printk(KERN_ERR "VINO: ioremap() failed\n");
+		return -EIO;
+	}
+	vino_init_stage++;
+
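+	/* read the revision register with get_dbe() so that a bus error
+	 * is caught gracefully if the register cannot be read */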
+	if (get_dbe(rev_id, &(vino->rev_id))) {
+		printk(KERN_ERR "Failed to read VINO revision register\n");
+		vino_module_cleanup(vino_init_stage);
+		return -ENODEV;
+	}
+
+	if (VINO_ID_VALUE(rev_id) != VINO_CHIP_ID) {
+		printk(KERN_ERR "Unknown VINO chip ID (Rev/ID: 0x%02lx)\n",
+		       rev_id);
+		vino_module_cleanup(vino_init_stage);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "VINO revision %ld found\n", VINO_REV_NUM(rev_id));
+
+	return 0;
+}
+
+static int vino_init(void)
+{
+	dma_addr_t dma_dummy_address;
+	int err;
+	int i;
+
+	vino_drvdata = kzalloc(sizeof(struct vino_settings), GFP_KERNEL);
+	if (!vino_drvdata) {
+		vino_module_cleanup(vino_init_stage);
+		return -ENOMEM;
+	}
+	vino_init_stage++;
+	strlcpy(vino_drvdata->v4l2_dev.name, "vino",
+			sizeof(vino_drvdata->v4l2_dev.name));
+	err = v4l2_device_register(NULL, &vino_drvdata->v4l2_dev);
+	if (err)
+		return err;
+	vino_init_stage++;
+
+	/* create a dummy dma descriptor */
+	vino_drvdata->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!vino_drvdata->dummy_page) {
+		vino_module_cleanup(vino_init_stage);
+		return -ENOMEM;
+	}
+	vino_init_stage++;
+
+	// TODO: use page_count in dummy_desc_table
+
+	vino_drvdata->dummy_desc_table.dma_cpu =
+		dma_alloc_coherent(NULL,
+		VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t),
+		&vino_drvdata->dummy_desc_table.dma,
+		GFP_KERNEL | GFP_DMA);
+	if (!vino_drvdata->dummy_desc_table.dma_cpu) {
+		vino_module_cleanup(vino_init_stage);
+		return -ENOMEM;
+	}
+	vino_init_stage++;
+
+	dma_dummy_address = dma_map_single(NULL,
+					   (void *)vino_drvdata->dummy_page,
+					PAGE_SIZE, DMA_FROM_DEVICE);
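+	/* all dummy descriptors point at the same zeroed page, giving the
+	 * DMA engine a harmless target when no real buffer is queued */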
+	for (i = 0; i < VINO_DUMMY_DESC_COUNT; i++) {
+		vino_drvdata->dummy_desc_table.dma_cpu[i] = dma_dummy_address;
+	}
+
+	/* initialize VINO */
+
+	vino->control = 0;
+	vino->a.next_4_desc = vino_drvdata->dummy_desc_table.dma;
+	vino->b.next_4_desc = vino_drvdata->dummy_desc_table.dma;
+	udelay(VINO_DESC_FETCH_DELAY);
+
+	vino->intr_status = 0;
+
+	vino->a.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
+	vino->b.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
+
+	return 0;
+}
+
+static int vino_init_channel_settings(struct vino_channel_settings *vcs,
+				 unsigned int channel, const char *name)
+{
+	vcs->channel = channel;
+	vcs->input = VINO_INPUT_NONE;
+	vcs->alpha = 0;
+	vcs->users = 0;
+	vcs->data_format = VINO_DATA_FMT_GREY;
+	vcs->data_norm = VINO_DATA_NORM_NTSC;
+	vcs->decimation = 1;
+	vino_set_default_clipping(vcs);
+	vino_set_default_framerate(vcs);
+
+	vcs->capturing = 0;
+
+	mutex_init(&vcs->mutex);
+	spin_lock_init(&vcs->capture_lock);
+
+	mutex_init(&vcs->fb_queue.queue_mutex);
+	spin_lock_init(&vcs->fb_queue.queue_lock);
+	init_waitqueue_head(&vcs->fb_queue.frame_wait_queue);
+
+	vcs->vdev = video_device_alloc();
+	if (!vcs->vdev) {
+		vino_module_cleanup(vino_init_stage);
+		return -ENOMEM;
+	}
+	vino_init_stage++;
+
+	memcpy(vcs->vdev, &vdev_template,
+	       sizeof(struct video_device));
+	strcpy(vcs->vdev->name, name);
+	vcs->vdev->release = video_device_release;
+	vcs->vdev->v4l2_dev = &vino_drvdata->v4l2_dev;
+
+	video_set_drvdata(vcs->vdev, vcs);
+
+	return 0;
+}
+
+static int __init vino_module_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO "SGI VINO driver version %s\n",
+	       VINO_MODULE_VERSION);
+
+	ret = vino_probe();
+	if (ret)
+		return ret;
+
+	ret = vino_init();
+	if (ret)
+		return ret;
+
+	/* initialize data structures */
+
+	spin_lock_init(&vino_drvdata->vino_lock);
+	spin_lock_init(&vino_drvdata->input_lock);
+
+	ret = vino_init_channel_settings(&vino_drvdata->a, VINO_CHANNEL_A,
+				    vino_vdev_name_a);
+	if (ret)
+		return ret;
+
+	ret = vino_init_channel_settings(&vino_drvdata->b, VINO_CHANNEL_B,
+				    vino_vdev_name_b);
+	if (ret)
+		return ret;
+
+	/* initialize hardware and register V4L devices */
+
+	ret = request_irq(SGI_VINO_IRQ, vino_interrupt, 0,
+		vino_driver_description, NULL);
+	if (ret) {
+		printk(KERN_ERR "VINO: requesting IRQ %02d failed\n",
+		       SGI_VINO_IRQ);
+		vino_module_cleanup(vino_init_stage);
+		return -EAGAIN;
+	}
+	vino_init_stage++;
+
+	ret = i2c_add_adapter(&vino_i2c_adapter);
+	if (ret) {
+		printk(KERN_ERR "VINO I2C bus registration failed\n");
+		vino_module_cleanup(vino_init_stage);
+		return ret;
+	}
+	i2c_set_adapdata(&vino_i2c_adapter, &vino_drvdata->v4l2_dev);
+	vino_init_stage++;
+
+	ret = video_register_device(vino_drvdata->a.vdev,
+				    VFL_TYPE_GRABBER, -1);
+	if (ret < 0) {
+		printk(KERN_ERR "VINO channel A Video4Linux-device "
+		       "registration failed\n");
+		vino_module_cleanup(vino_init_stage);
+		return -EINVAL;
+	}
+	vino_init_stage++;
+
+	ret = video_register_device(vino_drvdata->b.vdev,
+				    VFL_TYPE_GRABBER, -1);
+	if (ret < 0) {
+		printk(KERN_ERR "VINO channel B Video4Linux-device "
+		       "registration failed\n");
+		vino_module_cleanup(vino_init_stage);
+		return -EINVAL;
+	}
+	vino_init_stage++;
+
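+	/* the decoder and camera subdevices are optional; if either one
+	 * fails to probe, the corresponding inputs are not offered */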
+	vino_drvdata->decoder =
+		v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
+			       "saa7191", 0, I2C_ADDRS(0x45));
+	vino_drvdata->camera =
+		v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
+			       "indycam", 0, I2C_ADDRS(0x2b));
+
+	dprintk("init complete!\n");
+
+	return 0;
+}
+
+static void __exit vino_module_exit(void)
+{
+	dprintk("exiting, stage = %d ...\n", vino_init_stage);
+	vino_module_cleanup(vino_init_stage);
+	dprintk("cleanup complete, exit!\n");
+}
+
+module_init(vino_module_init);
+module_exit(vino_module_exit);
diff --git a/drivers/media/platform/vino.h b/drivers/media/platform/vino.h
new file mode 100644
index 0000000..de2d615
--- /dev/null
+++ b/drivers/media/platform/vino.h
@@ -0,0 +1,138 @@
+/*
+ * Driver for the VINO (Video In No Out) system found in SGI Indys.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * Copyright (C) 1999 Ulf Karlsson <ulfc@bun.falkenberg.se>
+ * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
+ */
+
+#ifndef _VINO_H_
+#define _VINO_H_
+
+#define VINO_BASE	0x00080000	/* Vino is in the EISA address space,
+					 * but it is not an EISA bus card */
+#define VINO_PAGE_SIZE	4096
+
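+/* Each register is a 32-bit value in a 64-bit aligned slot; the _pad
+ * words fill the unused half of each slot. */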
+struct sgi_vino_channel {
+	u32 _pad_alpha;
+	volatile u32 alpha;
+
+#define VINO_CLIP_X(x)		((x) & 0x3ff)		/* bits 0:9 */
+#define VINO_CLIP_ODD(x)	(((x) & 0x1ff) << 10)	/* bits 10:18 */
+#define VINO_CLIP_EVEN(x)	(((x) & 0x1ff) << 19)	/* bits 19:27 */
+	u32 _pad_clip_start;
+	volatile u32 clip_start;
+	u32 _pad_clip_end;
+	volatile u32 clip_end;
+
+#define VINO_FRAMERT_FULL	0xfff
+#define VINO_FRAMERT_PAL	(1<<0)			/* 0=NTSC 1=PAL */
+#define VINO_FRAMERT_RT(x)	(((x) & 0xfff) << 1)	/* bits 1:12 */
+	u32 _pad_frame_rate;
+	volatile u32 frame_rate;
+
+	u32 _pad_field_counter;
+	volatile u32 field_counter;
+	u32 _pad_line_size;
+	volatile u32 line_size;
+	u32 _pad_line_count;
+	volatile u32 line_count;
+	u32 _pad_page_index;
+	volatile u32 page_index;
+	u32 _pad_next_4_desc;
+	volatile u32 next_4_desc;
+	u32 _pad_start_desc_tbl;
+	volatile u32 start_desc_tbl;
+
+#define VINO_DESC_JUMP		(1<<30)
+#define VINO_DESC_STOP		(1<<31)
+#define VINO_DESC_VALID		(1<<32)
+	u32 _pad_desc_0;
+	volatile u32 desc_0;
+	u32 _pad_desc_1;
+	volatile u32 desc_1;
+	u32 _pad_desc_2;
+	volatile u32 desc_2;
+	u32 _pad_Bdesc_3;
+	volatile u32 desc_3;
+
+	u32 _pad_fifo_thres;
+	volatile u32 fifo_thres;
+	u32 _pad_fifo_read;
+	volatile u32 fifo_read;
+	u32 _pad_fifo_write;
+	volatile u32 fifo_write;
+};
+
+struct sgi_vino {
+#define VINO_CHIP_ID		0xb
+#define VINO_REV_NUM(x)		((x) & 0x0f)
+#define VINO_ID_VALUE(x)	(((x) & 0xf0) >> 4)
+	u32 _pad_rev_id;
+	volatile u32 rev_id;
+
+#define VINO_CTRL_LITTLE_ENDIAN		(1<<0)
+#define VINO_CTRL_A_EOF_INT		(1<<1)	/* Field transferred int */
+#define VINO_CTRL_A_FIFO_INT		(1<<2)	/* FIFO overflow int */
+#define VINO_CTRL_A_EOD_INT		(1<<3)	/* End of desc table int */
+#define VINO_CTRL_A_INT			(VINO_CTRL_A_EOF_INT | \
+					 VINO_CTRL_A_FIFO_INT | \
+					 VINO_CTRL_A_EOD_INT)
+#define VINO_CTRL_B_EOF_INT		(1<<4)	/* Field transferred int */
+#define VINO_CTRL_B_FIFO_INT		(1<<5)	/* FIFO overflow int */
+#define VINO_CTRL_B_EOD_INT		(1<<6)	/* End of desc table int */
+#define VINO_CTRL_B_INT			(VINO_CTRL_B_EOF_INT | \
+					 VINO_CTRL_B_FIFO_INT | \
+					 VINO_CTRL_B_EOD_INT)
+#define VINO_CTRL_A_DMA_ENBL		(1<<7)
+#define VINO_CTRL_A_INTERLEAVE_ENBL	(1<<8)
+#define VINO_CTRL_A_SYNC_ENBL		(1<<9)
+#define VINO_CTRL_A_SELECT		(1<<10)	/* 1=D1 0=Philips */
+#define VINO_CTRL_A_RGB			(1<<11)	/* 1=RGB 0=YUV */
+#define VINO_CTRL_A_LUMA_ONLY		(1<<12)
+#define VINO_CTRL_A_DEC_ENBL		(1<<13)	/* Decimation */
+#define VINO_CTRL_A_DEC_SCALE_MASK	0x1c000	/* bits 14:17 */
+#define VINO_CTRL_A_DEC_SCALE_SHIFT	(14)
+#define VINO_CTRL_A_DEC_HOR_ONLY	(1<<17)	/* Horizontal only */
+#define VINO_CTRL_A_DITHER		(1<<18)	/* 24 -> 8 bit dither */
+#define VINO_CTRL_B_DMA_ENBL		(1<<19)
+#define VINO_CTRL_B_INTERLEAVE_ENBL	(1<<20)
+#define VINO_CTRL_B_SYNC_ENBL		(1<<21)
+#define VINO_CTRL_B_SELECT		(1<<22)	/* 1=D1 0=Philips */
+#define VINO_CTRL_B_RGB			(1<<23)	/* 1=RGB 0=YUV */
+#define VINO_CTRL_B_LUMA_ONLY		(1<<24)
+#define VINO_CTRL_B_DEC_ENBL		(1<<25)	/* Decimation */
+#define VINO_CTRL_B_DEC_SCALE_MASK	0x1c000000	/* bits 26:28 */
+#define VINO_CTRL_B_DEC_SCALE_SHIFT	(26)
+#define VINO_CTRL_B_DEC_HOR_ONLY	(1<<29)	/* Decimation horizontal only */
+#define VINO_CTRL_B_DITHER		(1<<30)	/* ChanB 24 -> 8 bit dither */
+	u32 _pad_control;
+	volatile u32 control;
+
+#define VINO_INTSTAT_A_EOF		(1<<0)	/* Field transferred int */
+#define VINO_INTSTAT_A_FIFO		(1<<1)	/* FIFO overflow int */
+#define VINO_INTSTAT_A_EOD		(1<<2)	/* End of desc table int */
+#define VINO_INTSTAT_A			(VINO_INTSTAT_A_EOF | \
+					 VINO_INTSTAT_A_FIFO | \
+					 VINO_INTSTAT_A_EOD)
+#define VINO_INTSTAT_B_EOF		(1<<3)	/* Field transferred int */
+#define VINO_INTSTAT_B_FIFO		(1<<4)	/* FIFO overflow int */
+#define VINO_INTSTAT_B_EOD		(1<<5)	/* End of desc table int */
+#define VINO_INTSTAT_B			(VINO_INTSTAT_B_EOF | \
+					 VINO_INTSTAT_B_FIFO | \
+					 VINO_INTSTAT_B_EOD)
+	u32 _pad_intr_status;
+	volatile u32 intr_status;
+
+	u32 _pad_i2c_control;
+	volatile u32 i2c_control;
+	u32 _pad_i2c_data;
+	volatile u32 i2c_data;
+
+	struct sgi_vino_channel a;
+	struct sgi_vino_channel b;
+};
+
+#endif
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
new file mode 100644
index 0000000..a6351c4
--- /dev/null
+++ b/drivers/media/platform/vivi.c
@@ -0,0 +1,1409 @@
+/*
+ * Virtual Video driver - This code emulates a real video device with v4l2 api
+ *
+ * Copyright (c) 2006 by:
+ *      Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
+ *      Ted Walther <ted--a.t--enumera.com>
+ *      John Sokol <sokol--a.t--videotechnology.com>
+ *      http://v4l.videotechnology.com/
+ *
+ *      Conversion to videobuf2 by Pawel Osciak & Marek Szyprowski
+ *      Copyright (c) 2010 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the BSD Licence, GNU General Public License
+ * as published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#define VIVI_MODULE_NAME "vivi"
+
+/* Wake up at about 30 fps */
+#define WAKE_NUMERATOR 30
+#define WAKE_DENOMINATOR 1001
+#define BUFFER_TIMEOUT     msecs_to_jiffies(500)  /* 0.5 seconds */
+
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
+#define VIVI_VERSION "0.8.1"
+
+MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board");
+MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VIVI_VERSION);
+
+static unsigned video_nr = -1;
+module_param(video_nr, uint, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
+
+static unsigned n_devs = 1;
+module_param(n_devs, uint, 0644);
+MODULE_PARM_DESC(n_devs, "number of video devices to create");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+static unsigned int vid_limit = 16;
+module_param(vid_limit, uint, 0644);
+MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
+
+/* Global font descriptor */
+static const u8 *font8x16;
+
+#define dprintk(dev, level, fmt, arg...) \
+	v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
+
+/* ------------------------------------------------------------------
+	Basic structures
+   ------------------------------------------------------------------*/
+
+struct vivi_fmt {
+	char  *name;
+	u32   fourcc;          /* v4l2 format id */
+	u8    depth;
+	bool  is_yuv;
+};
+
+static struct vivi_fmt formats[] = {
+	{
+		.name     = "4:2:2, packed, YUYV",
+		.fourcc   = V4L2_PIX_FMT_YUYV,
+		.depth    = 16,
+		.is_yuv   = true,
+	},
+	{
+		.name     = "4:2:2, packed, UYVY",
+		.fourcc   = V4L2_PIX_FMT_UYVY,
+		.depth    = 16,
+		.is_yuv   = true,
+	},
+	{
+		.name     = "4:2:2, packed, YVYU",
+		.fourcc   = V4L2_PIX_FMT_YVYU,
+		.depth    = 16,
+		.is_yuv   = true,
+	},
+	{
+		.name     = "4:2:2, packed, VYUY",
+		.fourcc   = V4L2_PIX_FMT_VYUY,
+		.depth    = 16,
+		.is_yuv   = true,
+	},
+	{
+		.name     = "RGB565 (LE)",
+		.fourcc   = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB565 (BE)",
+		.fourcc   = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB555 (LE)",
+		.fourcc   = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB555 (BE)",
+		.fourcc   = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+		.depth    = 16,
+	},
+	{
+		.name     = "RGB24 (LE)",
+		.fourcc   = V4L2_PIX_FMT_RGB24, /* rgb */
+		.depth    = 24,
+	},
+	{
+		.name     = "RGB24 (BE)",
+		.fourcc   = V4L2_PIX_FMT_BGR24, /* bgr */
+		.depth    = 24,
+	},
+	{
+		.name     = "RGB32 (LE)",
+		.fourcc   = V4L2_PIX_FMT_RGB32, /* argb */
+		.depth    = 32,
+	},
+	{
+		.name     = "RGB32 (BE)",
+		.fourcc   = V4L2_PIX_FMT_BGR32, /* bgra */
+		.depth    = 32,
+	},
+};
+
+static struct vivi_fmt *get_format(struct v4l2_format *f)
+{
+	struct vivi_fmt *fmt;
+	unsigned int k;
+
+	for (k = 0; k < ARRAY_SIZE(formats); k++) {
+		fmt = &formats[k];
+		if (fmt->fourcc == f->fmt.pix.pixelformat)
+			break;
+	}
+
+	if (k == ARRAY_SIZE(formats))
+		return NULL;
+
+	return &formats[k];
+}
+
+/* buffer for one video frame */
+struct vivi_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_buffer	vb;
+	struct list_head	list;
+	struct vivi_fmt        *fmt;
+};
+
+struct vivi_dmaqueue {
+	struct list_head       active;
+
+	/* thread for generating video stream */
+	struct task_struct         *kthread;
+	wait_queue_head_t          wq;
+	/* Counters to control fps rate */
+	int                        frame;
+	int                        ini_jiffies;
+};
+
+static LIST_HEAD(vivi_devlist);
+
+struct vivi_dev {
+	struct list_head           vivi_devlist;
+	struct v4l2_device 	   v4l2_dev;
+	struct v4l2_ctrl_handler   ctrl_handler;
+	struct video_device	   vdev;
+
+	/* controls */
+	struct v4l2_ctrl	   *brightness;
+	struct v4l2_ctrl	   *contrast;
+	struct v4l2_ctrl	   *saturation;
+	struct v4l2_ctrl	   *hue;
+	struct {
+		/* autogain/gain cluster */
+		struct v4l2_ctrl	   *autogain;
+		struct v4l2_ctrl	   *gain;
+	};
+	struct v4l2_ctrl	   *volume;
+	struct v4l2_ctrl	   *alpha;
+	struct v4l2_ctrl	   *button;
+	struct v4l2_ctrl	   *boolean;
+	struct v4l2_ctrl	   *int32;
+	struct v4l2_ctrl	   *int64;
+	struct v4l2_ctrl	   *menu;
+	struct v4l2_ctrl	   *string;
+	struct v4l2_ctrl	   *bitmask;
+	struct v4l2_ctrl	   *int_menu;
+
+	spinlock_t                 slock;
+	struct mutex		   mutex;
+
+	struct vivi_dmaqueue       vidq;
+
+	/* Several counters */
+	unsigned 		   ms;
+	unsigned long              jiffies;
+	unsigned		   button_pressed;
+
+	int			   mv_count;	/* Controls bars movement */
+
+	/* Input Number */
+	int			   input;
+
+	/* video capture */
+	struct vivi_fmt            *fmt;
+	unsigned int               width, height;
+	struct vb2_queue	   vb_vidq;
+	unsigned int		   field_count;
+
+	u8			   bars[9][3];
+	u8			   line[MAX_WIDTH * 8];
+	unsigned int		   pixelsize;
+	u8			   alpha_component;
+};
+
+/* ------------------------------------------------------------------
+	DMA and thread functions
+   ------------------------------------------------------------------*/
+
+/* Bars and Colors should match positions */
+
+enum colors {
+	WHITE,
+	AMBER,
+	CYAN,
+	GREEN,
+	MAGENTA,
+	RED,
+	BLUE,
+	BLACK,
+	TEXT_BLACK,
+};
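+
+/*
+ * Note that in all four bar tables below the TEXT_BLACK slot holds
+ * COLOR_BLACK just like BLACK; the separate index only exists so that
+ * gen_text() can address the text background explicitly.
+ */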
+
+/* R   G   B */
+#define COLOR_WHITE	{204, 204, 204}
+#define COLOR_AMBER	{208, 208,   0}
+#define COLOR_CYAN	{  0, 206, 206}
+#define	COLOR_GREEN	{  0, 239,   0}
+#define COLOR_MAGENTA	{239,   0, 239}
+#define COLOR_RED	{205,   0,   0}
+#define COLOR_BLUE	{  0,   0, 255}
+#define COLOR_BLACK	{  0,   0,   0}
+
+struct bar_std {
+	u8 bar[9][3];
+};
+
+/* The maximum number of bars is 10 - otherwise, the input print code
+   would need to be modified */
+static struct bar_std bars[] = {
+	{	/* Standard ITU-R color bar sequence */
+		{ COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
+		  COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
+	}, {
+		{ COLOR_WHITE, COLOR_AMBER, COLOR_BLACK, COLOR_WHITE,
+		  COLOR_AMBER, COLOR_BLACK, COLOR_WHITE, COLOR_AMBER, COLOR_BLACK }
+	}, {
+		{ COLOR_WHITE, COLOR_CYAN, COLOR_BLACK, COLOR_WHITE,
+		  COLOR_CYAN, COLOR_BLACK, COLOR_WHITE, COLOR_CYAN, COLOR_BLACK }
+	}, {
+		{ COLOR_WHITE, COLOR_GREEN, COLOR_BLACK, COLOR_WHITE,
+		  COLOR_GREEN, COLOR_BLACK, COLOR_WHITE, COLOR_GREEN, COLOR_BLACK }
+	},
+};
+
+#define NUM_INPUTS ARRAY_SIZE(bars)
+
+/* RGB to  Y (Luma) Color transform */
+#define TO_Y(r, g, b) \
+	(((16829 * r + 33039 * g + 6416 * b  + 32768) >> 16) + 16)
+/* RGB to  V(Cr) Color transform */
+#define TO_V(r, g, b) \
+	(((28784 * r - 24103 * g - 4681 * b  + 32768) >> 16) + 128)
+/* RGB to  U(Cb) Color transform */
+#define TO_U(r, g, b) \
+	(((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
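+
+/*
+ * Worked example of the fixed-point transform above: the luma coefficients
+ * sum to 16829 + 33039 + 6416 = 56284 ~= 65536 * 219/255, so for the WHITE
+ * bar (204, 204, 204):
+ *
+ *	TO_Y(204, 204, 204) = ((204 * 56284 + 32768) >> 16) + 16
+ *			    = (11514704 >> 16) + 16
+ *			    = 175 + 16 = 191
+ *
+ * i.e. the usual limited-range BT.601 mapping with Y in [16, 235].
+ */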
+
+/* precalculate color bar values to speed up rendering */
+static void precalculate_bars(struct vivi_dev *dev)
+{
+	u8 r, g, b;
+	int k, is_yuv;
+
+	for (k = 0; k < 9; k++) {
+		r = bars[dev->input].bar[k][0];
+		g = bars[dev->input].bar[k][1];
+		b = bars[dev->input].bar[k][2];
+		is_yuv = dev->fmt->is_yuv;
+
+		switch (dev->fmt->fourcc) {
+		case V4L2_PIX_FMT_RGB565:
+		case V4L2_PIX_FMT_RGB565X:
+			r >>= 3;
+			g >>= 2;
+			b >>= 3;
+			break;
+		case V4L2_PIX_FMT_RGB555:
+		case V4L2_PIX_FMT_RGB555X:
+			r >>= 3;
+			g >>= 3;
+			b >>= 3;
+			break;
+		case V4L2_PIX_FMT_YUYV:
+		case V4L2_PIX_FMT_UYVY:
+		case V4L2_PIX_FMT_YVYU:
+		case V4L2_PIX_FMT_VYUY:
+		case V4L2_PIX_FMT_RGB24:
+		case V4L2_PIX_FMT_BGR24:
+		case V4L2_PIX_FMT_RGB32:
+		case V4L2_PIX_FMT_BGR32:
+			break;
+		}
+
+		if (is_yuv) {
+			dev->bars[k][0] = TO_Y(r, g, b);	/* Luma */
+			dev->bars[k][1] = TO_U(r, g, b);	/* Cb */
+			dev->bars[k][2] = TO_V(r, g, b);	/* Cr */
+		} else {
+			dev->bars[k][0] = r;
+			dev->bars[k][1] = g;
+			dev->bars[k][2] = b;
+		}
+	}
+}
+
+#define TSTAMP_MIN_Y	24
+#define TSTAMP_MAX_Y	(TSTAMP_MIN_Y + 15)
+#define TSTAMP_INPUT_X	10
+#define TSTAMP_MIN_X	(54 + TSTAMP_INPUT_X)
+
+/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
+static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
+{
+	u8 r_y, g_u, b_v;
+	u8 alpha = dev->alpha_component;
+	int color;
+	u8 *p;
+
+	r_y = dev->bars[colorpos][0]; /* R or precalculated Y */
+	g_u = dev->bars[colorpos][1]; /* G or precalculated U */
+	b_v = dev->bars[colorpos][2]; /* B or precalculated V */
+
+	for (color = 0; color < dev->pixelsize; color++) {
+		p = buf + color;
+
+		switch (dev->fmt->fourcc) {
+		case V4L2_PIX_FMT_YUYV:
+			switch (color) {
+			case 0:
+				*p = r_y;
+				break;
+			case 1:
+				*p = odd ? b_v : g_u;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_UYVY:
+			switch (color) {
+			case 0:
+				*p = odd ? b_v : g_u;
+				break;
+			case 1:
+				*p = r_y;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_YVYU:
+			switch (color) {
+			case 0:
+				*p = r_y;
+				break;
+			case 1:
+				*p = odd ? g_u : b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_VYUY:
+			switch (color) {
+			case 0:
+				*p = odd ? g_u : b_v;
+				break;
+			case 1:
+				*p = r_y;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB565:
+			switch (color) {
+			case 0:
+				*p = (g_u << 5) | b_v;
+				break;
+			case 1:
+				*p = (r_y << 3) | (g_u >> 3);
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB565X:
+			switch (color) {
+			case 0:
+				*p = (r_y << 3) | (g_u >> 3);
+				break;
+			case 1:
+				*p = (g_u << 5) | b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB555:
+			switch (color) {
+			case 0:
+				*p = (g_u << 5) | b_v;
+				break;
+			case 1:
+				*p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB555X:
+			switch (color) {
+			case 0:
+				*p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+				break;
+			case 1:
+				*p = (g_u << 5) | b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB24:
+			switch (color) {
+			case 0:
+				*p = r_y;
+				break;
+			case 1:
+				*p = g_u;
+				break;
+			case 2:
+				*p = b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_BGR24:
+			switch (color) {
+			case 0:
+				*p = b_v;
+				break;
+			case 1:
+				*p = g_u;
+				break;
+			case 2:
+				*p = r_y;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_RGB32:
+			switch (color) {
+			case 0:
+				*p = alpha;
+				break;
+			case 1:
+				*p = r_y;
+				break;
+			case 2:
+				*p = g_u;
+				break;
+			case 3:
+				*p = b_v;
+				break;
+			}
+			break;
+		case V4L2_PIX_FMT_BGR32:
+			switch (color) {
+			case 0:
+				*p = b_v;
+				break;
+			case 1:
+				*p = g_u;
+				break;
+			case 2:
+				*p = r_y;
+				break;
+			case 3:
+				*p = alpha;
+				break;
+			}
+			break;
+		}
+	}
+}
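+
+/*
+ * Worked example for the RGB565 case above: after precalculate_bars() the
+ * WHITE bar (204, 204, 204) is stored as r = 204 >> 3 = 25, g = 204 >> 2 = 51
+ * and b = 204 >> 3 = 25, so gen_twopix() emits
+ *
+ *	byte 0 = ((51 << 5) | 25) & 0xff = 0x79
+ *	byte 1 = (25 << 3) | (51 >> 3)   = 0xce
+ *
+ * which is the little-endian 16-bit value 0xce79 = rrrrrggg gggbbbbb.
+ */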
+
+static void precalculate_line(struct vivi_dev *dev)
+{
+	int w;
+
+	for (w = 0; w < dev->width * 2; w++) {
+		int colorpos = w / (dev->width / 8) % 8;
+
+		gen_twopix(dev, dev->line + w * dev->pixelsize, colorpos, w & 1);
+	}
+}
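+
+/*
+ * The precalculated line holds 2 * width pixels so that vivi_fillbuff() can
+ * copy a width-sized window starting at any offset (mv_count % width) without
+ * wrapping; at the limits that is 2 * MAX_WIDTH * 4 bytes, which is exactly
+ * the MAX_WIDTH * 8 bytes reserved for line[] in struct vivi_dev.
+ */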
+
+static void gen_text(struct vivi_dev *dev, char *basep,
+					int y, int x, char *text)
+{
+	int line;
+
+	/* Check if it is possible to show the string */
+	if (y + 16 >= dev->height || x + strlen(text) * 8 >= dev->width)
+		return;
+
+	/* Render the text, line by line */
+	for (line = y; line < y + 16; line++) {
+		int j = 0;
+		char *pos = basep + line * dev->width * dev->pixelsize + x * dev->pixelsize;
+		char *s;
+
+		for (s = text; *s; s++) {
+			u8 chr = font8x16[*s * 16 + line - y];
+			int i;
+
+			for (i = 0; i < 7; i++, j++) {
+				/* Draw white font on black background */
+				if (chr & (1 << (7 - i)))
+					gen_twopix(dev, pos + j * dev->pixelsize, WHITE, (x+y) & 1);
+				else
+					gen_twopix(dev, pos + j * dev->pixelsize, TEXT_BLACK, (x+y) & 1);
+			}
+		}
+	}
+}
+
+static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
+{
+	int wmax = dev->width;
+	int hmax = dev->height;
+	struct timeval ts;
+	void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+	unsigned ms;
+	char str[100];
+	int h, line = 1;
+	s32 gain;
+
+	if (!vbuf)
+		return;
+
+	for (h = 0; h < hmax; h++)
+		memcpy(vbuf + h * wmax * dev->pixelsize,
+		       dev->line + (dev->mv_count % wmax) * dev->pixelsize,
+		       wmax * dev->pixelsize);
+
+	/* Updates stream time */
+
+	dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
+	dev->jiffies = jiffies;
+	ms = dev->ms;
+	snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d ",
+			(ms / (60 * 60 * 1000)) % 24,
+			(ms / (60 * 1000)) % 60,
+			(ms / 1000) % 60,
+			ms % 1000);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+	snprintf(str, sizeof(str), " %dx%d, input %d ",
+			dev->width, dev->height, dev->input);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+
+	gain = v4l2_ctrl_g_ctrl(dev->gain);
+	mutex_lock(dev->ctrl_handler.lock);
+	snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
+			dev->brightness->cur.val,
+			dev->contrast->cur.val,
+			dev->saturation->cur.val,
+			dev->hue->cur.val);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+	snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d, alpha 0x%02x ",
+			dev->autogain->cur.val, gain, dev->volume->cur.val,
+			dev->alpha->cur.val);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+	snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
+			dev->int32->cur.val,
+			dev->int64->cur.val64,
+			dev->bitmask->cur.val);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+	snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
+			dev->boolean->cur.val,
+			dev->menu->qmenu[dev->menu->cur.val],
+			dev->string->cur.string);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+	snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
+			dev->int_menu->qmenu_int[dev->int_menu->cur.val],
+			dev->int_menu->cur.val);
+	gen_text(dev, vbuf, line++ * 16, 16, str);
+	mutex_unlock(dev->ctrl_handler.lock);
+	if (dev->button_pressed) {
+		dev->button_pressed--;
+		snprintf(str, sizeof(str), " button pressed!");
+		gen_text(dev, vbuf, line++ * 16, 16, str);
+	}
+
+	dev->mv_count += 2;
+
+	buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+	dev->field_count++;
+	buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
+	do_gettimeofday(&ts);
+	buf->vb.v4l2_buf.timestamp = ts;
+}
+
+static void vivi_thread_tick(struct vivi_dev *dev)
+{
+	struct vivi_dmaqueue *dma_q = &dev->vidq;
+	struct vivi_buffer *buf;
+	unsigned long flags = 0;
+
+	dprintk(dev, 1, "Thread tick\n");
+
+	spin_lock_irqsave(&dev->slock, flags);
+	if (list_empty(&dma_q->active)) {
+		dprintk(dev, 1, "No active queue to serve\n");
+		spin_unlock_irqrestore(&dev->slock, flags);
+		return;
+	}
+
+	buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
+	list_del(&buf->list);
+	spin_unlock_irqrestore(&dev->slock, flags);
+
+	do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
+
+	/* Fill buffer */
+	vivi_fillbuff(dev, buf);
+	dprintk(dev, 1, "filled buffer %p\n", buf);
+
+	vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+	dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
+}
+
+#define frames_to_ms(frames)					\
+	(((frames) * WAKE_NUMERATOR * 1000) / WAKE_DENOMINATOR)
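+
+/*
+ * Worked example: frames_to_ms(1) = (1 * 30 * 1000) / 1001 = 29, so each pass
+ * of vivi_thread() schedules a ~29 ms timeout between generated frames, see
+ * vivi_sleep() below.
+ */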
+
+static void vivi_sleep(struct vivi_dev *dev)
+{
+	struct vivi_dmaqueue *dma_q = &dev->vidq;
+	int timeout;
+	DECLARE_WAITQUEUE(wait, current);
+
+	dprintk(dev, 1, "%s dma_q=0x%08lx\n", __func__,
+		(unsigned long)dma_q);
+
+	add_wait_queue(&dma_q->wq, &wait);
+	if (kthread_should_stop())
+		goto stop_task;
+
+	/* Calculate time to wake up */
+	timeout = msecs_to_jiffies(frames_to_ms(1));
+
+	vivi_thread_tick(dev);
+
+	schedule_timeout_interruptible(timeout);
+
+stop_task:
+	remove_wait_queue(&dma_q->wq, &wait);
+	try_to_freeze();
+}
+
+static int vivi_thread(void *data)
+{
+	struct vivi_dev *dev = data;
+
+	dprintk(dev, 1, "thread started\n");
+
+	set_freezable();
+
+	for (;;) {
+		vivi_sleep(dev);
+
+		if (kthread_should_stop())
+			break;
+	}
+	dprintk(dev, 1, "thread: exit\n");
+	return 0;
+}
+
+static int vivi_start_generating(struct vivi_dev *dev)
+{
+	struct vivi_dmaqueue *dma_q = &dev->vidq;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	/* Resets frame counters */
+	dev->ms = 0;
+	dev->mv_count = 0;
+	dev->jiffies = jiffies;
+
+	dma_q->frame = 0;
+	dma_q->ini_jiffies = jiffies;
+	dma_q->kthread = kthread_run(vivi_thread, dev, dev->v4l2_dev.name);
+
+	if (IS_ERR(dma_q->kthread)) {
+		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+		return PTR_ERR(dma_q->kthread);
+	}
+	/* Wakes thread */
+	wake_up_interruptible(&dma_q->wq);
+
+	dprintk(dev, 1, "returning from %s\n", __func__);
+	return 0;
+}
+
+static void vivi_stop_generating(struct vivi_dev *dev)
+{
+	struct vivi_dmaqueue *dma_q = &dev->vidq;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	/* shutdown control thread */
+	if (dma_q->kthread) {
+		kthread_stop(dma_q->kthread);
+		dma_q->kthread = NULL;
+	}
+
+	/*
+	 * A typical driver might need to wait here until the dma engine stops.
+	 * In this case we can abort immediately, so it's just a noop.
+	 */
+
+	/* Release all active buffers */
+	while (!list_empty(&dma_q->active)) {
+		struct vivi_buffer *buf;
+		buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+		dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
+	}
+}
+/* ------------------------------------------------------------------
+	Videobuf operations
+   ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vq);
+	unsigned long size;
+
+	if (fmt)
+		size = fmt->fmt.pix.sizeimage;
+	else
+		size = dev->width * dev->height * dev->pixelsize;
+
+	if (size == 0)
+		return -EINVAL;
+
+	if (0 == *nbuffers)
+		*nbuffers = 32;
+
+	while (size * *nbuffers > vid_limit * 1024 * 1024)
+		(*nbuffers)--;
+
+	*nplanes = 1;
+
+	sizes[0] = size;
+
+	/*
+	 * videobuf2-vmalloc allocator is context-less so no need to set
+	 * alloc_ctxs array.
+	 */
+
+	dprintk(dev, 1, "%s, count=%d, size=%ld\n", __func__,
+		*nbuffers, size);
+
+	return 0;
+}
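+
+/*
+ * Worked example: for the default 640x480 YUYV format the above size is
+ * 640 * 480 * 2 = 614400 bytes, so with vid_limit = 16 a request for
+ * 32 buffers (also the default when none are requested) is trimmed down to
+ * 27, since 28 * 614400 bytes would already exceed 16 MiB.
+ */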
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
+	unsigned long size;
+
+	dprintk(dev, 1, "%s, field=%d\n", __func__, vb->v4l2_buf.field);
+
+	BUG_ON(NULL == dev->fmt);
+
+	/*
+	 * These properties only change when the queue is idle, see s_fmt.
+	 * The checks below should not be performed here, on each
+	 * buffer_prepare (i.e. on each qbuf). Most of the code in this
+	 * function should thus be moved to buffer_init and s_fmt.
+	 */
+	if (dev->width  < 48 || dev->width  > MAX_WIDTH ||
+	    dev->height < 32 || dev->height > MAX_HEIGHT)
+		return -EINVAL;
+
+	size = dev->width * dev->height * dev->pixelsize;
+	if (vb2_plane_size(vb, 0) < size) {
+		dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
+				__func__, vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(&buf->vb, 0, size);
+
+	buf->fmt = dev->fmt;
+
+	precalculate_bars(dev);
+	precalculate_line(dev);
+
+	return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
+	struct vivi_dmaqueue *vidq = &dev->vidq;
+	unsigned long flags = 0;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	spin_lock_irqsave(&dev->slock, flags);
+	list_add_tail(&buf->list, &vidq->active);
+	spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vq);
+	dprintk(dev, 1, "%s\n", __func__);
+	return vivi_start_generating(dev);
+}
+
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vq);
+	dprintk(dev, 1, "%s\n", __func__);
+	vivi_stop_generating(dev);
+	return 0;
+}
+
+static void vivi_lock(struct vb2_queue *vq)
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vq);
+	mutex_lock(&dev->mutex);
+}
+
+static void vivi_unlock(struct vb2_queue *vq)
+{
+	struct vivi_dev *dev = vb2_get_drv_priv(vq);
+	mutex_unlock(&dev->mutex);
+}
+
+
+static struct vb2_ops vivi_video_qops = {
+	.queue_setup		= queue_setup,
+	.buf_prepare		= buffer_prepare,
+	.buf_queue		= buffer_queue,
+	.start_streaming	= start_streaming,
+	.stop_streaming		= stop_streaming,
+	.wait_prepare		= vivi_unlock,
+	.wait_finish		= vivi_lock,
+};
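+
+/*
+ * wait_prepare/wait_finish hand the per-device mutex back to vb2 so that it
+ * is dropped while vb2 sleeps waiting for a buffer (e.g. a blocking DQBUF)
+ * and re-taken afterwards; otherwise a blocked reader would hold the lock
+ * and stall every other file operation on the device.
+ */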
+
+/* ------------------------------------------------------------------
+	IOCTL vidioc handling
+   ------------------------------------------------------------------*/
+static int vidioc_querycap(struct file *file, void  *priv,
+					struct v4l2_capability *cap)
+{
+	struct vivi_dev *dev = video_drvdata(file);
+
+	strcpy(cap->driver, "vivi");
+	strcpy(cap->card, "vivi");
+	strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+			    V4L2_CAP_READWRITE;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv,
+					struct v4l2_fmtdesc *f)
+{
+	struct vivi_fmt *fmt;
+
+	if (f->index >= ARRAY_SIZE(formats))
+		return -EINVAL;
+
+	fmt = &formats[f->index];
+
+	strlcpy(f->description, fmt->name, sizeof(f->description));
+	f->pixelformat = fmt->fourcc;
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct vivi_dev *dev = video_drvdata(file);
+
+	f->fmt.pix.width        = dev->width;
+	f->fmt.pix.height       = dev->height;
+	f->fmt.pix.field        = V4L2_FIELD_INTERLACED;
+	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
+	f->fmt.pix.bytesperline =
+		(f->fmt.pix.width * dev->fmt->depth) >> 3;
+	f->fmt.pix.sizeimage =
+		f->fmt.pix.height * f->fmt.pix.bytesperline;
+	if (dev->fmt->is_yuv)
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	else
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+			struct v4l2_format *f)
+{
+	struct vivi_dev *dev = video_drvdata(file);
+	struct vivi_fmt *fmt;
+
+	fmt = get_format(f);
+	if (!fmt) {
+		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
+			f->fmt.pix.pixelformat);
+		f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+		fmt = get_format(f);
+	}
+
+	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+	v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+			      &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
+	f->fmt.pix.bytesperline =
+		(f->fmt.pix.width * fmt->depth) >> 3;
+	f->fmt.pix.sizeimage =
+		f->fmt.pix.height * f->fmt.pix.bytesperline;
+	if (fmt->is_yuv)
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	else
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+	f->fmt.pix.priv = 0;
+	return 0;
+}
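+
+/*
+ * Worked example: TRY_FMT on 640x480 YUYV (depth 16) yields
+ * bytesperline = (640 * 16) >> 3 = 1280 and sizeimage = 480 * 1280 = 614400,
+ * which matches the default buffer size computed in queue_setup().
+ */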
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct vivi_dev *dev = video_drvdata(file);
+	struct vb2_queue *q = &dev->vb_vidq;
+
+	int ret = vidioc_try_fmt_vid_cap(file, priv, f);
+	if (ret < 0)
+		return ret;
+
+	if (vb2_is_busy(q)) {
+		dprintk(dev, 1, "%s device busy\n", __func__);
+		return -EBUSY;
+	}
+
+	dev->fmt = get_format(f);
+	dev->pixelsize = dev->fmt->depth / 8;
+	dev->width = f->fmt.pix.width;
+	dev->height = f->fmt.pix.height;
+
+	return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+					 struct v4l2_frmsizeenum *fsize)
+{
+	static const struct v4l2_frmsize_stepwise sizes = {
+		48, MAX_WIDTH, 4,
+		32, MAX_HEIGHT, 1
+	};
+	int i;
+
+	if (fsize->index)
+		return -EINVAL;
+	for (i = 0; i < ARRAY_SIZE(formats); i++)
+		if (formats[i].fourcc == fsize->pixel_format)
+			break;
+	if (i == ARRAY_SIZE(formats))
+		return -EINVAL;
+	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+	fsize->stepwise = sizes;
+	return 0;
+}
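+
+/*
+ * The 4-pixel width step advertised here matches the width alignment of
+ * 1 << 2 pixels passed to v4l_bound_align_image() in try_fmt above, so every
+ * size reported by ENUM_FRAMESIZES is accepted unchanged by TRY_FMT.
+ */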
+
+/* the inputs in this sample driver just select different color bar patterns */
+static int vidioc_enum_input(struct file *file, void *priv,
+				struct v4l2_input *inp)
+{
+	if (inp->index >= NUM_INPUTS)
+		return -EINVAL;
+
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+	sprintf(inp->name, "Camera %u", inp->index);
+	return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	struct vivi_dev *dev = video_drvdata(file);
+
+	*i = dev->input;
+	return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+	struct vivi_dev *dev = video_drvdata(file);
+
+	if (i >= NUM_INPUTS)
+		return -EINVAL;
+
+	if (i == dev->input)
+		return 0;
+
+	dev->input = i;
+	precalculate_bars(dev);
+	precalculate_line(dev);
+	return 0;
+}
+
+/* --- controls ---------------------------------------------- */
+
+static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
+
+	if (ctrl == dev->autogain)
+		dev->gain->val = jiffies & 0xff;
+	return 0;
+}
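+
+/*
+ * autogain and gain are clustered as an auto cluster in
+ * vivi_create_instance(), so gain is marked volatile while autogain is on;
+ * reading it then ends up in the handler above, which fakes a changing
+ * hardware gain value from the jiffies counter.
+ */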
+
+static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
+
+	switch (ctrl->id) {
+	case V4L2_CID_ALPHA_COMPONENT:
+		dev->alpha_component = ctrl->val;
+		break;
+	default:
+		if (ctrl == dev->button)
+			dev->button_pressed = 30;
+		break;
+	}
+	return 0;
+}
+
+/* ------------------------------------------------------------------
+	File operations for the device
+   ------------------------------------------------------------------*/
+
+static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
+	.g_volatile_ctrl = vivi_g_volatile_ctrl,
+	.s_ctrl = vivi_s_ctrl,
+};
+
+#define VIVI_CID_CUSTOM_BASE	(V4L2_CID_USER_BASE | 0xf000)
+
+static const struct v4l2_ctrl_config vivi_ctrl_button = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 0,
+	.name = "Button",
+	.type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_boolean = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 1,
+	.name = "Boolean",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+	.def = 1,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int32 = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 2,
+	.name = "Integer 32 Bits",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0x80000000,
+	.max = 0x7fffffff,
+	.step = 1,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int64 = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 3,
+	.name = "Integer 64 Bits",
+	.type = V4L2_CTRL_TYPE_INTEGER64,
+};
+
+static const char * const vivi_ctrl_menu_strings[] = {
+	"Menu Item 0 (Skipped)",
+	"Menu Item 1",
+	"Menu Item 2 (Skipped)",
+	"Menu Item 3",
+	"Menu Item 4",
+	"Menu Item 5 (Skipped)",
+	NULL,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_menu = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 4,
+	.name = "Menu",
+	.type = V4L2_CTRL_TYPE_MENU,
+	.min = 1,
+	.max = 4,
+	.def = 3,
+	.menu_skip_mask = 0x04,
+	.qmenu = vivi_ctrl_menu_strings,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_string = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 5,
+	.name = "String",
+	.type = V4L2_CTRL_TYPE_STRING,
+	.min = 2,
+	.max = 4,
+	.step = 1,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 6,
+	.name = "Bitmask",
+	.type = V4L2_CTRL_TYPE_BITMASK,
+	.def = 0x80002000,
+	.min = 0,
+	.max = 0x80402010,
+	.step = 0,
+};
+
+static const s64 vivi_ctrl_int_menu_values[] = {
+	1, 1, 2, 3, 5, 8, 13, 21, 42,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int_menu = {
+	.ops = &vivi_ctrl_ops,
+	.id = VIVI_CID_CUSTOM_BASE + 7,
+	.name = "Integer menu",
+	.type = V4L2_CTRL_TYPE_INTEGER_MENU,
+	.min = 1,
+	.max = 8,
+	.def = 4,
+	.menu_skip_mask = 0x02,
+	.qmenu_int = vivi_ctrl_int_menu_values,
+};
+
+static const struct v4l2_file_operations vivi_fops = {
+	.owner		= THIS_MODULE,
+	.open           = v4l2_fh_open,
+	.release        = vb2_fop_release,
+	.read           = vb2_fop_read,
+	.poll		= vb2_fop_poll,
+	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+	.mmap           = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
+	.vidioc_querycap      = vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap  = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap     = vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap   = vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap     = vidioc_s_fmt_vid_cap,
+	.vidioc_enum_framesizes   = vidioc_enum_framesizes,
+	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
+	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
+	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
+	.vidioc_querybuf      = vb2_ioctl_querybuf,
+	.vidioc_qbuf          = vb2_ioctl_qbuf,
+	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
+	.vidioc_enum_input    = vidioc_enum_input,
+	.vidioc_g_input       = vidioc_g_input,
+	.vidioc_s_input       = vidioc_s_input,
+	.vidioc_streamon      = vb2_ioctl_streamon,
+	.vidioc_streamoff     = vb2_ioctl_streamoff,
+	.vidioc_log_status    = v4l2_ctrl_log_status,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static struct video_device vivi_template = {
+	.name		= "vivi",
+	.fops           = &vivi_fops,
+	.ioctl_ops 	= &vivi_ioctl_ops,
+	.release	= video_device_release_empty,
+};
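+
+/*
+ * A minimal user-space sketch (not part of the driver itself) of how the
+ * fops/ioctl set above can be exercised through the read() I/O path
+ * (vb2_fop_read); the device node name and the fixed 640x480 YUYV format are
+ * assumptions made only for this example:
+ *
+ *	int fd = open("/dev/video0", O_RDWR);
+ *	struct v4l2_format fmt = {
+ *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *		.fmt.pix = {
+ *			.width       = 640,
+ *			.height      = 480,
+ *			.pixelformat = V4L2_PIX_FMT_YUYV,
+ *			.field       = V4L2_FIELD_INTERLACED,
+ *		},
+ *	};
+ *	char *frame;
+ *
+ *	if (fd < 0 || ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
+ *		return -1;
+ *	frame = malloc(fmt.fmt.pix.sizeimage);
+ *	if (read(fd, frame, fmt.fmt.pix.sizeimage) < 0)
+ *		return -1;
+ *	free(frame);
+ *	close(fd);
+ */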
+
+/* -----------------------------------------------------------------
+	Initialization and module stuff
+   ------------------------------------------------------------------*/
+
+static int vivi_release(void)
+{
+	struct vivi_dev *dev;
+	struct list_head *list;
+
+	while (!list_empty(&vivi_devlist)) {
+		list = vivi_devlist.next;
+		list_del(list);
+		dev = list_entry(list, struct vivi_dev, vivi_devlist);
+
+		v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+			video_device_node_name(&dev->vdev));
+		video_unregister_device(&dev->vdev);
+		v4l2_device_unregister(&dev->v4l2_dev);
+		v4l2_ctrl_handler_free(&dev->ctrl_handler);
+		kfree(dev);
+	}
+
+	return 0;
+}
+
+static int __init vivi_create_instance(int inst)
+{
+	struct vivi_dev *dev;
+	struct video_device *vfd;
+	struct v4l2_ctrl_handler *hdl;
+	struct vb2_queue *q;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+			"%s-%03d", VIVI_MODULE_NAME, inst);
+	ret = v4l2_device_register(NULL, &dev->v4l2_dev);
+	if (ret)
+		goto free_dev;
+
+	dev->fmt = &formats[0];
+	dev->width = 640;
+	dev->height = 480;
+	dev->pixelsize = dev->fmt->depth / 8;
+	hdl = &dev->ctrl_handler;
+	v4l2_ctrl_handler_init(hdl, 11);
+	dev->volume = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_AUDIO_VOLUME, 0, 255, 1, 200);
+	dev->brightness = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+	dev->contrast = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_CONTRAST, 0, 255, 1, 16);
+	dev->saturation = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_SATURATION, 0, 255, 1, 127);
+	dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_HUE, -128, 127, 1, 0);
+	dev->autogain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+	dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_GAIN, 0, 255, 1, 100);
+	dev->alpha = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+			V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
+	dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
+	dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
+	dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
+	dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL);
+	dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
+	dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
+	dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
+	dev->int_menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int_menu, NULL);
+	if (hdl->error) {
+		ret = hdl->error;
+		goto unreg_dev;
+	}
+	v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
+	dev->v4l2_dev.ctrl_handler = hdl;
+
+	/* initialize locks */
+	spin_lock_init(&dev->slock);
+
+	/* initialize queue */
+	q = &dev->vb_vidq;
+	memset(q, 0, sizeof(dev->vb_vidq));
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+	q->drv_priv = dev;
+	q->buf_struct_size = sizeof(struct vivi_buffer);
+	q->ops = &vivi_video_qops;
+	q->mem_ops = &vb2_vmalloc_memops;
+
+	vb2_queue_init(q);
+
+	mutex_init(&dev->mutex);
+
+	/* init video dma queues */
+	INIT_LIST_HEAD(&dev->vidq.active);
+	init_waitqueue_head(&dev->vidq.wq);
+
+	vfd = &dev->vdev;
+	*vfd = vivi_template;
+	vfd->debug = debug;
+	vfd->v4l2_dev = &dev->v4l2_dev;
+	vfd->queue = q;
+	set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+
+	/*
+	 * Provide a mutex to v4l2 core. It will be used to protect
+	 * all fops and v4l2 ioctls.
+	 */
+	vfd->lock = &dev->mutex;
+	video_set_drvdata(vfd, dev);
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+	if (ret < 0)
+		goto unreg_dev;
+
+	/* Now that everything is fine, let's add it to device list */
+	list_add_tail(&dev->vivi_devlist, &vivi_devlist);
+
+	v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+		  video_device_node_name(vfd));
+	return 0;
+
+unreg_dev:
+	v4l2_ctrl_handler_free(hdl);
+	v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+	kfree(dev);
+	return ret;
+}
+
+/* This routine allocates from 1 to n_devs virtual drivers.
+
+   The real maximum number of virtual drivers will depend on how many
+   instances are created successfully. This is limited to the maximum number
+   of devices that videodev supports, which is equal to VIDEO_NUM_DEVICES.
+ */
+static int __init vivi_init(void)
+{
+	const struct font_desc *font = find_font("VGA8x16");
+	int ret = 0, i;
+
+	if (font == NULL) {
+		printk(KERN_ERR "vivi: could not find font\n");
+		return -ENODEV;
+	}
+	font8x16 = font->data;
+
+	if (n_devs <= 0)
+		n_devs = 1;
+
+	for (i = 0; i < n_devs; i++) {
+		ret = vivi_create_instance(i);
+		if (ret) {
+			/* If some instantiations succeeded, keep driver */
+			if (i)
+				ret = 0;
+			break;
+		}
+	}
+
+	if (ret < 0) {
+		printk(KERN_ERR "vivi: error %d while loading driver\n", ret);
+		return ret;
+	}
+
+	printk(KERN_INFO "Video Technology Magazine Virtual Video "
+			"Capture Board ver %s successfully loaded.\n",
+			VIVI_VERSION);
+
+	/* n_devs will reflect the actual number of allocated devices */
+	n_devs = i;
+
+	return ret;
+}
+
+static void __exit vivi_exit(void)
+{
+	vivi_release();
+}
+
+module_init(vivi_init);
+module_exit(vivi_exit);