Diffstat (limited to 'drivers/video')
-rw-r--r--  drivers/video/Kconfig             |    3
-rw-r--r--  drivers/video/Makefile            |    1
-rw-r--r--  drivers/video/adf/Kconfig         |   14
-rw-r--r--  drivers/video/adf/Makefile        |   15
-rw-r--r--  drivers/video/adf/adf.c           | 1166
-rw-r--r--  drivers/video/adf/adf.h           |   71
-rw-r--r--  drivers/video/adf/adf_client.c    |  810
-rw-r--r--  drivers/video/adf/adf_fbdev.c     |  651
-rw-r--r--  drivers/video/adf/adf_fops.c      |  957
-rw-r--r--  drivers/video/adf/adf_fops.h      |   37
-rw-r--r--  drivers/video/adf/adf_fops32.c    |  217
-rw-r--r--  drivers/video/adf/adf_fops32.h    |   78
-rw-r--r--  drivers/video/adf/adf_format.c    |  280
-rw-r--r--  drivers/video/adf/adf_memblock.c  |  149
-rw-r--r--  drivers/video/adf/adf_sysfs.c     |  296
-rw-r--r--  drivers/video/adf/adf_sysfs.h     |   33
-rw-r--r--  drivers/video/adf/adf_trace.h     |   93
17 files changed, 4869 insertions, 2 deletions
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2b651d7ff1f0..e9c57dd05f3c 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,8 +23,6 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/host1x/Kconfig"
-source "drivers/gpu/ion/Kconfig"
-
config VGASTATE
tristate
default n
@@ -2479,6 +2477,7 @@ source "drivers/video/mmp/Kconfig"
source "drivers/video/tegra/Kconfig"
source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index d15a83f800c5..abf6c94c50a3 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -12,6 +12,7 @@ fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
modedb.o fbcvt.o
fb-objs := $(fb-y)
+obj-$(CONFIG_ADF) += adf/
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
obj-y += backlight/
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
new file mode 100644
index 000000000000..33858b73d8bb
--- /dev/null
+++ b/drivers/video/adf/Kconfig
@@ -0,0 +1,14 @@
+menuconfig ADF
+ depends on SYNC
+ depends on DMA_SHARED_BUFFER
+ tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+ depends on ADF
+ depends on FB
+ tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+ depends on ADF
+ depends on HAVE_MEMBLOCK
+ tristate "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
new file mode 100644
index 000000000000..78d0915122f4
--- /dev/null
+++ b/drivers/video/adf/Makefile
@@ -0,0 +1,15 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf.o \
+ adf_client.o \
+ adf_fops.o \
+ adf_format.o \
+ adf_sysfs.o
+
+obj-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
new file mode 100644
index 000000000000..933e74ac8098
--- /dev/null
+++ b/drivers/video/adf/adf.c
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * adf_modeinfo_{set_name,set_vrefresh} modified from
+ * drivers/gpu/drm/drm_modes.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include "adf_trace.h"
+
+#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+
+static DEFINE_IDR(adf_devices);
+
+static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
+{
+ /* sync_fence_wait() dumps debug information on timeout. Experience
+ has shown that if the pipeline gets stuck, a short timeout followed
+ by a longer one provides useful information for debugging. */
+ int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
+ if (err >= 0)
+ return;
+
+ if (err == -ETIME)
+ err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
+
+ if (err < 0)
+ dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
+}
+
+void adf_buffer_cleanup(struct adf_buffer *buf)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
+ if (buf->dma_bufs[i])
+ dma_buf_put(buf->dma_bufs[i]);
+
+ if (buf->acquire_fence)
+ sync_fence_put(buf->acquire_fence);
+}
+
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+ struct adf_buffer *buf)
+{
+ /* calling adf_buffer_mapping_cleanup() is safe even if mapping is
+ uninitialized or partially-initialized, as long as it was
+ zeroed on allocation */
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
+ if (mapping->sg_tables[i])
+ dma_buf_unmap_attachment(mapping->attachments[i],
+ mapping->sg_tables[i], DMA_TO_DEVICE);
+ if (mapping->attachments[i])
+ dma_buf_detach(buf->dma_bufs[i],
+ mapping->attachments[i]);
+ }
+}
+
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
+{
+ size_t i;
+
+ if (post->state)
+ dev->ops->state_free(dev, post->state);
+
+ for (i = 0; i < post->config.n_bufs; i++) {
+ adf_buffer_mapping_cleanup(&post->config.mappings[i],
+ &post->config.bufs[i]);
+ adf_buffer_cleanup(&post->config.bufs[i]);
+ }
+
+ kfree(post->config.custom_data);
+ kfree(post->config.mappings);
+ kfree(post->config.bufs);
+ kfree(post);
+}
+
+static void adf_sw_advance_timeline(struct adf_device *dev)
+{
+#ifdef CONFIG_SW_SYNC
+ sw_sync_timeline_inc(dev->timeline, 1);
+#else
+ BUG();
+#endif
+}
+
+static void adf_post_work_func(struct kthread_work *work)
+{
+ struct adf_device *dev =
+ container_of(work, struct adf_device, post_work);
+ struct adf_pending_post *post, *next;
+ struct list_head saved_list;
+
+ mutex_lock(&dev->post_lock);
+ memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
+ list_replace_init(&dev->post_list, &saved_list);
+ mutex_unlock(&dev->post_lock);
+
+ list_for_each_entry_safe(post, next, &saved_list, head) {
+ int i;
+
+ for (i = 0; i < post->config.n_bufs; i++) {
+ struct sync_fence *fence =
+ post->config.bufs[i].acquire_fence;
+ if (fence)
+ adf_fence_wait(dev, fence);
+ }
+
+ dev->ops->post(dev, &post->config, post->state);
+
+ if (dev->ops->advance_timeline)
+ dev->ops->advance_timeline(dev, &post->config,
+ post->state);
+ else
+ adf_sw_advance_timeline(dev);
+
+ list_del(&post->head);
+ if (dev->onscreen)
+ adf_post_cleanup(dev, dev->onscreen);
+ dev->onscreen = post;
+ }
+}
+
+void adf_attachment_free(struct adf_attachment_list *attachment)
+{
+ list_del(&attachment->head);
+ kfree(attachment);
+}
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+ enum adf_event_type type)
+{
+ struct rb_root *root = &obj->event_refcount;
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct adf_event_refcount *refcount;
+
+ while (*new) {
+ refcount = container_of(*new, struct adf_event_refcount, node);
+ parent = *new;
+
+ if (refcount->type > type)
+ new = &(*new)->rb_left;
+ else if (refcount->type < type)
+ new = &(*new)->rb_right;
+ else
+ return refcount;
+ }
+
+ refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
+ if (!refcount)
+ return NULL;
+ refcount->type = type;
+
+ rb_link_node(&refcount->node, parent, new);
+ rb_insert_color(&refcount->node, root);
+ return refcount;
+}
+
+/**
+ * adf_event_get - increase the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed. ops are allowed
+ * to sleep, so adf_event_get() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, or -%EINVAL if the object does not support the
+ * requested event type.
+ */
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
+{
+ struct adf_event_refcount *refcount;
+ int old_refcount;
+ int ret;
+
+ ret = adf_obj_check_supports_event(obj, type);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&obj->event_lock);
+
+ refcount = adf_obj_find_event_refcount(obj, type);
+ if (!refcount) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ old_refcount = refcount->refcount++;
+
+ if (old_refcount == 0) {
+ obj->ops->set_event(obj, type, true);
+ trace_adf_event_enable(obj, type);
+ }
+
+done:
+ mutex_unlock(&obj->event_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_event_get);
+
+/**
+ * adf_event_put - decrease the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed. ops are allowed
+ * to sleep, so adf_event_put() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EINVAL if the object does not support the
+ * requested event type, or -%EALREADY if the refcount is already 0.
+ */
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
+{
+ struct adf_event_refcount *refcount;
+ int old_refcount;
+ int ret;
+
+ ret = adf_obj_check_supports_event(obj, type);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&obj->event_lock);
+
+ refcount = adf_obj_find_event_refcount(obj, type);
+ if (!refcount) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ old_refcount = refcount->refcount--;
+
+ if (WARN_ON(old_refcount == 0)) {
+ refcount->refcount++;
+ ret = -EALREADY;
+ } else if (old_refcount == 1) {
+ obj->ops->set_event(obj, type, false);
+ trace_adf_event_disable(obj, type);
+ }
+
+done:
+ mutex_unlock(&obj->event_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_event_put);
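
Usage sketch (illustrative only, not part of the patch): a client interested in vsync events brackets its use with the get/put pair, so the driver only keeps the event source enabled while someone is listening.

	err = adf_event_get(&intf->base, ADF_EVENT_VSYNC);
	if (err < 0)
		return err;
	/* ... consume vsync events ... */
	adf_event_put(&intf->base, ADF_EVENT_VSYNC);
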
+
+/**
+ * adf_vsync_wait - wait for a vsync event on a display interface
+ *
+ * @intf: the display interface
+ * @timeout: timeout in milliseconds (0 = wait indefinitely)
+ *
+ * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
+ *
+ * This function returns -%ERESTARTSYS if it is interrupted by a signal.
+ * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
+ * this function returns the number of remaining jiffies or -%ETIMEDOUT on
+ * timeout.
+ */
+int adf_vsync_wait(struct adf_interface *intf, long timeout)
+{
+ ktime_t timestamp;
+ int ret;
+ unsigned long flags;
+
+ read_lock_irqsave(&intf->vsync_lock, flags);
+ timestamp = intf->vsync_timestamp;
+ read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+ adf_vsync_get(intf);
+ if (timeout) {
+ ret = wait_event_interruptible_timeout(intf->vsync_wait,
+ !ktime_equal(timestamp,
+ intf->vsync_timestamp),
+ msecs_to_jiffies(timeout));
+ if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
+ ret = -ETIMEDOUT;
+ } else {
+ ret = wait_event_interruptible(intf->vsync_wait,
+ !ktime_equal(timestamp,
+ intf->vsync_timestamp));
+ }
+ adf_vsync_put(intf);
+
+ return ret;
+}
+EXPORT_SYMBOL(adf_vsync_wait);
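
Usage sketch (illustrative only): wait up to 100 ms for the next vsync and tolerate a timeout.

	ret = adf_vsync_wait(intf, 100);
	if (ret == -ETIMEDOUT)
		dev_warn(&intf->base.dev, "no vsync within 100 ms\n");
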
+
+static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
+{
+ struct adf_file *file;
+ unsigned long flags;
+
+ trace_adf_event(obj, event->type);
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+
+ list_for_each_entry(file, &obj->file_list, head)
+ if (test_bit(event->type, file->event_subscriptions))
+ adf_file_queue_event(file, event);
+
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+}
+
+/**
+ * adf_event_notify - notify userspace of a driver-private event
+ *
+ * @obj: the ADF object that produced the event
+ * @event: the event
+ *
+ * adf_event_notify() may be called safely from an atomic context. It will
+ * copy @event if needed, so @event may point to a variable on the stack.
+ *
+ * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
+ * ADF provides adf_vsync_notify() and
+ * adf_hotplug_notify_{connected,disconnected}() for these events.
+ */
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
+{
+ if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
+ event->type == ADF_EVENT_HOTPLUG))
+ return -EINVAL;
+
+ adf_event_queue(obj, event);
+ return 0;
+}
+EXPORT_SYMBOL(adf_event_notify);
+
+/**
+ * adf_vsync_notify - notify ADF of a display interface's vsync event
+ *
+ * @intf: the display interface
+ * @timestamp: the time the vsync occurred
+ *
+ * adf_vsync_notify() may be called safely from an atomic context.
+ */
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
+{
+ unsigned long flags;
+ struct adf_vsync_event event;
+
+ write_lock_irqsave(&intf->vsync_lock, flags);
+ intf->vsync_timestamp = timestamp;
+ write_unlock_irqrestore(&intf->vsync_lock, flags);
+
+ wake_up_interruptible_all(&intf->vsync_wait);
+
+ event.base.type = ADF_EVENT_VSYNC;
+ event.base.length = sizeof(event);
+ event.timestamp = ktime_to_ns(timestamp);
+ adf_event_queue(&intf->base, &event.base);
+}
+EXPORT_SYMBOL(adf_vsync_notify);
+
+void adf_hotplug_notify(struct adf_interface *intf, bool connected,
+ struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+ unsigned long flags;
+ struct adf_hotplug_event event;
+ struct drm_mode_modeinfo *old_modelist;
+
+ write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+ old_modelist = intf->modelist;
+ intf->hotplug_detect = connected;
+ intf->modelist = modelist;
+ intf->n_modes = n_modes;
+ write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+ kfree(old_modelist);
+
+ event.base.length = sizeof(event);
+ event.base.type = ADF_EVENT_HOTPLUG;
+ event.connected = connected;
+ adf_event_queue(&intf->base, &event.base);
+}
+
+/**
+ * adf_hotplug_notify_connected - notify ADF of a display interface being
+ * connected to a display
+ *
+ * @intf: the display interface
+ * @modelist: hardware modes supported by display
+ * @n_modes: length of modelist
+ *
+ * @modelist is copied as needed, so it may point to a variable on the stack.
+ *
+ * adf_hotplug_notify_connected() may NOT be called safely from an atomic
+ * context.
+ *
+ * Returns 0 on success or error code (<0) on error.
+ */
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+ struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+ struct drm_mode_modeinfo *modelist_copy;
+
+ if (n_modes > ADF_MAX_MODES)
+ return -ENOMEM;
+
+ modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
+ GFP_KERNEL);
+ if (!modelist_copy)
+ return -ENOMEM;
+ memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
+
+ adf_hotplug_notify(intf, true, modelist_copy, n_modes);
+ return 0;
+}
+EXPORT_SYMBOL(adf_hotplug_notify_connected);
+
+/**
+ * adf_hotplug_notify_disconnected - notify ADF of a display interface being
+ * disconnected from a display
+ *
+ * @intf: the display interface
+ *
+ * adf_hotplug_notify_disconnected() may be called safely from an atomic
+ * context.
+ */
+void adf_hotplug_notify_disconnected(struct adf_interface *intf)
+{
+ adf_hotplug_notify(intf, false, NULL, 0);
+}
+EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
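
Usage sketch (illustrative only; the mode values are placeholders): a driver's hotplug handler reports a newly connected display with a modelist built on the stack, and later reports the disconnect.

	struct drm_mode_modeinfo mode = {
		.hdisplay = 1920,
		.vdisplay = 1080,
	};

	adf_modeinfo_set_name(&mode);
	err = adf_hotplug_notify_connected(intf, &mode, 1);
	/* ... on cable removal ... */
	adf_hotplug_notify_disconnected(intf);
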
+
+static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
+ struct idr *idr, struct adf_device *parent,
+ const struct adf_obj_ops *ops, const char *fmt, va_list args)
+{
+ int ret;
+
+ if (ops && ops->supports_event && !ops->set_event) {
+ pr_err("%s: %s implements supports_event but not set_event\n",
+ __func__, adf_obj_type_str(type));
+ return -EINVAL;
+ }
+
+ ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+ return ret;
+ }
+ obj->id = ret;
+
+ vscnprintf(obj->name, sizeof(obj->name), fmt, args);
+
+ obj->type = type;
+ obj->ops = ops;
+ obj->parent = parent;
+ mutex_init(&obj->event_lock);
+ obj->event_refcount = RB_ROOT;
+ spin_lock_init(&obj->file_lock);
+ INIT_LIST_HEAD(&obj->file_list);
+ return 0;
+}
+
+static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
+{
+ struct rb_node *node = rb_first(&obj->event_refcount);
+
+ while (node) {
+ struct adf_event_refcount *refcount =
+ container_of(node, struct adf_event_refcount,
+ node);
+ kfree(refcount);
+ node = rb_first(&obj->event_refcount);
+ }
+
+ mutex_destroy(&obj->event_lock);
+ idr_remove(idr, obj->id);
+}
+
+/**
+ * adf_device_init - initialize ADF-internal data for a display device
+ * and create sysfs entries
+ *
+ * @dev: the display device
+ * @parent: the device's parent device
+ * @ops: the device's associated ops
+ * @fmt: formatting string for the display device's name
+ *
+ * @fmt specifies the device's sysfs filename and the name returned to
+ * userspace through the %ADF_GET_DEVICE_DATA ioctl.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_device_init(struct adf_device *dev, struct device *parent,
+ const struct adf_device_ops *ops, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+
+ if (!ops->validate || !ops->post) {
+ pr_err("%s: device must implement validate and post\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!ops->complete_fence && !ops->advance_timeline) {
+ if (!IS_ENABLED(CONFIG_SW_SYNC)) {
+ pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else if (!(ops->complete_fence && ops->advance_timeline)) {
+ pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+
+ va_start(args, fmt);
+ ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+ &ops->base, fmt, args);
+ va_end(args);
+ if (ret < 0)
+ return ret;
+
+ dev->dev = parent;
+ dev->ops = ops;
+ idr_init(&dev->overlay_engines);
+ idr_init(&dev->interfaces);
+ mutex_init(&dev->client_lock);
+ INIT_LIST_HEAD(&dev->post_list);
+ mutex_init(&dev->post_lock);
+ init_kthread_worker(&dev->post_worker);
+ INIT_LIST_HEAD(&dev->attached);
+ INIT_LIST_HEAD(&dev->attach_allowed);
+
+ dev->post_thread = kthread_run(kthread_worker_fn,
+ &dev->post_worker, dev->base.name);
+ if (IS_ERR(dev->post_thread)) {
+ ret = PTR_ERR(dev->post_thread);
+ dev->post_thread = NULL;
+
+ pr_err("%s: failed to run config posting thread: %d\n",
+ __func__, ret);
+ goto err;
+ }
+ init_kthread_work(&dev->post_work, adf_post_work_func);
+
+ ret = adf_device_sysfs_init(dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ adf_device_destroy(dev);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_init);
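
Usage sketch (illustrative only; the example_* identifiers and the platform-driver context are hypothetical): a minimal driver provides the two required ops and registers its device at probe time. Because neither complete_fence nor advance_timeline is implemented here, this configuration relies on CONFIG_SW_SYNC as described above.

static struct adf_device example_dev;

static const struct adf_device_ops example_dev_ops = {
	.validate = example_validate,
	.post = example_post,
};

	/* in the driver's probe(), with pdev a hypothetical platform_device */
	err = adf_device_init(&example_dev, &pdev->dev, &example_dev_ops,
			"example-display");
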
+
+/**
+ * adf_device_destroy - clean up ADF-internal data for a display device
+ *
+ * @dev: the display device
+ */
+void adf_device_destroy(struct adf_device *dev)
+{
+ struct adf_attachment_list *entry, *next;
+
+ idr_destroy(&dev->interfaces);
+ idr_destroy(&dev->overlay_engines);
+
+ if (dev->post_thread) {
+ flush_kthread_worker(&dev->post_worker);
+ kthread_stop(dev->post_thread);
+ }
+
+ if (dev->onscreen)
+ adf_post_cleanup(dev, dev->onscreen);
+ adf_device_sysfs_destroy(dev);
+ list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+ adf_attachment_free(entry);
+ }
+ list_for_each_entry_safe(entry, next, &dev->attached, head) {
+ adf_attachment_free(entry);
+ }
+ mutex_destroy(&dev->post_lock);
+ mutex_destroy(&dev->client_lock);
+ adf_obj_destroy(&dev->base, &adf_devices);
+}
+EXPORT_SYMBOL(adf_device_destroy);
+
+/**
+ * adf_interface_init - initialize ADF-internal data for a display interface
+ * and create sysfs entries
+ *
+ * @intf: the display interface
+ * @dev: the interface's "parent" display device
+ * @type: interface type (see enum @adf_interface_type)
+ * @idx: which interface of type @type;
+ * e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @ops: the interface's associated ops
+ * @fmt: formatting string for the display interface's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_INTERFACE_DATA ioctl. It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
+ enum adf_interface_type type, u32 idx, u32 flags,
+ const struct adf_interface_ops *ops, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+ const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
+ ADF_INTF_FLAG_EXTERNAL;
+
+ if (dev->n_interfaces == ADF_MAX_INTERFACES) {
+ pr_err("%s: parent device %s has too many interfaces\n",
+ __func__, dev->base.name);
+ return -ENOMEM;
+ }
+
+ if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+ pr_err("%s: invalid interface type %u\n", __func__, type);
+ return -EINVAL;
+ }
+
+ if (flags & ~allowed_flags) {
+ pr_err("%s: invalid interface flags 0x%X\n", __func__,
+ flags & ~allowed_flags);
+ return -EINVAL;
+ }
+
+ memset(intf, 0, sizeof(*intf));
+
+ va_start(args, fmt);
+ ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
+ dev, ops ? &ops->base : NULL, fmt, args);
+ va_end(args);
+ if (ret < 0)
+ return ret;
+
+ intf->type = type;
+ intf->idx = idx;
+ intf->flags = flags;
+ intf->ops = ops;
+ intf->dpms_state = DRM_MODE_DPMS_OFF;
+ init_waitqueue_head(&intf->vsync_wait);
+ rwlock_init(&intf->vsync_lock);
+ rwlock_init(&intf->hotplug_modelist_lock);
+
+ ret = adf_interface_sysfs_init(intf);
+ if (ret < 0)
+ goto err;
+ dev->n_interfaces++;
+
+ return 0;
+
+err:
+ adf_obj_destroy(&intf->base, &dev->interfaces);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_init);
+
+/**
+ * adf_interface_destroy - clean up ADF-internal data for a display interface
+ *
+ * @intf: the display interface
+ */
+void adf_interface_destroy(struct adf_interface *intf)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ struct adf_attachment_list *entry, *next;
+
+ mutex_lock(&dev->client_lock);
+ list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+ if (entry->attachment.interface == intf) {
+ adf_attachment_free(entry);
+ dev->n_attach_allowed--;
+ }
+ }
+ list_for_each_entry_safe(entry, next, &dev->attached, head) {
+ if (entry->attachment.interface == intf) {
+ adf_device_detach_op(dev,
+ entry->attachment.overlay_engine, intf);
+ adf_attachment_free(entry);
+ dev->n_attached--;
+ }
+ }
+ kfree(intf->modelist);
+ adf_interface_sysfs_destroy(intf);
+ adf_obj_destroy(&intf->base, &dev->interfaces);
+ dev->n_interfaces--;
+ mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_destroy);
+
+static bool adf_overlay_engine_has_custom_formats(
+ const struct adf_overlay_engine_ops *ops)
+{
+ size_t i;
+ for (i = 0; i < ops->n_supported_formats; i++)
+ if (!adf_format_is_standard(ops->supported_formats[i]))
+ return true;
+ return false;
+}
+
+/**
+ * adf_overlay_engine_init - initialize ADF-internal data for an
+ * overlay engine and create sysfs entries
+ *
+ * @eng: the overlay engine
+ * @dev: the overlay engine's "parent" display device
+ * @ops: the overlay engine's associated ops
+ * @fmt: formatting string for the overlay engine's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_OVERLAY_ENGINE_DATA ioctl. It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_overlay_engine_init(struct adf_overlay_engine *eng,
+ struct adf_device *dev,
+ const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+
+ if (!ops->supported_formats) {
+ pr_err("%s: overlay engine must support at least one format\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
+ pr_err("%s: overlay engine supports too many formats\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (adf_overlay_engine_has_custom_formats(ops) &&
+ !dev->ops->validate_custom_format) {
+ pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
+ __func__, dev->base.name);
+ return -EINVAL;
+ }
+
+ memset(eng, 0, sizeof(*eng));
+
+ va_start(args, fmt);
+ ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
+ &dev->overlay_engines, dev, &ops->base, fmt, args);
+ va_end(args);
+ if (ret < 0)
+ return ret;
+
+ eng->ops = ops;
+
+ ret = adf_overlay_engine_sysfs_init(eng);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ adf_obj_destroy(&eng->base, &dev->overlay_engines);
+ return ret;
+}
+EXPORT_SYMBOL(adf_overlay_engine_init);
+
+/**
+ * adf_overlay_engine_destroy - clean up ADF-internal data for an overlay engine
+ *
+ * @eng: the overlay engine
+ */
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
+{
+ struct adf_device *dev = adf_overlay_engine_parent(eng);
+ struct adf_attachment_list *entry, *next;
+
+ mutex_lock(&dev->client_lock);
+ list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+ if (entry->attachment.overlay_engine == eng) {
+ adf_attachment_free(entry);
+ dev->n_attach_allowed--;
+ }
+ }
+ list_for_each_entry_safe(entry, next, &dev->attached, head) {
+ if (entry->attachment.overlay_engine == eng) {
+ adf_device_detach_op(dev, eng,
+ entry->attachment.interface);
+ adf_attachment_free(entry);
+ dev->n_attached--;
+ }
+ }
+ adf_overlay_engine_sysfs_destroy(eng);
+ adf_obj_destroy(&eng->base, &dev->overlay_engines);
+ mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_overlay_engine_destroy);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ struct adf_attachment_list *entry;
+ list_for_each_entry(entry, list, head) {
+ if (entry->attachment.interface == intf &&
+ entry->attachment.overlay_engine == eng)
+ return entry;
+ }
+ return NULL;
+}
+
+int adf_attachment_validate(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ struct adf_device *intf_dev = adf_interface_parent(intf);
+ struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
+
+ if (intf_dev != dev) {
+ dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
+ intf->base.name, intf_dev->base.name);
+ return -EINVAL;
+ }
+
+ if (eng_dev != dev) {
+ dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
+ eng->base.name, eng_dev->base.name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * adf_attachment_allow - add a new entry to the list of allowed
+ * attachments
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * adf_attachment_allow() indicates that the underlying display hardware allows
+ * @intf to scan out @eng's output. It is intended to be called at
+ * driver initialization for each supported overlay engine + interface pair.
+ *
+ * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
+ * any other failure.
+ */
+int adf_attachment_allow(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ int ret;
+ struct adf_attachment_list *entry = NULL;
+
+ ret = adf_attachment_validate(dev, eng, intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->client_lock);
+
+ if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ entry->attachment.interface = intf;
+ entry->attachment.overlay_engine = eng;
+ list_add_tail(&entry->head, &dev->attach_allowed);
+ dev->n_attach_allowed++;
+
+done:
+ mutex_unlock(&dev->client_lock);
+ if (ret < 0)
+ kfree(entry);
+
+ return ret;
+}
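
Usage sketch (illustrative only; error handling is omitted and the example_* objects and ops are hypothetical): a driver declares its objects and each legal pairing during initialization.

	err = adf_overlay_engine_init(&example_eng, &example_dev,
			&example_eng_ops, "example-eng");
	err = adf_interface_init(&example_intf, &example_dev, ADF_INTF_DSI, 0,
			ADF_INTF_FLAG_PRIMARY, &example_intf_ops, "example-dsi");
	err = adf_attachment_allow(&example_dev, &example_eng, &example_intf);
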
+
+/**
+ * adf_obj_type_str - string representation of an adf_obj_type
+ *
+ * @type: the object type
+ */
+const char *adf_obj_type_str(enum adf_obj_type type)
+{
+ switch (type) {
+ case ADF_OBJ_OVERLAY_ENGINE:
+ return "overlay engine";
+
+ case ADF_OBJ_INTERFACE:
+ return "interface";
+
+ case ADF_OBJ_DEVICE:
+ return "device";
+
+ default:
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(adf_obj_type_str);
+
+/**
+ * adf_interface_type_str - string representation of an adf_interface's type
+ *
+ * @intf: the interface
+ */
+const char *adf_interface_type_str(struct adf_interface *intf)
+{
+ switch (intf->type) {
+ case ADF_INTF_DSI:
+ return "DSI";
+
+ case ADF_INTF_eDP:
+ return "eDP";
+
+ case ADF_INTF_DPI:
+ return "DPI";
+
+ case ADF_INTF_VGA:
+ return "VGA";
+
+ case ADF_INTF_DVI:
+ return "DVI";
+
+ case ADF_INTF_HDMI:
+ return "HDMI";
+
+ case ADF_INTF_MEMORY:
+ return "memory";
+
+ default:
+ if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+ if (intf->ops && intf->ops->type_str)
+ return intf->ops->type_str(intf);
+ return "custom";
+ }
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(adf_interface_type_str);
+
+/**
+ * adf_event_type_str - string representation of an adf_event_type
+ *
+ * @obj: ADF object that produced the event
+ * @type: event type
+ */
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
+{
+ switch (type) {
+ case ADF_EVENT_VSYNC:
+ return "vsync";
+
+ case ADF_EVENT_HOTPLUG:
+ return "hotplug";
+
+ default:
+ if (type >= ADF_EVENT_DEVICE_CUSTOM) {
+ if (obj->ops && obj->ops->event_type_str)
+ return obj->ops->event_type_str(obj, type);
+ return "custom";
+ }
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(adf_event_type_str);
+
+/**
+ * adf_format_str - string representation of an ADF/DRM fourcc format
+ *
+ * @format: format fourcc
+ * @buf: target buffer for the format's string representation
+ */
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
+{
+ buf[0] = format & 0xFF;
+ buf[1] = (format >> 8) & 0xFF;
+ buf[2] = (format >> 16) & 0xFF;
+ buf[3] = (format >> 24) & 0xFF;
+ buf[4] = '\0';
+}
+EXPORT_SYMBOL(adf_format_str);
+
+/**
+ * adf_format_validate_yuv - validate the number and size of planes in buffers
+ * with a custom YUV format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @num_planes: expected number of planes
+ * @hsub: expected horizontal chroma subsampling factor, in pixels
+ * @vsub: expected vertical chroma subsampling factor, in pixels
+ * @cpp: expected bytes per pixel for each plane (length @num_planes)
+ *
+ * adf_format_validate_yuv() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.
+ *
+ * Returns 0 if @buf has the expected number of planes and each plane
+ * has sufficient size, or -EINVAL otherwise.
+ */
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+ u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
+{
+ u8 i;
+
+ if (num_planes != buf->n_planes) {
+ char format_str[ADF_FORMAT_STR_SIZE];
+ adf_format_str(buf->format, format_str);
+ dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
+ num_planes, format_str, buf->n_planes);
+ return -EINVAL;
+ }
+
+ if (buf->w == 0 || buf->w % hsub) {
+ dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
+ return -EINVAL;
+ }
+
+ if (buf->h == 0 || buf->h % vsub) {
+ dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ u32 width = buf->w / (i != 0 ? hsub : 1);
+ u32 height = buf->h / (i != 0 ? vsub : 1);
+ u8 cpp = adf_format_plane_cpp(buf->format, i);
+
+ if (buf->pitch[i] < (u64) width * cpp) {
+ dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
+ i, buf->pitch[i], width, cpp * 8);
+ return -EINVAL;
+ }
+
+ if ((u64) height * buf->pitch[i] + buf->offset[i] >
+ buf->dma_bufs[i]->size) {
+ dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
+ i, height, buf->pitch[i],
+ buf->offset[i], buf->dma_bufs[i]->size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_format_validate_yuv);
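
Usage sketch (illustrative only): a validate_custom_format() op for a hypothetical two-plane 4:2:0 format whose chroma plane stores interleaved CbCr at two bytes per sample.

static int example_validate_custom_format(struct adf_device *dev,
		struct adf_buffer *buf)
{
	u8 cpp[] = { 1, 2 };	/* luma plane, then interleaved chroma plane */

	return adf_format_validate_yuv(dev, buf, ARRAY_SIZE(cpp), 2, 2, cpp);
}
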
+
+/**
+ * adf_modeinfo_set_name - sets the name of a mode from its display resolution
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_name() fills in @mode->name in the format
+ * "[hdisplay]x[vdisplay](i)". It is intended to help drivers create
+ * ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
+{
+ bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(adf_modeinfo_set_name);
+
+/**
+ * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
+ * timing data
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from
+ * @mode->{h,v}display and @mode->flags. It is intended to help drivers
+ * create ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ return;
+
+ if (mode->htotal <= 0 || mode->vtotal <= 0)
+ return;
+
+	/* work out vrefresh; the intermediate value is scaled by 1000 */
+ calc_val = (mode->clock * 1000);
+ calc_val /= mode->htotal;
+ refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+
+ mode->vrefresh = refresh;
+}
+EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
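
As a quick check of the arithmetic above with a common 1080p timing (mode->clock = 148500, in kHz as usual for DRM modes, htotal = 2200, vtotal = 1125): 148500 * 1000 / 2200 = 67500, and (67500 + 562) / 1125 = 60, so vrefresh becomes 60.
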
+
+static int __init adf_init(void)
+{
+ int err;
+
+ err = adf_sysfs_init();
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void __exit adf_exit(void)
+{
+ adf_sysfs_destroy();
+}
+
+module_init(adf_init);
+module_exit(adf_exit);
diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h
new file mode 100644
index 000000000000..3bcf1fabc23c
--- /dev/null
+++ b/drivers/video/adf/adf.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_H
+#define __VIDEO_ADF_ADF_H
+
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <video/adf.h>
+#include "sync.h"
+
+struct adf_event_refcount {
+ struct rb_node node;
+ enum adf_event_type type;
+ int refcount;
+};
+
+void adf_buffer_cleanup(struct adf_buffer *buf);
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+ struct adf_buffer *buf);
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+ struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_attachment_validate(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf);
+void adf_attachment_free(struct adf_attachment_list *attachment);
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+ enum adf_event_type type);
+
+static inline int adf_obj_check_supports_event(struct adf_obj *obj,
+ enum adf_event_type type)
+{
+ if (!obj->ops || !obj->ops->supports_event)
+ return -EOPNOTSUPP;
+ if (!obj->ops->supports_event(obj, type))
+ return -EINVAL;
+ return 0;
+}
+
+static inline int adf_device_attach_op(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ if (!dev->ops->attach)
+ return 0;
+
+ return dev->ops->attach(dev, eng, intf);
+}
+
+static inline int adf_device_detach_op(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ if (!dev->ops->detach)
+ return 0;
+
+ return dev->ops->detach(dev, eng, intf);
+}
+
+#endif /* __VIDEO_ADF_ADF_H */
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
new file mode 100644
index 000000000000..bba873d34bbb
--- /dev/null
+++ b/drivers/video/adf/adf_client.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sw_sync.h"
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+static inline bool vsync_active(u8 state)
+{
+ return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
+}
+
+/**
+ * adf_interface_blank - set interface's DPMS state
+ *
+ * @intf: the interface
+ * @state: one of %DRM_MODE_DPMS_*
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_blank(struct adf_interface *intf, u8 state)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ u8 prev_state;
+ bool disable_vsync;
+ bool enable_vsync;
+ int ret = 0;
+ struct adf_event_refcount *vsync_refcount;
+
+ if (!intf->ops || !intf->ops->blank)
+ return -EOPNOTSUPP;
+
+ if (state > DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ mutex_lock(&dev->client_lock);
+ if (state != DRM_MODE_DPMS_ON)
+ flush_kthread_worker(&dev->post_worker);
+ mutex_lock(&intf->base.event_lock);
+
+ vsync_refcount = adf_obj_find_event_refcount(&intf->base,
+ ADF_EVENT_VSYNC);
+ if (!vsync_refcount) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ prev_state = intf->dpms_state;
+ if (prev_state == state) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ disable_vsync = vsync_active(prev_state) &&
+ !vsync_active(state) &&
+ vsync_refcount->refcount;
+ enable_vsync = !vsync_active(prev_state) &&
+ vsync_active(state) &&
+ vsync_refcount->refcount;
+
+ if (disable_vsync)
+ intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+ false);
+
+ ret = intf->ops->blank(intf, state);
+ if (ret < 0) {
+ if (disable_vsync)
+ intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+ true);
+ goto done;
+ }
+
+ if (enable_vsync)
+ intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+ true);
+
+ intf->dpms_state = state;
+done:
+ mutex_unlock(&intf->base.event_lock);
+ mutex_unlock(&dev->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_blank);
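
Usage sketch (illustrative only): blank the panel for suspend and unblank it on resume.

	err = adf_interface_blank(intf, DRM_MODE_DPMS_OFF);
	/* ... later, on resume ... */
	err = adf_interface_blank(intf, DRM_MODE_DPMS_ON);
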
+
+/**
+ * adf_interface_dpms_state - get interface's current DPMS state
+ *
+ * @intf: the interface
+ *
+ * Returns one of %DRM_MODE_DPMS_*.
+ */
+u8 adf_interface_dpms_state(struct adf_interface *intf)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ u8 dpms_state;
+
+ mutex_lock(&dev->client_lock);
+ dpms_state = intf->dpms_state;
+ mutex_unlock(&dev->client_lock);
+
+ return dpms_state;
+}
+EXPORT_SYMBOL(adf_interface_dpms_state);
+
+/**
+ * adf_interface_current_mode - get interface's current display mode
+ *
+ * @intf: the interface
+ * @mode: returns the current mode
+ */
+void adf_interface_current_mode(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+
+ mutex_lock(&dev->client_lock);
+ memcpy(mode, &intf->current_mode, sizeof(*mode));
+ mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_current_mode);
+
+/**
+ * adf_interface_modelist - get interface's modelist
+ *
+ * @intf: the interface
+ * @modelist: storage for the modelist (optional)
+ * @n_modes: length of @modelist
+ *
+ * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
+ * modelist entries into @modelist.
+ *
+ * Returns the length of the modelist.
+ */
+size_t adf_interface_modelist(struct adf_interface *intf,
+ struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+ unsigned long flags;
+ size_t retval;
+
+ read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+ if (modelist)
+ memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
+ min(n_modes, intf->n_modes));
+ retval = intf->n_modes;
+ read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(adf_interface_modelist);
+
+/**
+ * adf_interface_set_mode - set interface's display mode
+ *
+ * @intf: the interface
+ * @mode: the new mode
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_set_mode(struct adf_interface *intf,
+ struct drm_mode_modeinfo *mode)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ int ret = 0;
+
+ if (!intf->ops || !intf->ops->modeset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dev->client_lock);
+ flush_kthread_worker(&dev->post_worker);
+
+ ret = intf->ops->modeset(intf, mode);
+ if (ret < 0)
+ goto done;
+
+ memcpy(&intf->current_mode, mode, sizeof(*mode));
+done:
+ mutex_unlock(&dev->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_set_mode);
+
+/**
+ * adf_interface_get_screen_size - get size of screen connected to interface
+ *
+ * @intf: the interface
+ * @width_mm: returns the screen width in mm
+ * @height_mm: returns the screen height in mm
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
+ u16 *height_mm)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ int ret;
+
+ if (!intf->ops || !intf->ops->screen_size)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dev->client_lock);
+ ret = intf->ops->screen_size(intf, width_mm, height_mm);
+ mutex_unlock(&dev->client_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_get_screen_size);
+
+/**
+ * adf_overlay_engine_supports_format - returns whether a format is in an
+ * overlay engine's supported list
+ *
+ * @eng: the overlay engine
+ * @format: format fourcc
+ */
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+ u32 format)
+{
+ size_t i;
+ for (i = 0; i < eng->ops->n_supported_formats; i++)
+ if (format == eng->ops->supported_formats[i])
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(adf_overlay_engine_supports_format);
+
+static int adf_buffer_validate(struct adf_buffer *buf)
+{
+ struct adf_overlay_engine *eng = buf->overlay_engine;
+ struct device *dev = &eng->base.dev;
+ struct adf_device *parent = adf_overlay_engine_parent(eng);
+ u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;
+
+ if (!adf_overlay_engine_supports_format(eng, buf->format)) {
+ char format_str[ADF_FORMAT_STR_SIZE];
+ adf_format_str(buf->format, format_str);
+ dev_err(dev, "unsupported format %s\n", format_str);
+ return -EINVAL;
+ }
+
+ if (!adf_format_is_standard(buf->format))
+ return parent->ops->validate_custom_format(parent, buf);
+
+ hsub = adf_format_horz_chroma_subsampling(buf->format);
+ vsub = adf_format_vert_chroma_subsampling(buf->format);
+ num_planes = adf_format_num_planes(buf->format);
+ for (i = 0; i < num_planes; i++)
+ cpp[i] = adf_format_plane_cpp(buf->format, i);
+
+ return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
+ cpp);
+}
+
+static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
+ struct adf_buffer_mapping *mapping)
+{
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i < buf->n_planes; i++) {
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sg_table;
+
+ attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
+ if (IS_ERR(attachment)) {
+ ret = PTR_ERR(attachment);
+ dev_err(&dev->base.dev, "attaching plane %u failed: %d\n",
+ i, ret);
+ goto done;
+ }
+ mapping->attachments[i] = attachment;
+
+ sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
+ if (IS_ERR(sg_table)) {
+ ret = PTR_ERR(sg_table);
+ dev_err(&dev->base.dev, "mapping plane %u failed: %d",
+ i, ret);
+ goto done;
+ } else if (!sg_table) {
+ ret = -ENOMEM;
+ dev_err(&dev->base.dev, "mapping plane %u failed\n", i);
+ goto done;
+ }
+ mapping->sg_tables[i] = sg_table;
+ }
+
+done:
+ if (ret < 0)
+ adf_buffer_mapping_cleanup(mapping, buf);
+
+ return ret;
+}
+
+static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
+{
+ struct sync_pt *pt;
+ struct sync_fence *complete_fence;
+
+ if (!dev->timeline) {
+ dev->timeline = sw_sync_timeline_create(dev->base.name);
+ if (!dev->timeline)
+ return ERR_PTR(-ENOMEM);
+ dev->timeline_max = 1;
+ }
+
+ dev->timeline_max++;
+ pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
+ if (!pt)
+ goto err_pt_create;
+ complete_fence = sync_fence_create(dev->base.name, pt);
+ if (!complete_fence)
+ goto err_fence_create;
+
+ return complete_fence;
+
+err_fence_create:
+ sync_pt_free(pt);
+err_pt_create:
+ dev->timeline_max--;
+ return ERR_PTR(-ENOSYS);
+}
+
+/**
+ * adf_device_post - flip to a new set of buffers
+ *
+ * @dev: device targeted by the flip
+ * @intfs: interfaces targeted by the flip
+ * @n_intfs: number of targeted interfaces
+ * @bufs: description of buffers displayed
+ * @n_bufs: number of buffers displayed
+ * @custom_data: driver-private data
+ * @custom_data_size: size of driver-private data
+ *
+ * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
+ * point to variables on the stack. adf_device_post() also takes its own
+ * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy()
+ * variant transfers ownership of these resources to ADF instead.
+ *
+ * On success, returns a sync fence which signals when the buffers are removed
+ * from the screen. On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_device_post(struct adf_device *dev,
+ struct adf_interface **intfs, size_t n_intfs,
+ struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+ size_t custom_data_size)
+{
+ struct adf_interface **intfs_copy = NULL;
+ struct adf_buffer *bufs_copy = NULL;
+ void *custom_data_copy = NULL;
+ struct sync_fence *ret;
+ size_t i;
+
+ intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
+ if (!intfs_copy)
+ return ERR_PTR(-ENOMEM);
+
+ bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
+ if (!bufs_copy) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
+ if (!custom_data_copy) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ for (i = 0; i < n_bufs; i++) {
+ size_t j;
+ for (j = 0; j < bufs[i].n_planes; j++)
+ get_dma_buf(bufs[i].dma_bufs[j]);
+ }
+
+ memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
+ memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
+ memcpy(custom_data_copy, custom_data, custom_data_size);
+
+ ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
+ n_bufs, custom_data_copy, custom_data_size);
+ if (IS_ERR(ret))
+ goto err_post;
+
+ return ret;
+
+err_post:
+ for (i = 0; i < n_bufs; i++) {
+ size_t j;
+ for (j = 0; j < bufs[i].n_planes; j++)
+ dma_buf_put(bufs[i].dma_bufs[j]);
+ }
+err_alloc:
+ kfree(custom_data_copy);
+ kfree(bufs_copy);
+ kfree(intfs_copy);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_post);
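
Usage sketch (illustrative only; buffer setup elided): flip one buffer to one interface, then wait for the returned fence, which signals once the buffer has left the screen.

	struct sync_fence *fence;

	fence = adf_device_post(dev, &intf, 1, &buf, 1, NULL, 0);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	sync_fence_wait(fence, -1);	/* negative timeout waits indefinitely */
	sync_fence_put(fence);
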
+
+/**
+ * adf_device_post_nocopy - flip to a new set of buffers
+ *
+ * adf_device_post_nocopy() has the same behavior as adf_device_post(),
+ * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
+ * not take an extra reference on the dma-bufs in @bufs.
+ *
+ * @intfs, @bufs, and @custom_data must point to buffers allocated by
+ * kmalloc(). On success, ADF takes ownership of these buffers and the dma-bufs
+ * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these
+ * buffers or the dma-bufs, and the caller must clean them up.
+ *
+ * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
+ * Clients may find the nocopy variant useful in limited cases, but most should
+ * call adf_device_post() instead.
+ */
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+ struct adf_interface **intfs, size_t n_intfs,
+ struct adf_buffer *bufs, size_t n_bufs,
+ void *custom_data, size_t custom_data_size)
+{
+ struct adf_pending_post *cfg;
+ struct adf_buffer_mapping *mappings;
+ struct sync_fence *ret;
+ size_t i;
+ int err;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return ERR_PTR(-ENOMEM);
+
+ mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
+ if (!mappings) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ mutex_lock(&dev->client_lock);
+
+ for (i = 0; i < n_bufs; i++) {
+ err = adf_buffer_validate(&bufs[i]);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto err_buf;
+ }
+
+ err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto err_buf;
+ }
+ }
+
+ INIT_LIST_HEAD(&cfg->head);
+ cfg->config.n_bufs = n_bufs;
+ cfg->config.bufs = bufs;
+ cfg->config.mappings = mappings;
+ cfg->config.custom_data = custom_data;
+ cfg->config.custom_data_size = custom_data_size;
+
+ err = dev->ops->validate(dev, &cfg->config, &cfg->state);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto err_buf;
+ }
+
+ mutex_lock(&dev->post_lock);
+
+ if (dev->ops->complete_fence)
+ ret = dev->ops->complete_fence(dev, &cfg->config,
+ cfg->state);
+ else
+ ret = adf_sw_complete_fence(dev);
+
+ if (IS_ERR(ret))
+ goto err_fence;
+
+ list_add_tail(&cfg->head, &dev->post_list);
+ queue_kthread_work(&dev->post_worker, &dev->post_work);
+ mutex_unlock(&dev->post_lock);
+ mutex_unlock(&dev->client_lock);
+ kfree(intfs);
+ return ret;
+
+err_fence:
+ mutex_unlock(&dev->post_lock);
+
+err_buf:
+ for (i = 0; i < n_bufs; i++)
+ adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
+
+ mutex_unlock(&dev->client_lock);
+ kfree(mappings);
+
+err_alloc:
+ kfree(cfg);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_post_nocopy);
+
+static void adf_attachment_list_to_array(struct adf_device *dev,
+ struct list_head *src, struct adf_attachment *dst, size_t size)
+{
+ struct adf_attachment_list *entry;
+ size_t i = 0;
+
+ if (!dst)
+ return;
+
+ list_for_each_entry(entry, src, head) {
+ if (i == size)
+ return;
+ dst[i] = entry->attachment;
+ i++;
+ }
+}
+
+/**
+ * adf_device_attachments - get device's list of active attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the active attachment list.
+ */
+size_t adf_device_attachments(struct adf_device *dev,
+ struct adf_attachment *attachments, size_t n_attachments)
+{
+ size_t retval;
+
+ mutex_lock(&dev->client_lock);
+ adf_attachment_list_to_array(dev, &dev->attached, attachments,
+ n_attachments);
+ retval = dev->n_attached;
+ mutex_unlock(&dev->client_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments);
+
+/**
+ * adf_device_attachments_allowed - get device's list of allowed attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the allowed attachment list.
+ */
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+ struct adf_attachment *attachments, size_t n_attachments)
+{
+ size_t retval;
+
+ mutex_lock(&dev->client_lock);
+ adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
+ n_attachments);
+ retval = dev->n_attach_allowed;
+ mutex_unlock(&dev->client_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments_allowed);
+
+/**
+ * adf_device_attached - return whether an overlay engine and interface are
+ * attached
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+ struct adf_interface *intf)
+{
+ struct adf_attachment_list *attachment;
+
+ mutex_lock(&dev->client_lock);
+ attachment = adf_attachment_find(&dev->attached, eng, intf);
+ mutex_unlock(&dev->client_lock);
+
+ return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attached);
+
+/**
+ * adf_device_attach_allowed - return whether the ADF device supports attaching
+ * an overlay engine and interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attach_allowed(struct adf_device *dev,
+ struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+ struct adf_attachment_list *attachment;
+
+ mutex_lock(&dev->client_lock);
+ attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
+ mutex_unlock(&dev->client_lock);
+
+ return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attach_allowed);
+
+/**
+ * adf_device_attach - attach an overlay engine to an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
+ * -%EALREADY if @intf and @eng are already attached, or -errno on any other
+ * failure.
+ */
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+ struct adf_interface *intf)
+{
+ int ret;
+ struct adf_attachment_list *attachment = NULL;
+
+ ret = adf_attachment_validate(dev, eng, intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->client_lock);
+
+ if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (adf_attachment_find(&dev->attached, eng, intf)) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ ret = adf_device_attach_op(dev, eng, intf);
+ if (ret < 0)
+ goto done;
+
+ attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+ if (!attachment) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ attachment->attachment.interface = intf;
+ attachment->attachment.overlay_engine = eng;
+ list_add_tail(&attachment->head, &dev->attached);
+ dev->n_attached++;
+
+done:
+ mutex_unlock(&dev->client_lock);
+ if (ret < 0)
+ kfree(attachment);
+
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_attach);
+
+/**
+ * adf_device_detach - detach an overlay engine from an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
+ * or -errno on any other failure.
+ */
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+ struct adf_interface *intf)
+{
+ int ret;
+ struct adf_attachment_list *attachment;
+
+ ret = adf_attachment_validate(dev, eng, intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->client_lock);
+
+ attachment = adf_attachment_find(&dev->attached, eng, intf);
+ if (!attachment) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = adf_device_detach_op(dev, eng, intf);
+ if (ret < 0)
+ goto done;
+
+ adf_attachment_free(attachment);
+ dev->n_attached--;
+done:
+ mutex_unlock(&dev->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(adf_device_detach);
+
+/**
+ * adf_interface_simple_buffer_alloc - allocate a simple buffer
+ *
+ * @intf: target interface
+ * @w: width in pixels
+ * @h: height in pixels
+ * @format: format fourcc
+ * @dma_buf: returns the allocated buffer
+ * @offset: returns the byte offset of the allocated buffer's first pixel
+ * @pitch: returns the allocated buffer's pitch
+ *
+ * See &struct adf_simple_buffer_alloc for a description of simple buffers and
+ * their limitations.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+ u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+ if (!intf->ops || !intf->ops->alloc_simple_buffer)
+ return -EOPNOTSUPP;
+
+ if (!adf_format_is_rgb(format))
+ return -EINVAL;
+
+ return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
+ offset, pitch);
+}
+EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
+
+/**
+ * adf_interface_simple_post - flip to a single buffer
+ *
+ * @intf: interface targeted by the flip
+ * @buf: buffer to display
+ *
+ * adf_interface_simple_post() can be used generically for simple display
+ * configurations, since the client does not need to provide any driver-private
+ * configuration data.
+ *
+ * adf_interface_simple_post() has the same copying semantics as
+ * adf_device_post().
+ *
+ * On success, returns a sync fence which signals when the buffer is removed
+ * from the screen. On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+ struct adf_buffer *buf)
+{
+ size_t custom_data_size = 0;
+ void *custom_data = NULL;
+ struct sync_fence *ret;
+
+ if (intf->ops && intf->ops->describe_simple_post) {
+ int err;
+
+ custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+ if (!custom_data) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ err = intf->ops->describe_simple_post(intf, buf, custom_data,
+ &custom_data_size);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto done;
+ }
+ }
+
+ ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
+ custom_data, custom_data_size);
+done:
+ kfree(custom_data);
+ return ret;
+}
+EXPORT_SYMBOL(adf_interface_simple_post);
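+
+/*
+ * Illustrative single-buffer flip (sketch; intf, eng, dma_buf, offset and
+ * pitch are assumed to come from the caller, e.g. from a prior
+ * adf_interface_simple_buffer_alloc() call):
+ *
+ *  struct adf_buffer buf = {
+ *   .overlay_engine = eng,
+ *   .w = 1280,
+ *   .h = 720,
+ *   .format = DRM_FORMAT_XRGB8888,
+ *   .dma_bufs = { dma_buf },
+ *   .offset = { offset },
+ *   .pitch = { pitch },
+ *   .n_planes = 1,
+ *  };
+ *  struct sync_fence *fence = adf_interface_simple_post(intf, &buf);
+ *  if (!IS_ERR(fence))
+ *   sync_fence_put(fence);
+ *
+ * adf_fbdev_post() in adf_fbdev.c follows the same pattern.
+ */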
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
new file mode 100644
index 000000000000..cac34d14cbc2
--- /dev/null
+++ b/drivers/video/adf/adf_fbdev.c
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+struct adf_fbdev_format {
+ u32 fourcc;
+ u32 bpp;
+ u32 r_length;
+ u32 g_length;
+ u32 b_length;
+ u32 a_length;
+ u32 r_offset;
+ u32 g_offset;
+ u32 b_offset;
+ u32 a_offset;
+};
+
+static const struct adf_fbdev_format format_table[] = {
+ {DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
+ {DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0},
+
+ {DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
+ {DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+ {DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
+ {DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+
+ {DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
+ {DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
+ {DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
+ {DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0},
+
+ {DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
+ {DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
+ {DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
+ {DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},
+
+ {DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
+ {DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
+ {DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
+ {DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},
+
+ {DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
+ {DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},
+
+ {DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
+ {DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},
+
+ {DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
+ {DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
+ {DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
+ {DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},
+
+ {DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
+ {DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
+ {DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
+ {DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},
+
+ {DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
+ {DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
+ {DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
+ {DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},
+
+ {DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
+ {DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
+ {DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
+ {DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
+};
+
+static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+ const struct adf_fbdev_format *f = &format_table[i];
+ if (var->red.length == f->r_length &&
+ var->red.offset == f->r_offset &&
+ var->green.length == f->g_length &&
+ var->green.offset == f->g_offset &&
+ var->blue.length == f->b_length &&
+ var->blue.offset == f->b_offset &&
+ var->transp.length == f->a_length &&
+ (var->transp.length == 0 ||
+ var->transp.offset == f->a_offset))
+ return f->fourcc;
+ }
+
+ return 0;
+}
+
+static const struct adf_fbdev_format *fbdev_format_info(u32 format)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+ const struct adf_fbdev_format *f = &format_table[i];
+ if (f->fourcc == format)
+ return f;
+ }
+
+ BUG();
+}
+
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+ struct fb_videomode *vmode)
+{
+ memset(vmode, 0, sizeof(*vmode));
+
+ vmode->refresh = mode->vrefresh;
+
+ vmode->xres = mode->hdisplay;
+ vmode->yres = mode->vdisplay;
+
+ vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
+ vmode->left_margin = mode->htotal - mode->hsync_end;
+ vmode->right_margin = mode->hsync_start - mode->hdisplay;
+ vmode->upper_margin = mode->vtotal - mode->vsync_end;
+ vmode->lower_margin = mode->vsync_start - mode->vdisplay;
+ vmode->hsync_len = mode->hsync_end - mode->hsync_start;
+ vmode->vsync_len = mode->vsync_end - mode->vsync_start;
+
+ vmode->sync = 0;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (mode->flags & DRM_MODE_FLAG_PCSYNC)
+ vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
+ if (mode->flags & DRM_MODE_FLAG_BCAST)
+ vmode->sync |= FB_SYNC_BROADCAST;
+
+ vmode->vmode = 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vmode->vmode |= FB_VMODE_INTERLACED;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ vmode->vmode |= FB_VMODE_DOUBLE;
+}
+EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
+
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+ struct drm_mode_modeinfo *mode)
+{
+ memset(mode, 0, sizeof(*mode));
+
+ mode->hdisplay = vmode->xres;
+ mode->hsync_start = mode->hdisplay + vmode->right_margin;
+ mode->hsync_end = mode->hsync_start + vmode->hsync_len;
+ mode->htotal = mode->hsync_end + vmode->left_margin;
+
+ mode->vdisplay = vmode->yres;
+ mode->vsync_start = mode->vdisplay + vmode->lower_margin;
+ mode->vsync_end = mode->vsync_start + vmode->vsync_len;
+ mode->vtotal = mode->vsync_end + vmode->upper_margin;
+
+ mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;
+
+ mode->flags = 0;
+ if (vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ if (vmode->sync & FB_SYNC_COMP_HIGH_ACT)
+ mode->flags |= DRM_MODE_FLAG_PCSYNC;
+ if (vmode->sync & FB_SYNC_BROADCAST)
+ mode->flags |= DRM_MODE_FLAG_BCAST;
+ if (vmode->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vmode->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ if (vmode->refresh)
+ mode->vrefresh = vmode->refresh;
+ else
+ adf_modeinfo_set_vrefresh(mode);
+
+ if (vmode->name)
+ strlcpy(mode->name, vmode->name, sizeof(mode->name));
+ else
+ adf_modeinfo_set_name(mode);
+}
+EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
+
+static int adf_fbdev_post(struct adf_fbdev *fbdev)
+{
+ struct adf_buffer buf;
+ struct sync_fence *complete_fence;
+ int ret = 0;
+
+ memset(&buf, 0, sizeof(buf));
+ buf.overlay_engine = fbdev->eng;
+ buf.w = fbdev->info->var.xres;
+ buf.h = fbdev->info->var.yres;
+ buf.format = fbdev->format;
+ buf.dma_bufs[0] = fbdev->dma_buf;
+ buf.offset[0] = fbdev->offset +
+ fbdev->info->var.yoffset * fbdev->pitch +
+ fbdev->info->var.xoffset *
+ (fbdev->info->var.bits_per_pixel / 8);
+ buf.pitch[0] = fbdev->pitch;
+ buf.n_planes = 1;
+
+ complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
+ if (IS_ERR(complete_fence)) {
+ ret = PTR_ERR(complete_fence);
+ goto done;
+ }
+
+ sync_fence_put(complete_fence);
+done:
+ return ret;
+}
+
+static const u16 vga_palette[][3] = {
+ {0x0000, 0x0000, 0x0000},
+ {0x0000, 0x0000, 0xAAAA},
+ {0x0000, 0xAAAA, 0x0000},
+ {0x0000, 0xAAAA, 0xAAAA},
+ {0xAAAA, 0x0000, 0x0000},
+ {0xAAAA, 0x0000, 0xAAAA},
+ {0xAAAA, 0x5555, 0x0000},
+ {0xAAAA, 0xAAAA, 0xAAAA},
+ {0x5555, 0x5555, 0x5555},
+ {0x5555, 0x5555, 0xFFFF},
+ {0x5555, 0xFFFF, 0x5555},
+ {0x5555, 0xFFFF, 0xFFFF},
+ {0xFFFF, 0x5555, 0x5555},
+ {0xFFFF, 0x5555, 0xFFFF},
+ {0xFFFF, 0xFFFF, 0x5555},
+ {0xFFFF, 0xFFFF, 0xFFFF},
+};
+
+static int adf_fb_alloc(struct adf_fbdev *fbdev)
+{
+ int ret;
+
+ ret = adf_interface_simple_buffer_alloc(fbdev->intf,
+ fbdev->default_xres_virtual,
+ fbdev->default_yres_virtual,
+ fbdev->default_format,
+ &fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
+ if (ret < 0) {
+ dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
+ return ret;
+ }
+
+ fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
+ if (!fbdev->vaddr) {
+ ret = -ENOMEM;
+ dev_err(fbdev->info->dev, "vmapping fb failed\n");
+ goto err_vmap;
+ }
+ fbdev->info->fix.line_length = fbdev->pitch;
+ fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
+ fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
+ fbdev->info->fix.smem_len = fbdev->dma_buf->size;
+ fbdev->info->screen_base = fbdev->vaddr;
+
+ return 0;
+
+err_vmap:
+ dma_buf_put(fbdev->dma_buf);
+ return ret;
+}
+
+static void adf_fb_destroy(struct adf_fbdev *fbdev)
+{
+ dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
+ dma_buf_put(fbdev->dma_buf);
+}
+
+static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
+{
+ size_t i;
+ const struct adf_fbdev_format *info = fbdev_format_info(format);
+ for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
+ u16 r = vga_palette[i][0];
+ u16 g = vga_palette[i][1];
+ u16 b = vga_palette[i][2];
+
+ r >>= (16 - info->r_length);
+ g >>= (16 - info->g_length);
+ b >>= (16 - info->b_length);
+
+ fbdev->pseudo_palette[i] =
+ (r << info->r_offset) |
+ (g << info->g_offset) |
+ (b << info->b_offset);
+
+ if (info->a_length) {
+ u16 a = BIT(info->a_length) - 1;
+ fbdev->pseudo_palette[i] |= (a << info->a_offset);
+ }
+ }
+
+ fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
+ fbdev->info->var.red.length = info->r_length;
+ fbdev->info->var.red.offset = info->r_offset;
+ fbdev->info->var.green.length = info->g_length;
+ fbdev->info->var.green.offset = info->g_offset;
+ fbdev->info->var.blue.length = info->b_length;
+ fbdev->info->var.blue.offset = info->b_offset;
+ fbdev->info->var.transp.length = info->a_length;
+ fbdev->info->var.transp.offset = info->a_offset;
+ fbdev->format = format;
+}
+
+static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
+{
+ struct drm_mode_modeinfo *modelist;
+ struct fb_videomode fbmode;
+ size_t n_modes, i;
+ int ret = 0;
+
+ n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
+ modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+ if (!modelist) {
+ dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
+ return;
+ }
+ adf_interface_modelist(fbdev->intf, modelist, n_modes);
+
+ fb_destroy_modelist(&fbdev->info->modelist);
+
+ for (i = 0; i < n_modes; i++) {
+ adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
+ ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
+ if (ret < 0)
+ dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
+ modelist[i].name, ret);
+ }
+
+ kfree(modelist);
+}
+
+/**
+ * adf_fbdev_open - default implementation of fbdev open op
+ */
+int adf_fbdev_open(struct fb_info *info, int user)
+{
+ struct adf_fbdev *fbdev = info->par;
+ int ret;
+
+ if (!fbdev->open) {
+ struct drm_mode_modeinfo mode;
+ struct fb_videomode fbmode;
+ struct adf_device *dev = adf_interface_parent(fbdev->intf);
+
+ ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
+ if (ret < 0 && ret != -EALREADY)
+ return ret;
+
+ ret = adf_fb_alloc(fbdev);
+ if (ret < 0)
+ return ret;
+
+ adf_interface_current_mode(fbdev->intf, &mode);
+ adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+ fb_videomode_to_var(&fbdev->info->var, &fbmode);
+
+ adf_fbdev_set_format(fbdev, fbdev->default_format);
+ adf_fbdev_fill_modelist(fbdev);
+ }
+
+ ret = adf_fbdev_post(fbdev);
+ if (ret < 0) {
+ if (!fbdev->open)
+ adf_fb_destroy(fbdev);
+ return ret;
+ }
+
+ fbdev->open = true;
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_open);
+
+/**
+ * adf_fbdev_release - default implementation of fbdev release op
+ */
+int adf_fbdev_release(struct fb_info *info, int user)
+{
+ struct adf_fbdev *fbdev = info->par;
+ adf_fb_destroy(fbdev);
+ fbdev->open = false;
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_release);
+
+/**
+ * adf_fbdev_check_var - default implementation of fbdev check_var op
+ */
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ bool valid_format = true;
+ u32 format = drm_fourcc_from_fb_var(var);
+ u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
+
+ if (!format) {
+ dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
+ valid_format = false;
+ }
+
+ if (valid_format && var->grayscale) {
+ dev_dbg(info->dev, "%s: grayscale modes not supported\n",
+ __func__);
+ valid_format = false;
+ }
+
+ if (valid_format && var->nonstd) {
+ dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
+ __func__);
+ valid_format = false;
+ }
+
+ if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
+ format)) {
+ char format_str[ADF_FORMAT_STR_SIZE];
+ adf_format_str(format, format_str);
+ dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
+ __func__, format_str, fbdev->eng->base.name);
+ valid_format = false;
+ }
+
+ if (valid_format && pitch > fbdev->pitch) {
+ dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
+ __func__, fbdev->pitch, var->xres_virtual,
+ var->bits_per_pixel);
+ valid_format = false;
+ }
+
+ if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
+ dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
+ __func__, fbdev->default_yres_virtual,
+ var->yres_virtual);
+ valid_format = false;
+ }
+
+ if (valid_format) {
+ var->activate = info->var.activate;
+ var->height = info->var.height;
+ var->width = info->var.width;
+ var->accel_flags = info->var.accel_flags;
+ var->rotate = info->var.rotate;
+ var->colorspace = info->var.colorspace;
+ /* userspace can't change these */
+ } else {
+ /* if any part of the format is invalid then fixing it up is
+ impractical, so save just the modesetting bits and
+ overwrite everything else */
+ struct fb_videomode mode;
+ fb_var_to_videomode(&mode, var);
+ memcpy(var, &info->var, sizeof(*var));
+ fb_videomode_to_var(var, &mode);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_check_var);
+
+/**
+ * adf_fbdev_set_par - default implementation of fbdev set_par op
+ */
+int adf_fbdev_set_par(struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ struct adf_interface *intf = fbdev->intf;
+ struct fb_videomode vmode;
+ struct drm_mode_modeinfo mode;
+ int ret;
+ u32 format = drm_fourcc_from_fb_var(&info->var);
+
+ fb_var_to_videomode(&vmode, &info->var);
+ adf_modeinfo_from_fb_videomode(&vmode, &mode);
+ ret = adf_interface_set_mode(intf, &mode);
+ if (ret < 0)
+ return ret;
+
+ ret = adf_fbdev_post(fbdev);
+ if (ret < 0)
+ return ret;
+
+ if (format != fbdev->format)
+ adf_fbdev_set_format(fbdev, format);
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_set_par);
+
+/**
+ * adf_fbdev_blank - default implementation of fbdev blank op
+ */
+int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ struct adf_interface *intf = fbdev->intf;
+ u8 dpms_state;
+
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ dpms_state = DRM_MODE_DPMS_ON;
+ break;
+ case FB_BLANK_NORMAL:
+ dpms_state = DRM_MODE_DPMS_STANDBY;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ dpms_state = DRM_MODE_DPMS_SUSPEND;
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ dpms_state = DRM_MODE_DPMS_STANDBY;
+ break;
+ case FB_BLANK_POWERDOWN:
+ dpms_state = DRM_MODE_DPMS_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adf_interface_blank(intf, dpms_state);
+}
+EXPORT_SYMBOL(adf_fbdev_blank);
+
+/**
+ * adf_fbdev_pan_display - default implementation of fbdev pan_display op
+ */
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct adf_fbdev *fbdev = info->par;
+ return adf_fbdev_post(fbdev);
+}
+EXPORT_SYMBOL(adf_fbdev_pan_display);
+
+/**
+ * adf_fbdev_mmap - default implementation of fbdev mmap op
+ */
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct adf_fbdev *fbdev = info->par;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return dma_buf_mmap(fbdev->dma_buf, vma, 0);
+}
+EXPORT_SYMBOL(adf_fbdev_mmap);
+
+/**
+ * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ * @interface: the ADF interface that will display the framebuffer
+ * @eng: the ADF overlay engine that will scan out the framebuffer
+ * @xres_virtual: the virtual width of the framebuffer
+ * @yres_virtual: the virtual height of the framebuffer
+ * @format: the format of the framebuffer
+ * @fbops: the device's fbdev ops
+ * @fmt: formatting for the framebuffer identification string
+ * @...: variable arguments
+ *
+ * @format must be a standard, non-indexed RGB format, i.e.,
+ * adf_format_is_rgb(@format) && @format != %DRM_FORMAT_C8.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+ struct adf_overlay_engine *eng,
+ u16 xres_virtual, u16 yres_virtual, u32 format,
+ struct fb_ops *fbops, const char *fmt, ...)
+{
+ struct adf_device *parent = adf_interface_parent(interface);
+ struct device *dev = &parent->base.dev;
+ u16 width_mm, height_mm;
+ va_list args;
+ int ret;
+
+ if (!adf_format_is_rgb(format) ||
+ format == DRM_FORMAT_C8) {
+ dev_err(dev, "fbdev helper does not support format %u\n",
+ format);
+ return -EINVAL;
+ }
+
+ memset(fbdev, 0, sizeof(*fbdev));
+ fbdev->intf = interface;
+ fbdev->eng = eng;
+ fbdev->info = framebuffer_alloc(0, dev);
+ if (!fbdev->info) {
+ dev_err(dev, "allocating framebuffer device failed\n");
+ return -ENOMEM;
+ }
+ fbdev->default_xres_virtual = xres_virtual;
+ fbdev->default_yres_virtual = yres_virtual;
+ fbdev->default_format = format;
+
+ fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+ ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
+ if (ret < 0) {
+ width_mm = 0;
+ height_mm = 0;
+ }
+ fbdev->info->var.width = width_mm;
+ fbdev->info->var.height = height_mm;
+ fbdev->info->var.activate = FB_ACTIVATE_VBL;
+ va_start(args, fmt);
+ vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
+ va_end(args);
+ fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
+ fbdev->info->fix.xpanstep = 1;
+ fbdev->info->fix.ypanstep = 1;
+ INIT_LIST_HEAD(&fbdev->info->modelist);
+ fbdev->info->fbops = fbops;
+ fbdev->info->pseudo_palette = fbdev->pseudo_palette;
+ fbdev->info->par = fbdev;
+
+ ret = register_framebuffer(fbdev->info);
+ if (ret < 0) {
+ dev_err(dev, "registering framebuffer failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_init);
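+
+/*
+ * Illustrative driver-side wiring (sketch only; my_fb_ops, my_fbdev, intf and
+ * eng are hypothetical): a driver usually points its fb_ops at the
+ * adf_fbdev_* defaults and calls adf_fbdev_init() from its probe path:
+ *
+ *  static struct fb_ops my_fb_ops = {
+ *   .owner = THIS_MODULE,
+ *   .fb_open = adf_fbdev_open,
+ *   .fb_release = adf_fbdev_release,
+ *   .fb_check_var = adf_fbdev_check_var,
+ *   .fb_set_par = adf_fbdev_set_par,
+ *   .fb_blank = adf_fbdev_blank,
+ *   .fb_pan_display = adf_fbdev_pan_display,
+ *   .fb_mmap = adf_fbdev_mmap,
+ *  };
+ *
+ *  err = adf_fbdev_init(&my_fbdev, intf, eng, 1920, 1080,
+ *    DRM_FORMAT_XRGB8888, &my_fb_ops, "my-adf-fb");
+ */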
+
+/**
+ * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ */
+void adf_fbdev_destroy(struct adf_fbdev *fbdev)
+{
+ unregister_framebuffer(fbdev->info);
+ if (WARN_ON(fbdev->open))
+ adf_fb_destroy(fbdev);
+ framebuffer_release(fbdev->info);
+}
+EXPORT_SYMBOL(adf_fbdev_destroy);
diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c
new file mode 100644
index 000000000000..abec58ea2ed8
--- /dev/null
+++ b/drivers/video/adf/adf_fops.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#ifdef CONFIG_COMPAT
+#include "adf_fops32.h"
+#endif
+
+static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
+ struct adf_set_event __user *arg)
+{
+ struct adf_set_event data;
+ bool enabled;
+ unsigned long flags;
+ int err;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ err = adf_obj_check_supports_event(obj, data.type);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+ if (data.enabled)
+ enabled = test_and_set_bit(data.type,
+ file->event_subscriptions);
+ else
+ enabled = test_and_clear_bit(data.type,
+ file->event_subscriptions);
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+
+ if (data.enabled == enabled)
+ return -EALREADY;
+
+ if (data.enabled)
+ adf_event_get(obj, data.type);
+ else
+ adf_event_put(obj, data.type);
+
+ return 0;
+}
+
+static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
+ void __user *dst, size_t *dst_size)
+{
+ void *custom_data;
+ size_t custom_data_size;
+ int ret;
+
+ if (!obj->ops || !obj->ops->custom_data) {
+ dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
+ return 0;
+ }
+
+ custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+ if (!custom_data)
+ return -ENOMEM;
+
+ ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
+ if (ret < 0)
+ goto done;
+
+ if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ *dst_size = custom_data_size;
+
+done:
+ kfree(custom_data);
+ return ret;
+}
+
+static int adf_eng_get_data(struct adf_overlay_engine *eng,
+ struct adf_overlay_engine_data __user *arg)
+{
+ struct adf_device *dev = adf_overlay_engine_parent(eng);
+ struct adf_overlay_engine_data data;
+ size_t n_supported_formats;
+ u32 *supported_formats = NULL;
+ int ret = 0;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ strlcpy(data.name, eng->base.name, sizeof(data.name));
+
+ if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
+ return -EINVAL;
+
+ n_supported_formats = data.n_supported_formats;
+ data.n_supported_formats = eng->ops->n_supported_formats;
+
+ if (n_supported_formats) {
+ supported_formats = kzalloc(n_supported_formats *
+ sizeof(supported_formats[0]), GFP_KERNEL);
+ if (!supported_formats)
+ return -ENOMEM;
+ }
+
+ memcpy(supported_formats, eng->ops->supported_formats,
+ sizeof(u32) * min(n_supported_formats,
+ eng->ops->n_supported_formats));
+
+ mutex_lock(&dev->client_lock);
+ ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data,
+ &data.custom_data_size);
+ mutex_unlock(&dev->client_lock);
+
+ if (ret < 0)
+ goto done;
+
+ if (copy_to_user(arg, &data, sizeof(data))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (supported_formats && copy_to_user(arg->supported_formats,
+ supported_formats,
+ n_supported_formats * sizeof(supported_formats[0])))
+ ret = -EFAULT;
+
+done:
+ kfree(supported_formats);
+ return ret;
+}
+
+static int adf_buffer_import(struct adf_device *dev,
+ struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
+{
+ struct adf_buffer_config user_buf;
+ size_t i;
+ int ret = 0;
+
+ if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
+ return -EFAULT;
+
+ memset(buf, 0, sizeof(*buf));
+
+ if (user_buf.n_planes > ADF_MAX_PLANES) {
+ dev_err(&dev->base.dev, "invalid plane count %u\n",
+ user_buf.n_planes);
+ return -EINVAL;
+ }
+
+ buf->overlay_engine = idr_find(&dev->overlay_engines,
+ user_buf.overlay_engine);
+ if (!buf->overlay_engine) {
+ dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+ user_buf.overlay_engine);
+ return -ENOENT;
+ }
+
+ buf->w = user_buf.w;
+ buf->h = user_buf.h;
+ buf->format = user_buf.format;
+ for (i = 0; i < user_buf.n_planes; i++) {
+ buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
+ if (IS_ERR(buf->dma_bufs[i])) {
+ ret = PTR_ERR(buf->dma_bufs[i]);
+ dev_err(&dev->base.dev, "importing dma_buf fd %llu failed: %d\n",
+ user_buf.fd[i], ret);
+ buf->dma_bufs[i] = NULL;
+ goto done;
+ }
+ buf->offset[i] = user_buf.offset[i];
+ buf->pitch[i] = user_buf.pitch[i];
+ }
+ buf->n_planes = user_buf.n_planes;
+
+ if (user_buf.acquire_fence >= 0) {
+ buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
+ if (!buf->acquire_fence) {
+ dev_err(&dev->base.dev, "getting fence fd %lld failed\n",
+ user_buf.acquire_fence);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ if (ret < 0)
+ adf_buffer_cleanup(buf);
+ return ret;
+}
+
+static int adf_device_post_config(struct adf_device *dev,
+ struct adf_post_config __user *arg)
+{
+ struct sync_fence *complete_fence;
+ int complete_fence_fd;
+ struct adf_buffer *bufs = NULL;
+ struct adf_interface **intfs = NULL;
+ size_t n_intfs, n_bufs, i;
+ void *custom_data = NULL;
+ size_t custom_data_size;
+ int ret = 0;
+
+ complete_fence_fd = get_unused_fd();
+ if (complete_fence_fd < 0)
+ return complete_fence_fd;
+
+ if (get_user(n_intfs, &arg->n_interfaces)) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ if (n_intfs > ADF_MAX_INTERFACES) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+
+ if (get_user(n_bufs, &arg->n_bufs)) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ if (n_bufs > ADF_MAX_BUFFERS) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+
+ if (get_user(custom_data_size, &arg->custom_data_size)) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+
+ if (n_intfs) {
+ intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL);
+ if (!intfs) {
+ ret = -ENOMEM;
+ goto err_get_user;
+ }
+ }
+
+ for (i = 0; i < n_intfs; i++) {
+ u32 intf_id;
+ if (get_user(intf_id, &arg->interfaces[i])) {
+ ret = -EFAULT;
+ goto err_get_user;
+ }
+
+ intfs[i] = idr_find(&dev->interfaces, intf_id);
+ if (!intfs[i]) {
+ ret = -EINVAL;
+ goto err_get_user;
+ }
+ }
+
+ if (n_bufs) {
+ bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL);
+ if (!bufs) {
+ ret = -ENOMEM;
+ goto err_get_user;
+ }
+ }
+
+ for (i = 0; i < n_bufs; i++) {
+ ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]);
+ if (ret < 0) {
+ memset(&bufs[i], 0, sizeof(bufs[i]));
+ goto err_import;
+ }
+ }
+
+ if (custom_data_size) {
+ custom_data = kzalloc(custom_data_size, GFP_KERNEL);
+ if (!custom_data) {
+ ret = -ENOMEM;
+ goto err_import;
+ }
+
+ if (copy_from_user(custom_data, arg->custom_data,
+ custom_data_size)) {
+ ret = -EFAULT;
+ goto err_import;
+ }
+ }
+
+ if (put_user(complete_fence_fd, &arg->complete_fence)) {
+ ret = -EFAULT;
+ goto err_import;
+ }
+
+ complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs,
+ n_bufs, custom_data, custom_data_size);
+ if (IS_ERR(complete_fence)) {
+ ret = PTR_ERR(complete_fence);
+ goto err_import;
+ }
+
+ sync_fence_install(complete_fence, complete_fence_fd);
+ return 0;
+
+err_import:
+ for (i = 0; i < n_bufs; i++)
+ adf_buffer_cleanup(&bufs[i]);
+
+err_get_user:
+ kfree(custom_data);
+ kfree(bufs);
+ kfree(intfs);
+ put_unused_fd(complete_fence_fd);
+ return ret;
+}
+
+static int adf_intf_simple_post_config(struct adf_interface *intf,
+ struct adf_simple_post_config __user *arg)
+{
+ struct adf_device *dev = intf->base.parent;
+ struct sync_fence *complete_fence;
+ int complete_fence_fd;
+ struct adf_buffer buf;
+ int ret = 0;
+
+ complete_fence_fd = get_unused_fd();
+ if (complete_fence_fd < 0)
+ return complete_fence_fd;
+
+ ret = adf_buffer_import(dev, &arg->buf, &buf);
+ if (ret < 0)
+ goto err_import;
+
+ if (put_user(complete_fence_fd, &arg->complete_fence)) {
+ ret = -EFAULT;
+ goto err_put_user;
+ }
+
+ complete_fence = adf_interface_simple_post(intf, &buf);
+ if (IS_ERR(complete_fence)) {
+ ret = PTR_ERR(complete_fence);
+ goto err_put_user;
+ }
+
+ sync_fence_install(complete_fence, complete_fence_fd);
+ return 0;
+
+err_put_user:
+ adf_buffer_cleanup(&buf);
+err_import:
+ put_unused_fd(complete_fence_fd);
+ return ret;
+}
+
+static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
+ struct adf_simple_buffer_alloc __user *arg)
+{
+ struct adf_simple_buffer_alloc data;
+ struct dma_buf *dma_buf;
+ int ret = 0;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ data.fd = get_unused_fd_flags(O_CLOEXEC);
+ if (data.fd < 0)
+ return data.fd;
+
+ ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
+ data.format, &dma_buf, &data.offset, &data.pitch);
+ if (ret < 0)
+ goto err_alloc;
+
+ if (copy_to_user(arg, &data, sizeof(*arg))) {
+ ret = -EFAULT;
+ goto err_copy;
+ }
+
+ fd_install(data.fd, dma_buf->file);
+ return 0;
+
+err_copy:
+ dma_buf_put(dma_buf);
+
+err_alloc:
+ put_unused_fd(data.fd);
+ return ret;
+}
+
+static int adf_copy_attachment_list_to_user(
+ struct adf_attachment_config __user *to, size_t n_to,
+ struct adf_attachment *from, size_t n_from)
+{
+ struct adf_attachment_config *temp;
+ size_t n = min(n_to, n_from);
+ size_t i;
+ int ret = 0;
+
+ if (!n)
+ return 0;
+
+ temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ temp[i].interface = from[i].interface->base.id;
+ temp[i].overlay_engine = from[i].overlay_engine->base.id;
+ }
+
+ if (copy_to_user(to, temp, n * sizeof(to[0]))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+done:
+ kfree(temp);
+ return ret;
+}
+
+static int adf_device_get_data(struct adf_device *dev,
+ struct adf_device_data __user *arg)
+{
+ struct adf_device_data data;
+ size_t n_attach;
+ struct adf_attachment *attach = NULL;
+ size_t n_allowed_attach;
+ struct adf_attachment *allowed_attach = NULL;
+ int ret = 0;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
+ data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
+ return -EINVAL;
+
+ strlcpy(data.name, dev->base.name, sizeof(data.name));
+
+ if (data.n_attachments) {
+ attach = kzalloc(data.n_attachments * sizeof(attach[0]),
+ GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+ }
+ n_attach = adf_device_attachments(dev, attach, data.n_attachments);
+
+ if (data.n_allowed_attachments) {
+ allowed_attach = kzalloc(data.n_allowed_attachments *
+ sizeof(allowed_attach[0]), GFP_KERNEL);
+ if (!allowed_attach) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+ n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
+ data.n_allowed_attachments);
+
+ mutex_lock(&dev->client_lock);
+ ret = adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data,
+ &data.custom_data_size);
+ mutex_unlock(&dev->client_lock);
+
+ if (ret < 0)
+ goto done;
+
+ ret = adf_copy_attachment_list_to_user(arg->attachments,
+ data.n_attachments, attach, n_attach);
+ if (ret < 0)
+ goto done;
+
+ ret = adf_copy_attachment_list_to_user(arg->allowed_attachments,
+ data.n_allowed_attachments, allowed_attach,
+ n_allowed_attach);
+ if (ret < 0)
+ goto done;
+
+ data.n_attachments = n_attach;
+ data.n_allowed_attachments = n_allowed_attach;
+
+ if (copy_to_user(arg, &data, sizeof(data)))
+ ret = -EFAULT;
+
+done:
+ kfree(allowed_attach);
+ kfree(attach);
+ return ret;
+}
+
+static int adf_device_handle_attachment(struct adf_device *dev,
+ struct adf_attachment_config __user *arg, bool attach)
+{
+ struct adf_attachment_config data;
+ struct adf_overlay_engine *eng;
+ struct adf_interface *intf;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ eng = idr_find(&dev->overlay_engines, data.overlay_engine);
+ if (!eng) {
+ dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+ data.overlay_engine);
+ return -EINVAL;
+ }
+
+ intf = idr_find(&dev->interfaces, data.interface);
+ if (!intf) {
+ dev_err(&dev->base.dev, "invalid interface id %u\n",
+ data.interface);
+ return -EINVAL;
+ }
+
+ if (attach)
+ return adf_device_attach(dev, eng, intf);
+ else
+ return adf_device_detach(dev, eng, intf);
+}
+
+static int adf_intf_set_mode(struct adf_interface *intf,
+ struct drm_mode_modeinfo __user *arg)
+{
+ struct drm_mode_modeinfo mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode)))
+ return -EFAULT;
+
+ return adf_interface_set_mode(intf, &mode);
+}
+
+static int adf_intf_get_data(struct adf_interface *intf,
+ struct adf_interface_data __user *arg)
+{
+ struct adf_device *dev = adf_interface_parent(intf);
+ struct adf_interface_data data;
+ struct drm_mode_modeinfo *modelist;
+ size_t modelist_size;
+ int err;
+ int ret = 0;
+ unsigned long flags;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ strlcpy(data.name, intf->base.name, sizeof(data.name));
+
+ data.type = intf->type;
+ data.id = intf->idx;
+ data.flags = intf->flags;
+
+ err = adf_interface_get_screen_size(intf, &data.width_mm,
+ &data.height_mm);
+ if (err < 0) {
+ data.width_mm = 0;
+ data.height_mm = 0;
+ }
+
+ modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
+ if (!modelist)
+ return -ENOMEM;
+
+ mutex_lock(&dev->client_lock);
+ read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+ data.hotplug_detect = intf->hotplug_detect;
+ modelist_size = min(data.n_available_modes, intf->n_modes) *
+ sizeof(intf->modelist[0]);
+ memcpy(modelist, intf->modelist, modelist_size);
+ data.n_available_modes = intf->n_modes;
+ read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+ if (copy_to_user(arg->available_modes, modelist, modelist_size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ data.dpms_state = intf->dpms_state;
+ memcpy(&data.current_mode, &intf->current_mode,
+ sizeof(intf->current_mode));
+
+ ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data,
+ &data.custom_data_size);
+done:
+ mutex_unlock(&dev->client_lock);
+ kfree(modelist);
+
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(arg, &data, sizeof(data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
+ unsigned long arg)
+{
+ if (obj->ops && obj->ops->ioctl)
+ return obj->ops->ioctl(obj, cmd, arg);
+ return -ENOTTY;
+}
+
+static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
+ struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_SET_EVENT:
+ return adf_obj_set_event(&eng->base, file,
+ (struct adf_set_event __user *)arg);
+
+ case ADF_GET_OVERLAY_ENGINE_DATA:
+ return adf_eng_get_data(eng,
+ (struct adf_overlay_engine_data __user *)arg);
+
+ case ADF_BLANK:
+ case ADF_POST_CONFIG:
+ case ADF_SET_MODE:
+ case ADF_GET_DEVICE_DATA:
+ case ADF_GET_INTERFACE_DATA:
+ case ADF_SIMPLE_POST_CONFIG:
+ case ADF_SIMPLE_BUFFER_ALLOC:
+ case ADF_ATTACH:
+ case ADF_DETACH:
+ return -EINVAL;
+
+ default:
+ return adf_obj_custom_ioctl(&eng->base, cmd, arg);
+ }
+}
+
+static long adf_interface_ioctl(struct adf_interface *intf,
+ struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_SET_EVENT:
+ return adf_obj_set_event(&intf->base, file,
+ (struct adf_set_event __user *)arg);
+
+ case ADF_BLANK:
+ return adf_interface_blank(intf, arg);
+
+ case ADF_SET_MODE:
+ return adf_intf_set_mode(intf,
+ (struct drm_mode_modeinfo __user *)arg);
+
+ case ADF_GET_INTERFACE_DATA:
+ return adf_intf_get_data(intf,
+ (struct adf_interface_data __user *)arg);
+
+ case ADF_SIMPLE_POST_CONFIG:
+ return adf_intf_simple_post_config(intf,
+ (struct adf_simple_post_config __user *)arg);
+
+ case ADF_SIMPLE_BUFFER_ALLOC:
+ return adf_intf_simple_buffer_alloc(intf,
+ (struct adf_simple_buffer_alloc __user *)arg);
+
+ case ADF_POST_CONFIG:
+ case ADF_GET_DEVICE_DATA:
+ case ADF_GET_OVERLAY_ENGINE_DATA:
+ case ADF_ATTACH:
+ case ADF_DETACH:
+ return -EINVAL;
+
+ default:
+ return adf_obj_custom_ioctl(&intf->base, cmd, arg);
+ }
+}
+
+static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_SET_EVENT:
+ return adf_obj_set_event(&dev->base, file,
+ (struct adf_set_event __user *)arg);
+
+ case ADF_POST_CONFIG:
+ return adf_device_post_config(dev,
+ (struct adf_post_config __user *)arg);
+
+ case ADF_GET_DEVICE_DATA:
+ return adf_device_get_data(dev,
+ (struct adf_device_data __user *)arg);
+
+ case ADF_ATTACH:
+ return adf_device_handle_attachment(dev,
+ (struct adf_attachment_config __user *)arg,
+ true);
+
+ case ADF_DETACH:
+ return adf_device_handle_attachment(dev,
+ (struct adf_attachment_config __user *)arg,
+ false);
+
+ case ADF_BLANK:
+ case ADF_SET_MODE:
+ case ADF_GET_INTERFACE_DATA:
+ case ADF_GET_OVERLAY_ENGINE_DATA:
+ case ADF_SIMPLE_POST_CONFIG:
+ case ADF_SIMPLE_BUFFER_ALLOC:
+ return -EINVAL;
+
+ default:
+ return adf_obj_custom_ioctl(&dev->base, cmd, arg);
+ }
+}
+
+static int adf_file_open(struct inode *inode, struct file *file)
+{
+ struct adf_obj *obj;
+ struct adf_file *fpriv = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ obj = adf_obj_sysfs_find(iminor(inode));
+ if (!obj)
+ return -ENODEV;
+
+ dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
+
+ if (!try_module_get(obj->parent->ops->owner)) {
+ dev_err(&obj->dev, "getting owner module failed\n");
+ return -ENODEV;
+ }
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (!fpriv) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ INIT_LIST_HEAD(&fpriv->head);
+ fpriv->obj = obj;
+ init_waitqueue_head(&fpriv->event_wait);
+
+ file->private_data = fpriv;
+
+ if (obj->ops && obj->ops->open) {
+ ret = obj->ops->open(obj, inode, file);
+ if (ret < 0)
+ goto done;
+ }
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+ list_add_tail(&fpriv->head, &obj->file_list);
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+
+done:
+ if (ret < 0) {
+ kfree(fpriv);
+ module_put(obj->parent->ops->owner);
+ }
+ return ret;
+}
+
+static int adf_file_release(struct inode *inode, struct file *file)
+{
+ struct adf_file *fpriv = file->private_data;
+ struct adf_obj *obj = fpriv->obj;
+ enum adf_event_type event_type;
+ unsigned long flags;
+
+ if (obj->ops && obj->ops->release)
+ obj->ops->release(obj, inode, file);
+
+ spin_lock_irqsave(&obj->file_lock, flags);
+ list_del(&fpriv->head);
+ spin_unlock_irqrestore(&obj->file_lock, flags);
+
+ for_each_set_bit(event_type, fpriv->event_subscriptions,
+ ADF_EVENT_TYPE_MAX) {
+ adf_event_put(obj, event_type);
+ }
+
+ kfree(fpriv);
+ module_put(obj->parent->ops->owner);
+
+ dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
+ return 0;
+}
+
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct adf_file *fpriv = file->private_data;
+ struct adf_obj *obj = fpriv->obj;
+ long ret = -EINVAL;
+
+ dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
+
+ switch (obj->type) {
+ case ADF_OBJ_OVERLAY_ENGINE:
+ ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
+ fpriv, cmd, arg);
+ break;
+
+ case ADF_OBJ_INTERFACE:
+ ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
+ arg);
+ break;
+
+ case ADF_OBJ_DEVICE:
+ ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
+ break;
+ }
+
+ return ret;
+}
+
+static inline bool adf_file_event_available(struct adf_file *fpriv)
+{
+ int head = fpriv->event_head;
+ int tail = fpriv->event_tail;
+ return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
+}
+
+void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
+{
+ int head = fpriv->event_head;
+ int tail = fpriv->event_tail;
+ size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
+ size_t space_to_end =
+ CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
+
+ if (space < event->length) {
+ dev_dbg(&fpriv->obj->dev,
+ "insufficient buffer space for event %u\n",
+ event->type);
+ return;
+ }
+
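+ /*
+ * The event may wrap around the end of the circular buffer, so copy it
+ * in up to two chunks.
+ */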
+ if (space_to_end >= event->length) {
+ memcpy(fpriv->event_buf + head, event, event->length);
+ } else {
+ memcpy(fpriv->event_buf + head, event, space_to_end);
+ memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
+ event->length - space_to_end);
+ }
+
+ smp_wmb();
+ fpriv->event_head = (fpriv->event_head + event->length) &
+ (sizeof(fpriv->event_buf) - 1);
+ wake_up_interruptible_all(&fpriv->event_wait);
+}
+
+static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
+ char __user *buffer, size_t buffer_size)
+{
+ int head, tail;
+ u8 *event_buf;
+ size_t cnt, cnt_to_end, copy_size = 0;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
+ GFP_KERNEL);
+ if (!event_buf)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fpriv->obj->file_lock, flags);
+
+ if (!adf_file_event_available(fpriv))
+ goto out;
+
+ head = fpriv->event_head;
+ tail = fpriv->event_tail;
+
+ cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
+ cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
+ copy_size = min(buffer_size, cnt);
+
+ if (cnt_to_end >= copy_size) {
+ memcpy(event_buf, fpriv->event_buf + tail, copy_size);
+ } else {
+ memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
+ memcpy(event_buf + cnt_to_end, fpriv->event_buf,
+ copy_size - cnt_to_end);
+ }
+
+ fpriv->event_tail = (fpriv->event_tail + copy_size) &
+ (sizeof(fpriv->event_buf) - 1);
+
+out:
+ spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
+ if (copy_size) {
+ if (copy_to_user(buffer, event_buf, copy_size))
+ ret = -EFAULT;
+ else
+ ret = copy_size;
+ }
+ kfree(event_buf);
+ return ret;
+}
+
+ssize_t adf_file_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct adf_file *fpriv = filp->private_data;
+ int err;
+
+ err = wait_event_interruptible(fpriv->event_wait,
+ adf_file_event_available(fpriv));
+ if (err < 0)
+ return err;
+
+ return adf_file_copy_to_user(fpriv, buffer, count);
+}
+
+unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct adf_file *fpriv = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &fpriv->event_wait, wait);
+
+ if (adf_file_event_available(fpriv))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+const struct file_operations adf_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = adf_file_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = adf_file_compat_ioctl,
+#endif
+ .open = adf_file_open,
+ .release = adf_file_release,
+ .llseek = default_llseek,
+ .read = adf_file_read,
+ .poll = adf_file_poll,
+};
diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h
new file mode 100644
index 000000000000..90a3a74796d6
--- /dev/null
+++ b/drivers/video/adf/adf_fops.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_FOPS_H
+#define __VIDEO_ADF_ADF_FOPS_H
+
+#include <linux/bitmap.h>
+#include <linux/fs.h>
+
+extern const struct file_operations adf_fops;
+
+struct adf_file {
+ struct list_head head;
+ struct adf_obj *obj;
+
+ DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
+ u8 event_buf[4096];
+ int event_head;
+ int event_tail;
+ wait_queue_head_t event_wait;
+};
+
+void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS_H */
diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c
new file mode 100644
index 000000000000..60a47cf5a781
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <video/adf.h>
+
+#include "adf_fops.h"
+#include "adf_fops32.h"
+
+long adf_compat_post_config(struct file *file,
+ struct adf_post_config32 __user *arg)
+{
+ struct adf_post_config32 cfg32;
+ struct adf_post_config __user *cfg;
+ int ret;
+
+ if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
+ return -EFAULT;
+
+ cfg = compat_alloc_user_space(sizeof(*cfg));
+ if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
+ return -EFAULT;
+
+ if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
+ put_user(compat_ptr(cfg32.interfaces),
+ &cfg->interfaces) ||
+ put_user(cfg32.n_bufs, &cfg->n_bufs) ||
+ put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
+ put_user(cfg32.custom_data_size,
+ &cfg->custom_data_size) ||
+ put_user(compat_ptr(cfg32.custom_data),
+ &cfg->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
+ sizeof(cfg->complete_fence)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_compat_get_device_data(struct file *file,
+ struct adf_device_data32 __user *arg)
+{
+ struct adf_device_data32 data32;
+ struct adf_device_data __user *data;
+ int ret;
+
+ if (copy_from_user(&data32, arg, sizeof(data32)))
+ return -EFAULT;
+
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+ return -EFAULT;
+
+ if (put_user(data32.n_attachments, &data->n_attachments) ||
+ put_user(compat_ptr(data32.attachments),
+ &data->attachments) ||
+ put_user(data32.n_allowed_attachments,
+ &data->n_allowed_attachments) ||
+ put_user(compat_ptr(data32.allowed_attachments),
+ &data->allowed_attachments) ||
+ put_user(data32.custom_data_size,
+ &data->custom_data_size) ||
+ put_user(compat_ptr(data32.custom_data),
+ &data->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+ copy_in_user(&arg->n_attachments, &data->n_attachments,
+ sizeof(arg->n_attachments)) ||
+ copy_in_user(&arg->n_allowed_attachments,
+ &data->n_allowed_attachments,
+ sizeof(arg->n_allowed_attachments)) ||
+ copy_in_user(&arg->custom_data_size,
+ &data->custom_data_size,
+ sizeof(arg->custom_data_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_compat_get_interface_data(struct file *file,
+ struct adf_interface_data32 __user *arg)
+{
+ struct adf_interface_data32 data32;
+ struct adf_interface_data __user *data;
+ int ret;
+
+ if (copy_from_user(&data32, arg, sizeof(data32)))
+ return -EFAULT;
+
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+ return -EFAULT;
+
+ if (put_user(data32.n_available_modes, &data->n_available_modes) ||
+ put_user(compat_ptr(data32.available_modes),
+ &data->available_modes) ||
+ put_user(data32.custom_data_size,
+ &data->custom_data_size) ||
+ put_user(compat_ptr(data32.custom_data),
+ &data->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+ copy_in_user(&arg->type, &data->type,
+ sizeof(arg->type)) ||
+ copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
+ copy_in_user(&arg->flags, &data->flags,
+ sizeof(arg->flags)) ||
+ copy_in_user(&arg->dpms_state, &data->dpms_state,
+ sizeof(arg->dpms_state)) ||
+ copy_in_user(&arg->hotplug_detect,
+ &data->hotplug_detect,
+ sizeof(arg->hotplug_detect)) ||
+ copy_in_user(&arg->width_mm, &data->width_mm,
+ sizeof(arg->width_mm)) ||
+ copy_in_user(&arg->height_mm, &data->height_mm,
+ sizeof(arg->height_mm)) ||
+ copy_in_user(&arg->current_mode, &data->current_mode,
+ sizeof(arg->current_mode)) ||
+ copy_in_user(&arg->n_available_modes,
+ &data->n_available_modes,
+ sizeof(arg->n_available_modes)) ||
+ copy_in_user(&arg->custom_data_size,
+ &data->custom_data_size,
+ sizeof(arg->custom_data_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_compat_get_overlay_engine_data(struct file *file,
+ struct adf_overlay_engine_data32 __user *arg)
+{
+ struct adf_overlay_engine_data32 data32;
+ struct adf_overlay_engine_data __user *data;
+ int ret;
+
+ if (copy_from_user(&data32, arg, sizeof(data32)))
+ return -EFAULT;
+
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+ return -EFAULT;
+
+ if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
+ put_user(compat_ptr(data32.supported_formats),
+ &data->supported_formats) ||
+ put_user(data32.custom_data_size,
+ &data->custom_data_size) ||
+ put_user(compat_ptr(data32.custom_data),
+ &data->custom_data))
+ return -EFAULT;
+
+ ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
+ (unsigned long)data);
+ if (ret < 0)
+ return ret;
+
+ if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+ copy_in_user(&arg->n_supported_formats,
+ &data->n_supported_formats,
+ sizeof(arg->n_supported_formats)) ||
+ copy_in_user(&arg->custom_data_size,
+ &data->custom_data_size,
+ sizeof(arg->custom_data_size)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case ADF_POST_CONFIG32:
+ return adf_compat_post_config(file, compat_ptr(arg));
+
+ case ADF_GET_DEVICE_DATA32:
+ return adf_compat_get_device_data(file, compat_ptr(arg));
+
+ case ADF_GET_INTERFACE_DATA32:
+ return adf_compat_get_interface_data(file, compat_ptr(arg));
+
+ case ADF_GET_OVERLAY_ENGINE_DATA32:
+ return adf_compat_get_overlay_engine_data(file,
+ compat_ptr(arg));
+
+ default:
+ return adf_file_ioctl(file, cmd, arg);
+ }
+}
diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h
new file mode 100644
index 000000000000..18c673dc5e2e
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.h
@@ -0,0 +1,78 @@
+#ifndef __VIDEO_ADF_ADF_FOPS32_H
+#define __VIDEO_ADF_ADF_FOPS32_H
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <video/adf.h>
+
+#define ADF_POST_CONFIG32 \
+ _IOW('D', 2, struct adf_post_config32)
+#define ADF_GET_DEVICE_DATA32 \
+ _IOR('D', 4, struct adf_device_data32)
+#define ADF_GET_INTERFACE_DATA32 \
+ _IOR('D', 5, struct adf_interface_data32)
+#define ADF_GET_OVERLAY_ENGINE_DATA32 \
+ _IOR('D', 6, struct adf_overlay_engine_data32)
+
+struct adf_post_config32 {
+ compat_size_t n_interfaces;
+ compat_uptr_t interfaces;
+
+ compat_size_t n_bufs;
+ compat_uptr_t bufs;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+
+ __s64 complete_fence;
+};
+
+struct adf_device_data32 {
+ char name[ADF_NAME_LEN];
+
+ compat_size_t n_attachments;
+ compat_uptr_t attachments;
+
+ compat_size_t n_allowed_attachments;
+ compat_uptr_t allowed_attachments;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+};
+
+struct adf_interface_data32 {
+ char name[ADF_NAME_LEN];
+
+ __u8 type;
+ __u32 id;
+ /* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+ __u32 flags;
+
+ __u8 dpms_state;
+ __u8 hotplug_detect;
+ __u16 width_mm;
+ __u16 height_mm;
+
+ struct drm_mode_modeinfo current_mode;
+ compat_size_t n_available_modes;
+ compat_uptr_t available_modes;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+};
+
+struct adf_overlay_engine_data32 {
+ char name[ADF_NAME_LEN];
+
+ compat_size_t n_supported_formats;
+ compat_uptr_t supported_formats;
+
+ compat_size_t custom_data_size;
+ compat_uptr_t custom_data;
+};
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS32_H */
diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c
new file mode 100644
index 000000000000..e3f22c7c85d9
--- /dev/null
+++ b/drivers/video/adf/adf_format.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * modified from drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drm_fourcc.h>
+#include <video/adf_format.h>
+
+bool adf_format_is_standard(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(adf_format_is_standard);
+
+bool adf_format_is_rgb(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(adf_format_is_rgb);
+
+u8 adf_format_num_planes(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(adf_format_num_planes);
+
+u8 adf_format_bpp(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ return 8;
+
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ return 16;
+
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ return 24;
+
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return 32;
+
+ default:
+ pr_debug("%s: unsupported pixel format %u\n", __func__, format);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(adf_format_bpp);
+
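+/*
+ * Bytes per sample group in the given plane.  A short worked example:
+ * for DRM_FORMAT_NV12, plane 0 is Y at 1 byte per pixel and plane 1 is
+ * interleaved CbCr at 2 bytes per chroma sample; packed RGB formats
+ * fall through to adf_format_bpp() / 8.
+ */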
+u8 adf_format_plane_cpp(u32 format, int plane)
+{
+ if (plane >= adf_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ return adf_format_bpp(format) / 8;
+ }
+}
+EXPORT_SYMBOL(adf_format_plane_cpp);
+
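+/*
+ * Horizontal downsampling factor of the chroma planes relative to luma.
+ * Together with the vertical factor below this gives the chroma plane
+ * geometry: a w x h DRM_FORMAT_YUV420 buffer carries two (w / 2) x (h / 2)
+ * chroma planes, while DRM_FORMAT_YUV422 chroma is (w / 2) x h.
+ */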
+u8 adf_format_horz_chroma_subsampling(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
+
+u8 adf_format_vert_chroma_subsampling(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);
diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c
new file mode 100644
index 000000000000..3c99f27388db
--- /dev/null
+++ b/drivers/video/adf/adf_memblock.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+struct adf_memblock_pdata {
+ phys_addr_t base;
+};
+
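+/*
+ * The exported region is physically contiguous, so the dma-buf mapping
+ * is a single-entry scatterlist covering the whole buffer starting at
+ * pdata->base.
+ */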
+static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
+ unsigned long pfn = PFN_DOWN(pdata->base);
+ struct page *page = pfn_to_page(pfn);
+ struct sg_table *table;
+ int ret;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+
+ sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
+ return table;
+
+err:
+ kfree(table);
+ return ERR_PTR(ret);
+}
+
+static void adf_memblock_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *table, enum dma_data_direction direction)
+{
+ sg_free_table(table);
+}
+
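+/*
+ * memblock_free() may live in .init memory on configs that discard
+ * memblock code after boot, so the release hook carries the same
+ * __init_memblock annotation to avoid a section mismatch warning.
+ */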
+static void __init_memblock adf_memblock_release(struct dma_buf *buf)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+ int err = memblock_free(pdata->base, buf->size);
+
+ if (err < 0)
+ pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
+ kfree(pdata);
+}
+
+static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
+ bool atomic)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+ unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
+ struct page *page = pfn_to_page(pfn);
+
+ if (atomic)
+ return kmap_atomic(page);
+ else
+ return kmap(page);
+}
+
+static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
+ unsigned long pgoffset)
+{
+ return adf_memblock_do_kmap(buf, pgoffset, true);
+}
+
+static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
+ unsigned long pgoffset, void *vaddr)
+{
+ kunmap_atomic(vaddr);
+}
+
+static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
+{
+ return adf_memblock_do_kmap(buf, pgoffset, false);
+}
+
+static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
+		void *vaddr)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+
+	/* kunmap() takes the page, not the kernel virtual address */
+	kunmap(pfn_to_page(PFN_DOWN(pdata->base) + pgoffset));
+}
+
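+/*
+ * Map the contiguous region straight into userspace.  The mapping
+ * always starts at pdata->base; the vma's pgoff is not applied as an
+ * additional offset.
+ */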
+static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ struct adf_memblock_pdata *pdata = buf->priv;
+
+ return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static struct dma_buf_ops adf_memblock_ops = {
+ .map_dma_buf = adf_memblock_map,
+ .unmap_dma_buf = adf_memblock_unmap,
+ .release = adf_memblock_release,
+ .kmap_atomic = adf_memblock_kmap_atomic,
+ .kunmap_atomic = adf_memblock_kunmap_atomic,
+ .kmap = adf_memblock_kmap,
+ .kunmap = adf_memblock_kunmap,
+ .mmap = adf_memblock_mmap,
+};
+
+/**
+ * adf_memblock_export - export a memblock reserved area as a dma-buf
+ *
+ * @base: base physical address
+ * @size: memblock size
+ * @flags: mode flags for the dma-buf's file
+ *
+ * @base and @size must be page-aligned.
+ *
+ * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
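+ *
+ * A minimal usage sketch (fb_base and fb_size are hypothetical,
+ * page-aligned values for a region reserved earlier with
+ * memblock_reserve()):
+ *
+ *	struct dma_buf *buf = adf_memblock_export(fb_base, fb_size, O_RDWR);
+ *
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);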
+ */
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
+{
+ struct adf_memblock_pdata *pdata;
+ struct dma_buf *buf;
+
+ if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
+ return ERR_PTR(-EINVAL);
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags);
+ if (IS_ERR(buf))
+ kfree(pdata);
+
+ return buf;
+}
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
new file mode 100644
index 000000000000..8c659c71ffa8
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/adf_client.h>
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+static struct class *adf_class;
+static int adf_major;
+static DEFINE_IDR(adf_minors);
+
+#define dev_to_adf_interface(p) \
+ adf_obj_to_interface(container_of(p, struct adf_obj, dev))
+
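+/*
+ * Each interface exposes its attributes under the "adf" class, e.g.
+ * (with a hypothetical device named foo):
+ *
+ *	cat /sys/class/adf/foo-interface0/current_mode
+ *	echo 3 > /sys/class/adf/foo-interface0/dpms_state	# DRM_MODE_DPMS_OFF
+ *
+ * dpms_state accepts the DRM DPMS values and passes them to
+ * adf_interface_blank().
+ */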
+static ssize_t dpms_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ adf_interface_dpms_state(intf));
+}
+
+static ssize_t dpms_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ u8 dpms_state;
+ int err;
+
+ err = kstrtou8(buf, 0, &dpms_state);
+ if (err < 0)
+ return err;
+
+ err = adf_interface_blank(intf, dpms_state);
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static ssize_t current_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ struct drm_mode_modeinfo mode;
+
+ adf_interface_current_mode(intf, &mode);
+
+ if (mode.name[0]) {
+ return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);
+ } else {
+ bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE);
+ return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
+ mode.vdisplay, interlaced ? "i" : "");
+ }
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ adf_interface_type_str(intf));
+}
+
+static ssize_t vsync_timestamp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ ktime_t timestamp;
+ unsigned long flags;
+
+ read_lock_irqsave(&intf->vsync_lock, flags);
+ memcpy(&timestamp, &intf->vsync_timestamp, sizeof(timestamp));
+ read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(timestamp));
+}
+
+static ssize_t hotplug_detect_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adf_interface *intf = dev_to_adf_interface(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect);
+}
+
+static struct device_attribute adf_interface_attrs[] = {
+ __ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
+ __ATTR_RO(current_mode),
+ __ATTR_RO(hotplug_detect),
+ __ATTR_RO(type),
+ __ATTR_RO(vsync_timestamp),
+};
+
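+/*
+ * Every ADF object (device, interface, overlay engine) gets a minor
+ * from the adf_minors idr and a struct device in the "adf" class, so a
+ * single character-device major covers all object types.
+ */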
+int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
+{
+ int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: allocating adf minor failed: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ obj->minor = ret;
+ obj->dev.parent = parent;
+ obj->dev.class = adf_class;
+ obj->dev.devt = MKDEV(adf_major, obj->minor);
+
+ ret = device_register(&obj->dev);
+ if (ret < 0) {
+ pr_err("%s: registering adf object failed: %d\n", __func__,
+ ret);
+ goto err_device_register;
+ }
+
+ return 0;
+
+err_device_register:
+ idr_remove(&adf_minors, obj->minor);
+ return ret;
+}
+
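+/*
+ * The devnode callbacks choose the names userspace sees under /dev:
+ * adf%d for devices, adf-interface%d.%d and adf-overlay-engine%d.%d for
+ * their children, all sharing the "adf" major allocated at init time.
+ */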
+static char *adf_device_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+ return kasprintf(GFP_KERNEL, "adf%d", obj->id);
+}
+
+static char *adf_interface_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+ struct adf_interface *intf = adf_obj_to_interface(obj);
+ struct adf_device *parent = adf_interface_parent(intf);
+ return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+ parent->base.id, intf->base.id);
+}
+
+static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+ struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
+ struct adf_device *parent = adf_overlay_engine_parent(eng);
+ return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+ parent->base.id, eng->base.id);
+}
+
+static void adf_noop_release(struct device *dev)
+{
+}
+
+static struct device_type adf_device_type = {
+ .name = "adf_device",
+ .devnode = adf_device_devnode,
+ .release = adf_noop_release,
+};
+
+static struct device_type adf_interface_type = {
+ .name = "adf_interface",
+ .devnode = adf_interface_devnode,
+ .release = adf_noop_release,
+};
+
+static struct device_type adf_overlay_engine_type = {
+ .name = "adf_overlay_engine",
+ .devnode = adf_overlay_engine_devnode,
+ .release = adf_noop_release,
+};
+
+int adf_device_sysfs_init(struct adf_device *dev)
+{
+ dev->base.dev.type = &adf_device_type;
+ dev_set_name(&dev->base.dev, "%s", dev->base.name);
+ return adf_obj_sysfs_init(&dev->base, dev->dev);
+}
+
+int adf_interface_sysfs_init(struct adf_interface *intf)
+{
+ struct adf_device *parent = adf_interface_parent(intf);
+ size_t i, j;
+ int ret;
+
+ intf->base.dev.type = &adf_interface_type;
+ dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
+ intf->base.id);
+
+ ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
+ ret = device_create_file(&intf->base.dev,
+ &adf_interface_attrs[i]);
+ if (ret < 0) {
+ dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
+ adf_interface_attrs[i].attr.name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ device_remove_file(&intf->base.dev, &adf_interface_attrs[j]);
+ return ret;
+}
+
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
+{
+ struct adf_device *parent = adf_overlay_engine_parent(eng);
+
+ eng->base.dev.type = &adf_overlay_engine_type;
+ dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
+ eng->base.id);
+
+ return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
+}
+
+struct adf_obj *adf_obj_sysfs_find(int minor)
+{
+ return idr_find(&adf_minors, minor);
+}
+
+void adf_obj_sysfs_destroy(struct adf_obj *obj)
+{
+ idr_remove(&adf_minors, obj->minor);
+ device_unregister(&obj->dev);
+}
+
+void adf_device_sysfs_destroy(struct adf_device *dev)
+{
+ adf_obj_sysfs_destroy(&dev->base);
+}
+
+void adf_interface_sysfs_destroy(struct adf_interface *intf)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++)
+ device_remove_file(&intf->base.dev, &adf_interface_attrs[i]);
+ adf_obj_sysfs_destroy(&intf->base);
+}
+
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
+{
+ adf_obj_sysfs_destroy(&eng->base);
+}
+
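+/*
+ * register_chrdev(0, ...) requests a dynamically allocated major; on
+ * success it returns the major number, which is stashed in adf_major
+ * and combined with each object's minor in adf_obj_sysfs_init().
+ */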
+int adf_sysfs_init(void)
+{
+ struct class *class;
+ int ret;
+
+ class = class_create(THIS_MODULE, "adf");
+ if (IS_ERR(class)) {
+ ret = PTR_ERR(class);
+ pr_err("%s: creating class failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = register_chrdev(0, "adf", &adf_fops);
+ if (ret < 0) {
+ pr_err("%s: registering device failed: %d\n", __func__, ret);
+ goto err_chrdev;
+ }
+
+ adf_class = class;
+ adf_major = ret;
+ return 0;
+
+err_chrdev:
+	class_destroy(class);
+ return ret;
+}
+
+void adf_sysfs_destroy(void)
+{
+ idr_destroy(&adf_minors);
+ class_destroy(adf_class);
+}
diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h
new file mode 100644
index 000000000000..0613ac364f8d
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_SYSFS_H
+#define __VIDEO_ADF_ADF_SYSFS_H
+
+struct adf_device;
+struct adf_interface;
+struct adf_overlay_engine;
+
+int adf_device_sysfs_init(struct adf_device *dev);
+void adf_device_sysfs_destroy(struct adf_device *dev);
+int adf_interface_sysfs_init(struct adf_interface *intf);
+void adf_interface_sysfs_destroy(struct adf_interface *intf);
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
+struct adf_obj *adf_obj_sysfs_find(int minor);
+
+int adf_sysfs_init(void);
+void adf_sysfs_destroy(void);
+
+#endif /* __VIDEO_ADF_ADF_SYSFS_H */
diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h
new file mode 100644
index 000000000000..3cb2a84d728c
--- /dev/null
+++ b/drivers/video/adf/adf_trace.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM adf
+
+#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VIDEO_ADF_ADF_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <video/adf.h>
+
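+/*
+ * Tracepoints for ADF event handling: one fires per delivered event,
+ * plus one each when a client enables or disables an event type on an
+ * ADF object.
+ */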
+DECLARE_EVENT_CLASS(adf_event_class,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+DEFINE_EVENT(adf_event_class, adf_event,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type)
+);
+
+DEFINE_EVENT(adf_event_class, adf_event_enable,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type)
+);
+
+DEFINE_EVENT(adf_event_class, adf_event_disable,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type)
+);
+
+#endif /* __VIDEO_ADF_ADF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adf_trace
+#include <trace/define_trace.h>