Diffstat (limited to 'driver')
-rw-r--r--  driver/aim-cdev/cdev.c           |  550
-rw-r--r--  driver/aim-network/networking.c  |  575
-rw-r--r--  driver/aim-network/networking.h  |   21
-rw-r--r--  driver/aim-sound/sound.c         |  768
-rw-r--r--  driver/aim-v4l2/video.c          |  616
-rw-r--r--  driver/hdm-dim2/dim2_errors.h    |   57
-rw-r--r--  driver/hdm-dim2/dim2_hal.c       |  890
-rw-r--r--  driver/hdm-dim2/dim2_hal.h       |  107
-rw-r--r--  driver/hdm-dim2/dim2_hdm.c       |  913
-rw-r--r--  driver/hdm-dim2/dim2_hdm.h       |   27
-rw-r--r--  driver/hdm-dim2/dim2_reg.h       |  159
-rw-r--r--  driver/hdm-dim2/dim2_sysfs.c     |  115
-rw-r--r--  driver/hdm-dim2/dim2_sysfs.h     |   36
-rw-r--r--  driver/hdm-i2c/hdm_i2c.c         |  430
-rw-r--r--  driver/hdm-usb/hdm_usb.c         | 1407
-rw-r--r--  driver/mostcore/core.c           | 1961
-rw-r--r--  driver/mostcore/mostcore.h       |  320
17 files changed, 8952 insertions(+), 0 deletions(-)
diff --git a/driver/aim-cdev/cdev.c b/driver/aim-cdev/cdev.c
new file mode 100644
index 0000000..de4f76a
--- /dev/null
+++ b/driver/aim-cdev/cdev.c
@@ -0,0 +1,550 @@
+/*
+ * cdev.c - Application interfacing module for character devices
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include "mostcore.h"
+
+static dev_t aim_devno;
+static struct class *aim_class;
+static struct ida minor_id;
+static unsigned int major;
+static struct most_aim cdev_aim;
+
+struct aim_channel {
+ wait_queue_head_t wq;
+ spinlock_t unlink; /* synchronization lock to unlink channels */
+ struct cdev cdev;
+ struct device *dev;
+ struct mutex io_mutex;
+ struct most_interface *iface;
+ struct most_channel_config *cfg;
+ unsigned int channel_id;
+ dev_t devno;
+ size_t mbo_offs;
+ DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
+ int access_ref;
+ struct list_head list;
+};
+
+#define to_channel(d) container_of(d, struct aim_channel, cdev)
+static struct list_head channel_list;
+static spinlock_t ch_list_lock;
+
+static inline bool ch_has_mbo(struct aim_channel *c)
+{
+ return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
+}
+
+static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
+{
+ *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+ return *mbo;
+}
+
+static struct aim_channel *get_channel(struct most_interface *iface, int id)
+{
+ struct aim_channel *c, *tmp;
+ unsigned long flags;
+ int found_channel = 0;
+
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ if ((c->iface == iface) && (c->channel_id == id)) {
+ found_channel = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ if (!found_channel)
+ return NULL;
+ return c;
+}
+
+static void stop_channel(struct aim_channel *c)
+{
+ struct mbo *mbo;
+
+ while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
+ most_put_mbo(mbo);
+ most_stop_channel(c->iface, c->channel_id, &cdev_aim);
+}
+
+static void destroy_cdev(struct aim_channel *c)
+{
+ unsigned long flags;
+
+ device_destroy(aim_class, c->devno);
+ cdev_del(&c->cdev);
+ kfifo_free(&c->fifo);
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_del(&c->list);
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ ida_simple_remove(&minor_id, MINOR(c->devno));
+}
+
+/**
+ * aim_open - implements the syscall to open the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stores the channel pointer in the private data field of
+ * the file structure and activates the channel within the core.
+ */
+static int aim_open(struct inode *inode, struct file *filp)
+{
+ struct aim_channel *c;
+ int ret;
+
+ c = to_channel(inode->i_cdev);
+ filp->private_data = c;
+
+ if (((c->cfg->direction == MOST_CH_RX) &&
+ ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
+ ((c->cfg->direction == MOST_CH_TX) &&
+ ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
+ pr_info("WARN: Access flags mismatch\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&c->io_mutex);
+ if (!c->dev) {
+ pr_info("WARN: Device is destroyed\n");
+ mutex_unlock(&c->io_mutex);
+ return -EBUSY;
+ }
+
+ if (c->access_ref) {
+ pr_info("WARN: Device is busy\n");
+ mutex_unlock(&c->io_mutex);
+ return -EBUSY;
+ }
+
+ c->mbo_offs = 0;
+ ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
+ if (!ret)
+ c->access_ref = 1;
+ mutex_unlock(&c->io_mutex);
+ return ret;
+}
+
+/**
+ * aim_close - implements the syscall to close the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stops the channel within the core.
+ */
+static int aim_close(struct inode *inode, struct file *filp)
+{
+ struct aim_channel *c = to_channel(inode->i_cdev);
+
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->access_ref = 0;
+ spin_unlock(&c->unlink);
+ if (c->dev) {
+ stop_channel(c);
+ mutex_unlock(&c->io_mutex);
+ } else {
+ destroy_cdev(c);
+ mutex_unlock(&c->io_mutex);
+ kfree(c);
+ }
+ return 0;
+}
+
+/**
+ * aim_write - implements the syscall to write to the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to write
+ * @offset: offset from where to start writing
+ */
+static ssize_t aim_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ int ret;
+ size_t actual_len;
+ size_t max_len;
+ struct mbo *mbo = NULL;
+ struct aim_channel *c = filp->private_data;
+
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !ch_get_mbo(c, &mbo)) {
+ mutex_unlock(&c->io_mutex);
+
+ if ((filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+ if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
+ return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
+ }
+
+ if (unlikely(!c->dev)) {
+ ret = -EPIPE;
+ goto unlock;
+ }
+
+ max_len = c->cfg->buffer_size;
+ actual_len = min(count, max_len);
+ mbo->buffer_length = actual_len;
+
+ if (copy_from_user(mbo->virt_address, buf, mbo->buffer_length)) {
+ ret = -EFAULT;
+ goto put_mbo;
+ }
+
+ ret = most_submit_mbo(mbo);
+ if (ret)
+ goto put_mbo;
+
+ mutex_unlock(&c->io_mutex);
+ return actual_len;
+put_mbo:
+ most_put_mbo(mbo);
+unlock:
+ mutex_unlock(&c->io_mutex);
+ return ret;
+}
+
+/**
+ * aim_read - implements the syscall to read from the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to read
+ * @offset: offset from where to start reading
+ */
+static ssize_t
+aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
+{
+ size_t to_copy, not_copied, copied;
+ struct mbo *mbo;
+ struct aim_channel *c = filp->private_data;
+
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
+ mutex_unlock(&c->io_mutex);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ if (wait_event_interruptible(c->wq,
+ (!kfifo_is_empty(&c->fifo) ||
+ (!c->dev))))
+ return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
+ }
+
+ /* make sure we don't read from a device that is gone */
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
+ return -EIO;
+ }
+
+ to_copy = min_t(size_t,
+ count,
+ mbo->processed_length - c->mbo_offs);
+
+ not_copied = copy_to_user(buf,
+ mbo->virt_address + c->mbo_offs,
+ to_copy);
+
+ copied = to_copy - not_copied;
+
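+ /* remember the position so a later read can consume the rest of this MBO */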
+ c->mbo_offs += copied;
+ if (c->mbo_offs >= mbo->processed_length) {
+ kfifo_skip(&c->fifo);
+ most_put_mbo(mbo);
+ c->mbo_offs = 0;
+ }
+ mutex_unlock(&c->io_mutex);
+ return copied;
+}
+
+static unsigned int aim_poll(struct file *filp, poll_table *wait)
+{
+ struct aim_channel *c = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &c->wq, wait);
+
+ if (c->cfg->direction == MOST_CH_RX) {
+ if (!kfifo_is_empty(&c->fifo))
+ mask |= POLLIN | POLLRDNORM;
+ } else {
+ if (ch_has_mbo(c))
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ return mask;
+}
+
+/**
+ * Initialization of struct file_operations
+ */
+static const struct file_operations channel_fops = {
+ .owner = THIS_MODULE,
+ .read = aim_read,
+ .write = aim_write,
+ .open = aim_open,
+ .release = aim_close,
+ .poll = aim_poll,
+};
+
+/**
+ * aim_disconnect_channel - disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the cdev that represents this
+ * channel in user space.
+ */
+static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
+{
+ struct aim_channel *c;
+
+ if (!iface) {
+ pr_info("Bad interface pointer\n");
+ return -EINVAL;
+ }
+
+ c = get_channel(iface, channel_id);
+ if (!c)
+ return -ENXIO;
+
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->dev = NULL;
+ spin_unlock(&c->unlink);
+ if (c->access_ref) {
+ stop_channel(c);
+ wake_up_interruptible(&c->wq);
+ mutex_unlock(&c->io_mutex);
+ } else {
+ destroy_cdev(c);
+ mutex_unlock(&c->io_mutex);
+ kfree(c);
+ }
+ return 0;
+}
+
+/**
+ * aim_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel linked to this MBO and stores the MBO in
+ * that channel's local fifo buffer.
+ */
+static int aim_rx_completion(struct mbo *mbo)
+{
+ struct aim_channel *c;
+
+ if (!mbo)
+ return -EINVAL;
+
+ c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ if (!c)
+ return -ENXIO;
+
+ spin_lock(&c->unlink);
+ if (!c->access_ref || !c->dev) {
+ spin_unlock(&c->unlink);
+ return -EFAULT;
+ }
+ kfifo_in(&c->fifo, &mbo, 1);
+ spin_unlock(&c->unlink);
+#ifdef DEBUG_MESG
+ if (kfifo_is_full(&c->fifo))
+ pr_info("WARN: Fifo is full\n");
+#endif
+ wake_up_interruptible(&c->wq);
+ return 0;
+}
+
+/**
+ * aim_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This wakes sleeping processes in the wait-queue.
+ */
+static int aim_tx_completion(struct most_interface *iface, int channel_id)
+{
+ struct aim_channel *c;
+
+ if (!iface) {
+ pr_info("Bad interface pointer\n");
+ return -EINVAL;
+ }
+ if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
+ pr_info("Channel ID out of range\n");
+ return -EINVAL;
+ }
+
+ c = get_channel(iface, channel_id);
+ if (!c)
+ return -ENXIO;
+ wake_up_interruptible(&c->wq);
+ return 0;
+}
+
+/**
+ * aim_probe - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @name: name of the device to be created
+ *
+ * This allocates a channel object and creates the device node in /dev.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int aim_probe(struct most_interface *iface, int channel_id,
+ struct most_channel_config *cfg,
+ struct kobject *parent, char *name)
+{
+ struct aim_channel *c;
+ unsigned long cl_flags;
+ int retval;
+ int current_minor;
+
+ if ((!iface) || (!cfg) || (!parent) || (!name)) {
+ pr_info("Probing AIM with bad arguments");
+ return -EINVAL;
+ }
+ c = get_channel(iface, channel_id);
+ if (c)
+ return -EEXIST;
+
+ current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
+ if (current_minor < 0)
+ return current_minor;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ retval = -ENOMEM;
+ goto error_alloc_channel;
+ }
+
+ c->devno = MKDEV(major, current_minor);
+ cdev_init(&c->cdev, &channel_fops);
+ c->cdev.owner = THIS_MODULE;
+ cdev_add(&c->cdev, c->devno, 1);
+ c->iface = iface;
+ c->cfg = cfg;
+ c->channel_id = channel_id;
+ c->access_ref = 0;
+ spin_lock_init(&c->unlink);
+ INIT_KFIFO(c->fifo);
+ retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
+ if (retval) {
+ pr_info("failed to alloc channel kfifo");
+ goto error_alloc_kfifo;
+ }
+ init_waitqueue_head(&c->wq);
+ mutex_init(&c->io_mutex);
+ spin_lock_irqsave(&ch_list_lock, cl_flags);
+ list_add_tail(&c->list, &channel_list);
+ spin_unlock_irqrestore(&ch_list_lock, cl_flags);
+ c->dev = device_create(aim_class,
+ NULL,
+ c->devno,
+ NULL,
+ "%s", name);
+
+ if (IS_ERR(c->dev)) {
+ retval = PTR_ERR(c->dev);
+ pr_info("failed to create new device node %s\n", name);
+ goto error_create_device;
+ }
+ kobject_uevent(&c->dev->kobj, KOBJ_ADD);
+ return 0;
+
+error_create_device:
+ kfifo_free(&c->fifo);
+ list_del(&c->list);
+error_alloc_kfifo:
+ cdev_del(&c->cdev);
+ kfree(c);
+error_alloc_channel:
+ ida_simple_remove(&minor_id, current_minor);
+ return retval;
+}
+
+static struct most_aim cdev_aim = {
+ .name = "cdev",
+ .probe_channel = aim_probe,
+ .disconnect_channel = aim_disconnect_channel,
+ .rx_completion = aim_rx_completion,
+ .tx_completion = aim_tx_completion,
+};
+
+static int __init mod_init(void)
+{
+ pr_info("init()\n");
+
+ INIT_LIST_HEAD(&channel_list);
+ spin_lock_init(&ch_list_lock);
+ ida_init(&minor_id);
+
+ if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
+ return -EIO;
+ major = MAJOR(aim_devno);
+
+ aim_class = class_create(THIS_MODULE, "most_cdev_aim");
+ if (IS_ERR(aim_class)) {
+ pr_err("no udev support\n");
+ goto free_cdev;
+ }
+
+ if (most_register_aim(&cdev_aim))
+ goto dest_class;
+ return 0;
+
+dest_class:
+ class_destroy(aim_class);
+free_cdev:
+ unregister_chrdev_region(aim_devno, 50); /* match alloc_chrdev_region() count */
+ return -EIO;
+}
+
+static void __exit mod_exit(void)
+{
+ struct aim_channel *c, *tmp;
+
+ pr_info("exit module\n");
+
+ most_deregister_aim(&cdev_aim);
+
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ destroy_cdev(c);
+ kfree(c);
+ }
+ class_destroy(aim_class);
+ unregister_chrdev_region(aim_devno, 50);
+ ida_destroy(&minor_id);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("character device AIM for mostcore");
diff --git a/driver/aim-network/networking.c b/driver/aim-network/networking.c
new file mode 100644
index 0000000..2f42de4
--- /dev/null
+++ b/driver/aim-network/networking.c
@@ -0,0 +1,575 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include "mostcore.h"
+#include "networking.h"
+
+#define MEP_HDR_LEN 8
+#define MDP_HDR_LEN 16
+#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
+
+#define PMHL 5
+
+#define PMS_TELID_UNSEGM_MAMAC 0x0A
+#define PMS_FIFONO_MDP 0x01
+#define PMS_FIFONO_MEP 0x04
+#define PMS_MSGTYPE_DATA 0x04
+#define PMS_DEF_PRIO 0
+#define MEP_DEF_RETRY 15
+
+#define PMS_FIFONO_MASK 0x07
+#define PMS_FIFONO_SHIFT 3
+#define PMS_RETRY_SHIFT 4
+#define PMS_TELID_MASK 0x0F
+#define PMS_TELID_SHIFT 4
+
+#define HB(value) ((u8)((u16)(value) >> 8))
+#define LB(value) ((u8)(value))
+
+#define EXTRACT_BIT_SET(bitset_name, value) \
+ (((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
+
+#define PMS_IS_MEP(buf, len) \
+ ((len) > MEP_HDR_LEN && \
+ EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
+
+#define PMS_IS_MAMAC(buf, len) \
+ ((len) > MDP_HDR_LEN && \
+ EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
+ EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
+
+struct net_dev_channel {
+ bool linked;
+ int ch_id;
+};
+
+struct net_dev_context {
+ struct most_interface *iface;
+ bool channels_opened;
+ bool is_mamac;
+ unsigned char link_stat;
+ struct net_device *dev;
+ struct net_dev_channel rx;
+ struct net_dev_channel tx;
+ struct list_head list;
+};
+
+static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
+static spinlock_t list_lock;
+static struct most_aim aim;
+
+static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
+{
+ u8 *buff = mbo->virt_address;
+ const u8 broadcast[] = { 0x03, 0xFF };
+ const u8 *dest_addr = skb->data + 4;
+ const u8 *eth_type = skb->data + 12;
+ unsigned int payload_len = skb->len - ETH_HLEN;
+ unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+
+ if (mbo->buffer_length < mdp_len) {
+ pr_err("drop: too small buffer! (%d for %d)\n",
+ mbo->buffer_length, mdp_len);
+ return -EINVAL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ pr_err("drop: too small packet! (%d)\n", skb->len);
+ return -EINVAL;
+ }
+
+ if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
+ dest_addr = broadcast;
+
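+ /* 16-bit MDP length field; excludes the two length bytes themselves */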
+ *buff++ = HB(mdp_len - 2);
+ *buff++ = LB(mdp_len - 2);
+
+ *buff++ = PMHL;
+ *buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+ *buff++ = PMS_DEF_PRIO;
+ *buff++ = dest_addr[0];
+ *buff++ = dest_addr[1];
+ *buff++ = 0x00;
+
+ *buff++ = HB(payload_len + 6);
+ *buff++ = LB(payload_len + 6);
+
+ /* end of FPH here */
+
+ *buff++ = eth_type[0];
+ *buff++ = eth_type[1];
+ *buff++ = 0;
+ *buff++ = 0;
+
+ *buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
+ *buff++ = LB(payload_len);
+
+ memcpy(buff, skb->data + ETH_HLEN, payload_len);
+ mbo->buffer_length = mdp_len;
+ return 0;
+}
+
+static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
+{
+ u8 *buff = mbo->virt_address;
+ unsigned int mep_len = skb->len + MEP_HDR_LEN;
+
+ if (mbo->buffer_length < mep_len) {
+ pr_err("drop: too small buffer! (%d for %d)\n",
+ mbo->buffer_length, mep_len);
+ return -EINVAL;
+ }
+
+ *buff++ = HB(mep_len - 2);
+ *buff++ = LB(mep_len - 2);
+
+ *buff++ = PMHL;
+ *buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+ *buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
+ *buff++ = 0;
+ *buff++ = 0;
+ *buff++ = 0;
+
+ memcpy(buff, skb->data, skb->len);
+ mbo->buffer_length = mep_len;
+ return 0;
+}
+
+static int most_nd_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+ int err = eth_mac_addr(dev, p);
+
+ if (err)
+ return err;
+
+ BUG_ON(nd->dev != dev);
+
+ nd->is_mamac =
+ (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
+ dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
+
+ /*
+ * Set default MTU for the given packet type.
+ * It is still possible to change MTU using ip tools afterwards.
+ */
+ dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
+
+ return 0;
+}
+
+static int most_nd_open(struct net_device *dev)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+
+ netdev_info(dev, "open net device\n");
+
+ BUG_ON(nd->dev != dev);
+
+ if (nd->channels_opened)
+ return -EFAULT;
+
+ BUG_ON(!nd->tx.linked || !nd->rx.linked);
+
+ if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
+ netdev_err(dev, "most_start_channel() failed\n");
+ return -EBUSY;
+ }
+
+ if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
+ netdev_err(dev, "most_start_channel() failed\n");
+ most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+ return -EBUSY;
+ }
+
+ nd->channels_opened = true;
+
+ if (nd->is_mamac) {
+ nd->link_stat = 1;
+ netif_wake_queue(dev);
+ } else {
+ nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
+ }
+
+ return 0;
+}
+
+static int most_nd_stop(struct net_device *dev)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+
+ netdev_info(dev, "stop net device\n");
+
+ BUG_ON(nd->dev != dev);
+ netif_stop_queue(dev);
+
+ if (nd->channels_opened) {
+ most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+ most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
+ nd->channels_opened = false;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+ struct mbo *mbo;
+ int ret;
+
+ BUG_ON(nd->dev != dev);
+
+ mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
+
+ if (!mbo) {
+ netif_stop_queue(dev);
+ dev->stats.tx_fifo_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ if (nd->is_mamac)
+ ret = skb_to_mamac(skb, mbo);
+ else
+ ret = skb_to_mep(skb, mbo);
+
+ if (ret) {
+ most_put_mbo(mbo);
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ most_submit_mbo(mbo);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops most_nd_ops = {
+ .ndo_open = most_nd_open,
+ .ndo_stop = most_nd_stop,
+ .ndo_start_xmit = most_nd_start_xmit,
+ .ndo_set_mac_address = most_nd_set_mac_address,
+};
+
+static void most_nd_setup(struct net_device *dev)
+{
+ netdev_info(dev, "setup net device\n");
+ ether_setup(dev);
+ dev->netdev_ops = &most_nd_ops;
+}
+
+static void most_net_rm_netdev_safe(struct net_dev_context *nd)
+{
+ if (!nd->dev)
+ return;
+
+ pr_info("remove net device %p\n", nd->dev);
+
+ unregister_netdev(nd->dev);
+ free_netdev(nd->dev);
+ nd->dev = NULL;
+}
+
+static struct net_dev_context *get_net_dev_context(
+ struct most_interface *iface)
+{
+ struct net_dev_context *nd, *tmp;
+
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(nd, tmp, &net_devices, list) {
+ if (nd->iface == iface) {
+ spin_unlock(&list_lock);
+ return nd;
+ }
+ }
+ spin_unlock(&list_lock);
+ return NULL;
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg,
+ struct kobject *parent, char *name)
+{
+ struct net_dev_context *nd;
+ struct net_dev_channel *ch;
+
+ if (!iface)
+ return -EINVAL;
+
+ if (ccfg->data_type != MOST_CH_ASYNC)
+ return -EINVAL;
+
+ nd = get_net_dev_context(iface);
+
+ if (!nd) {
+ nd = kzalloc(sizeof(*nd), GFP_KERNEL);
+ if (!nd)
+ return -ENOMEM;
+
+ nd->iface = iface;
+
+ spin_lock(&list_lock);
+ list_add(&nd->list, &net_devices);
+ spin_unlock(&list_lock);
+ }
+
+ ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+ if (ch->linked) {
+ pr_err("only one channel per instance & direction allowed\n");
+ return -EINVAL;
+ }
+
+ if (nd->tx.linked || nd->rx.linked) {
+ struct net_device *dev =
+ alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN,
+ most_nd_setup);
+
+ if (!dev) {
+ pr_err("no memory for net_device\n");
+ return -ENOMEM;
+ }
+
+ nd->dev = dev;
+ ch->ch_id = channel_idx;
+ ch->linked = true;
+
+ dev->ml_priv = nd;
+ if (register_netdev(dev)) {
+ pr_err("registering net device failed\n");
+ ch->linked = false;
+ free_netdev(dev);
+ return -EINVAL;
+ }
+ }
+
+ ch->ch_id = channel_idx;
+ ch->linked = true;
+
+ return 0;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct net_dev_context *nd;
+ struct net_dev_channel *ch;
+
+ nd = get_net_dev_context(iface);
+ if (!nd)
+ return -EINVAL;
+
+ if (nd->rx.linked && channel_idx == nd->rx.ch_id)
+ ch = &nd->rx;
+ else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
+ ch = &nd->tx;
+ else
+ return -EINVAL;
+
+ ch->linked = false;
+
+ /*
+ * do not call most_stop_channel() here, because channels are
+ * going to be closed in ndo_stop() after unregister_netdev()
+ */
+ most_net_rm_netdev_safe(nd);
+
+ if (!nd->rx.linked && !nd->tx.linked) {
+ spin_lock(&list_lock);
+ list_del(&nd->list);
+ spin_unlock(&list_lock);
+ kfree(nd);
+ }
+
+ return 0;
+}
+
+static int aim_resume_tx_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct net_dev_context *nd;
+
+ nd = get_net_dev_context(iface);
+ if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
+ return 0;
+
+ if (!nd->dev)
+ return 0;
+
+ netif_wake_queue(nd->dev);
+ return 0;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+ const u32 zero = 0;
+ struct net_dev_context *nd;
+ char *buf = mbo->virt_address;
+ u32 len = mbo->processed_length;
+ struct sk_buff *skb;
+ struct net_device *dev;
+ unsigned int skb_len;
+
+ nd = get_net_dev_context(mbo->ifp);
+ if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
+ return -EIO;
+
+ dev = nd->dev;
+ if (!dev) {
+ pr_err_once("drop packet: missing net_device\n");
+ return -EIO;
+ }
+
+ if (nd->is_mamac) {
+ if (!PMS_IS_MAMAC(buf, len))
+ return -EIO;
+
+ skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
+ } else {
+ if (!PMS_IS_MEP(buf, len))
+ return -EIO;
+
+ skb = dev_alloc_skb(len - MEP_HDR_LEN);
+ }
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ pr_err_once("drop packet: no memory for skb\n");
+ goto out;
+ }
+
+ skb->dev = dev;
+
+ if (nd->is_mamac) {
+ /* dest */
+ ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);
+
+ /* src */
+ memcpy(skb_put(skb, 4), &zero, 4);
+ memcpy(skb_put(skb, 2), buf + 5, 2);
+
+ /* eth type */
+ memcpy(skb_put(skb, 2), buf + 10, 2);
+
+ buf += MDP_HDR_LEN;
+ len -= MDP_HDR_LEN;
+ } else {
+ buf += MEP_HDR_LEN;
+ len -= MEP_HDR_LEN;
+ }
+
+ memcpy(skb_put(skb, len), buf, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb_len;
+ } else {
+ dev->stats.rx_dropped++;
+ }
+
+out:
+ most_put_mbo(mbo);
+ return 0;
+}
+
+static struct most_aim aim = {
+ .name = "networking",
+ .probe_channel = aim_probe_channel,
+ .disconnect_channel = aim_disconnect_channel,
+ .tx_completion = aim_resume_tx_channel,
+ .rx_completion = aim_rx_data,
+};
+
+static int __init most_net_init(void)
+{
+ pr_info("most_net_init()\n");
+ spin_lock_init(&list_lock);
+ return most_register_aim(&aim);
+}
+
+static void __exit most_net_exit(void)
+{
+ struct net_dev_context *nd, *tmp;
+
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(nd, tmp, &net_devices, list) {
+ list_del(&nd->list);
+ spin_unlock(&list_lock);
+ /*
+ * do not call most_stop_channel() here, because channels are
+ * going to be closed in ndo_stop() after unregister_netdev()
+ */
+ most_net_rm_netdev_safe(nd);
+ kfree(nd);
+ spin_lock(&list_lock);
+ }
+ spin_unlock(&list_lock);
+
+ most_deregister_aim(&aim);
+ pr_info("most_net_exit()\n");
+}
+
+/**
+ * most_deliver_netinfo - callback used by the HDM to report the hardware's
+ * MAC address and link status
+ * @iface: most interface instance
+ * @link_stat: link status
+ * @mac_addr: MAC address
+ */
+void most_deliver_netinfo(struct most_interface *iface,
+ unsigned char link_stat, unsigned char *mac_addr)
+{
+ struct net_dev_context *nd;
+ struct net_device *dev;
+
+ pr_info("Received netinfo from %s\n", iface->description);
+
+ nd = get_net_dev_context(iface);
+ if (!nd)
+ return;
+
+ dev = nd->dev;
+ if (!dev)
+ return;
+
+ if (mac_addr)
+ ether_addr_copy(dev->dev_addr, mac_addr);
+
+ if (nd->link_stat != link_stat) {
+ nd->link_stat = link_stat;
+ if (nd->link_stat)
+ netif_wake_queue(dev);
+ else
+ netif_stop_queue(dev);
+ }
+}
+EXPORT_SYMBOL(most_deliver_netinfo);
+
+module_init(most_net_init);
+module_exit(most_net_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
diff --git a/driver/aim-network/networking.h b/driver/aim-network/networking.h
new file mode 100644
index 0000000..6f346d4
--- /dev/null
+++ b/driver/aim-network/networking.h
@@ -0,0 +1,21 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+#ifndef _NETWORKING_H_
+#define _NETWORKING_H_
+
+#include "mostcore.h"
+
+void most_deliver_netinfo(struct most_interface *iface,
+ unsigned char link_stat, unsigned char *mac_addr);
+
+#endif
diff --git a/driver/aim-sound/sound.c b/driver/aim-sound/sound.c
new file mode 100644
index 0000000..9c64580
--- /dev/null
+++ b/driver/aim-sound/sound.c
@@ -0,0 +1,768 @@
+/*
+ * sound.c - Audio Application Interface Module for Mostcore
+ *
+ * Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <mostcore.h>
+
+#define DRIVER_NAME "sound"
+
+static struct list_head dev_list;
+static struct most_aim audio_aim;
+
+/**
+ * struct channel - private structure to keep channel specific data
+ * @substream: stores the substream structure
+ * @pcm_hardware: PCM hardware capabilities advertised to ALSA
+ * @iface: interface the channel belongs to
+ * @cfg: channel configuration
+ * @card: registered sound card
+ * @list: list for private use
+ * @id: channel index
+ * @period_pos: current period position (ring buffer)
+ * @buffer_pos: current buffer position (ring buffer)
+ * @is_stream_running: identifies whether a stream is running or not
+ * @playback_task: playback thread
+ * @playback_waitq: waitq used by playback thread
+ * @copy_fn: copy function used to move samples between ALSA and MBO buffers
+ */
+struct channel {
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_hardware pcm_hardware;
+ struct most_interface *iface;
+ struct most_channel_config *cfg;
+ struct snd_card *card;
+ struct list_head list;
+ int id;
+ unsigned int period_pos;
+ unsigned int buffer_pos;
+ bool is_stream_running;
+
+ struct task_struct *playback_task;
+ wait_queue_head_t playback_waitq;
+
+ void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
+};
+
+#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
+ SNDRV_PCM_INFO_MMAP_VALID | \
+ SNDRV_PCM_INFO_BATCH | \
+ SNDRV_PCM_INFO_INTERLEAVED | \
+ SNDRV_PCM_INFO_BLOCK_TRANSFER)
+
+#define swap16(val) ( \
+ (((u16)(val) << 8) & (u16)0xFF00) | \
+ (((u16)(val) >> 8) & (u16)0x00FF))
+
+#define swap32(val) ( \
+ (((u32)(val) << 24) & (u32)0xFF000000) | \
+ (((u32)(val) << 8) & (u32)0x00FF0000) | \
+ (((u32)(val) >> 8) & (u32)0x0000FF00) | \
+ (((u32)(val) >> 24) & (u32)0x000000FF))
+
+static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
+{
+ unsigned int i = 0;
+
+ while (i < (bytes / 2)) {
+ dest[i] = swap16(source[i]);
+ i++;
+ }
+}
+
+static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
+{
+ unsigned int i = 0;
+
+ while (i < bytes - 2) {
+ dest[i] = source[i + 2];
+ dest[i + 1] = source[i + 1];
+ dest[i + 2] = source[i];
+ i += 3;
+ }
+}
+
+static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
+{
+ unsigned int i = 0;
+
+ while (i < bytes / 4) {
+ dest[i] = swap32(source[i]);
+ i++;
+ }
+}
+
+static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+ memcpy(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy16(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy24(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy32(most, alsa, bytes);
+}
+
+static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+ memcpy(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy16(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy24(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy32(alsa, most, bytes);
+}
+
+/**
+ * get_channel - get pointer to channel
+ * @iface: interface structure
+ * @channel_id: channel ID
+ *
+ * This traverses the channel list and returns the channel matching the
+ * ID and interface.
+ *
+ * Returns pointer to channel on success or NULL otherwise.
+ */
+static struct channel *get_channel(struct most_interface *iface,
+ int channel_id)
+{
+ struct channel *channel, *tmp;
+
+ list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+ if ((channel->iface == iface) && (channel->id == channel_id))
+ return channel;
+ }
+
+ return NULL;
+}
+
+/**
+ * copy_data - implements data copying function
+ * @channel: channel
+ * @mbo: MBO from core
+ *
+ * Copy data from/to ring buffer to/from MBO and update the buffer position
+ */
+static bool copy_data(struct channel *channel, struct mbo *mbo)
+{
+ struct snd_pcm_runtime *const runtime = channel->substream->runtime;
+ unsigned int const frame_bytes = channel->cfg->subbuffer_size;
+ unsigned int const buffer_size = runtime->buffer_size;
+ unsigned int frames;
+ unsigned int fr0;
+
+ if (channel->cfg->direction & MOST_CH_RX)
+ frames = mbo->processed_length / frame_bytes;
+ else
+ frames = mbo->buffer_length / frame_bytes;
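+ /* fr0: number of frames that fit before the ring buffer wraps */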
+ fr0 = min(buffer_size - channel->buffer_pos, frames);
+
+ channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
+ mbo->virt_address,
+ fr0 * frame_bytes);
+
+ if (frames > fr0) {
+ /* wrap around at end of ring buffer */
+ channel->copy_fn(runtime->dma_area,
+ mbo->virt_address + fr0 * frame_bytes,
+ (frames - fr0) * frame_bytes);
+ }
+
+ channel->buffer_pos += frames;
+ if (channel->buffer_pos >= buffer_size)
+ channel->buffer_pos -= buffer_size;
+ channel->period_pos += frames;
+ if (channel->period_pos >= runtime->period_size) {
+ channel->period_pos -= runtime->period_size;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * playback_thread - function implements the playback thread
+ * @data: private data
+ *
+ * Thread that implements the playback functionality in a loop. It waits for
+ * a free MBO from mostcore for a particular channel, copies the data from
+ * the ring buffer into the MBO and submits the MBO back to mostcore.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int playback_thread(void *data)
+{
+ struct channel *const channel = data;
+
+ while (!kthread_should_stop()) {
+ struct mbo *mbo = NULL;
+ bool period_elapsed = false;
+ int ret;
+
+ wait_event_interruptible(
+ channel->playback_waitq,
+ kthread_should_stop() ||
+ (channel->is_stream_running &&
+ (mbo = most_get_mbo(channel->iface, channel->id,
+ &audio_aim))));
+ if (!mbo)
+ continue;
+
+ if (channel->is_stream_running)
+ period_elapsed = copy_data(channel, mbo);
+ else
+ memset(mbo->virt_address, 0, mbo->buffer_length);
+
+ ret = most_submit_mbo(mbo);
+ if (ret)
+ channel->is_stream_running = false;
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(channel->substream);
+ }
+
+ return 0;
+}
+
+/**
+ * pcm_open - implements open callback function for PCM middle layer
+ * @substream: pointer to ALSA PCM substream
+ *
+ * This is called when a PCM substream is opened. At a minimum, the function
+ * should initialize the runtime->hw record.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_open(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct most_channel_config *cfg = channel->cfg;
+
+ channel->substream = substream;
+
+ if (cfg->direction == MOST_CH_TX) {
+ channel->playback_task = kthread_run(playback_thread, channel,
+ "most_audio_playback");
+ if (IS_ERR(channel->playback_task)) {
+ pr_err("Couldn't start thread\n");
+ return PTR_ERR(channel->playback_task);
+ }
+ }
+
+ if (most_start_channel(channel->iface, channel->id, &audio_aim)) {
+ pr_err("most_start_channel() failed!\n");
+ if (cfg->direction == MOST_CH_TX)
+ kthread_stop(channel->playback_task);
+ return -EBUSY;
+ }
+
+ runtime->hw = channel->pcm_hardware;
+ return 0;
+}
+
+/**
+ * pcm_close - implements close callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ *
+ * This is called when a PCM substream is closed. Any private
+ * instance allocated for the substream in the open callback will be
+ * released here.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_close(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+
+ if (channel->cfg->direction == MOST_CH_TX)
+ kthread_stop(channel->playback_task);
+ most_stop_channel(channel->iface, channel->id, &audio_aim);
+
+ return 0;
+}
+
+/**
+ * pcm_hw_params - implements hw_params callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ * @hw_params: contains the hardware parameters set by the application
+ *
+ * This is called when the hardware parameters are set by the application,
+ * that is, once the buffer size, the period size, the format, etc. are
+ * defined for the PCM substream. Much of the hardware setup should be done
+ * in this callback,
+ * including the allocation of buffers.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct channel *channel = substream->private_data;
+
+ if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
+ (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
+ pr_err("Requested number of channels not supported.\n");
+ return -EINVAL;
+ }
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+
+/**
+ * pcm_hw_free - implements hw_free callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This is called to release the resources allocated via hw_params.
+ * This function will be always called before the close callback is called.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+/**
+ * pcm_prepare - implements prepare callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM is "prepared". The format, sample
+ * rate, etc. can be set here. This callback may be called many times at each
+ * setup.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct most_channel_config *cfg = channel->cfg;
+ int width = snd_pcm_format_physical_width(runtime->format);
+
+ channel->copy_fn = NULL;
+
+ if (cfg->direction == MOST_CH_TX) {
+ if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+ channel->copy_fn = alsa_to_most_memcpy;
+ else if (width == 16)
+ channel->copy_fn = alsa_to_most_copy16;
+ else if (width == 24)
+ channel->copy_fn = alsa_to_most_copy24;
+ else if (width == 32)
+ channel->copy_fn = alsa_to_most_copy32;
+ } else {
+ if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+ channel->copy_fn = most_to_alsa_memcpy;
+ else if (width == 16)
+ channel->copy_fn = most_to_alsa_copy16;
+ else if (width == 24)
+ channel->copy_fn = most_to_alsa_copy24;
+ else if (width == 32)
+ channel->copy_fn = most_to_alsa_copy32;
+ }
+
+ if (!channel->copy_fn) {
+ pr_err("unsupported format\n");
+ return -EINVAL;
+ }
+
+ channel->period_pos = 0;
+ channel->buffer_pos = 0;
+
+ return 0;
+}
+
+/**
+ * pcm_trigger - implements trigger callback function for PCM middle layer
+ * @substream: substream pointer
+ * @cmd: action to perform
+ *
+ * This is called when the PCM is started, stopped or paused. The action will be
+ * specified in the second argument, one of the SNDRV_PCM_TRIGGER_XXX constants.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct channel *channel = substream->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ channel->is_stream_running = true;
+ wake_up_interruptible(&channel->playback_waitq);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ channel->is_stream_running = false;
+ return 0;
+
+ default:
+ pr_info("pcm_trigger(), invalid\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * pcm_pointer - implements pointer callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM middle layer inquires the current
+ * hardware position on the buffer. The position must be returned in frames,
+ * ranging from 0 to buffer_size-1.
+ */
+static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+
+ return channel->buffer_pos;
+}
+
+/**
+ * Initialization of struct snd_pcm_ops
+ */
+static struct snd_pcm_ops pcm_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pcm_hw_params,
+ .hw_free = pcm_hw_free,
+ .prepare = pcm_prepare,
+ .trigger = pcm_trigger,
+ .pointer = pcm_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+};
+
+static int split_arg_list(char *buf, char **card_name, char **pcm_format)
+{
+ *card_name = strsep(&buf, ".");
+ if (!*card_name)
+ return -EIO;
+ *pcm_format = strsep(&buf, ".\n");
+ if (!*pcm_format)
+ return -EIO;
+ return 0;
+}
+
+static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
+ char *pcm_format,
+ struct most_channel_config *cfg)
+{
+ pcm_hw->info = MOST_PCM_INFO;
+ pcm_hw->rates = SNDRV_PCM_RATE_48000;
+ pcm_hw->rate_min = 48000;
+ pcm_hw->rate_max = 48000;
+ pcm_hw->buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
+ pcm_hw->period_bytes_min = cfg->buffer_size;
+ pcm_hw->period_bytes_max = cfg->buffer_size;
+ pcm_hw->periods_min = 1;
+ pcm_hw->periods_max = cfg->num_buffers;
+
+ if (!strcmp(pcm_format, "1x8")) {
+ if (cfg->subbuffer_size != 1)
+ goto error;
+ pr_info("PCM format is 8-bit mono\n");
+ pcm_hw->channels_min = 1;
+ pcm_hw->channels_max = 1;
+ pcm_hw->formats = SNDRV_PCM_FMTBIT_S8;
+ } else if (!strcmp(pcm_format, "2x16")) {
+ if (cfg->subbuffer_size != 4)
+ goto error;
+ pr_info("PCM format is 16-bit stereo\n");
+ pcm_hw->channels_min = 2;
+ pcm_hw->channels_max = 2;
+ pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE;
+ } else if (!strcmp(pcm_format, "2x24")) {
+ if (cfg->subbuffer_size != 6)
+ goto error;
+ pr_info("PCM format is 24-bit stereo\n");
+ pcm_hw->channels_min = 2;
+ pcm_hw->channels_max = 2;
+ pcm_hw->formats = SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_3BE;
+ } else if (!strcmp(pcm_format, "2x32")) {
+ if (cfg->subbuffer_size != 8)
+ goto error;
+ pr_info("PCM format is 32-bit stereo\n");
+ pcm_hw->channels_min = 2;
+ pcm_hw->channels_max = 2;
+ pcm_hw->formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S32_BE;
+ } else if (!strcmp(pcm_format, "6x16")) {
+ if (cfg->subbuffer_size != 12)
+ goto error;
+ pr_info("PCM format is 16-bit 5.1 multi channel\n");
+ pcm_hw->channels_min = 6;
+ pcm_hw->channels_max = 6;
+ pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE;
+ } else {
+ pr_err("PCM format %s not supported\n", pcm_format);
+ return -EIO;
+ }
+ return 0;
+error:
+ pr_err("Audio resolution doesn't fit subbuffer size\n");
+ return -EINVAL;
+}
+
+/**
+ * audio_probe_channel - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @arg_list: string that provides the sound card name and the desired
+ * PCM format, separated by a dot (e.g. "mycard.2x16")
+ *
+ * Creates sound card, pcm device, sets pcm ops and registers sound card.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_probe_channel(struct most_interface *iface, int channel_id,
+ struct most_channel_config *cfg,
+ struct kobject *parent, char *arg_list)
+{
+ struct channel *channel;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ int playback_count = 0;
+ int capture_count = 0;
+ int ret;
+ int direction;
+ char *card_name;
+ char *pcm_format;
+
+ if (!iface)
+ return -EINVAL;
+
+ if (cfg->data_type != MOST_CH_SYNC) {
+ pr_err("Incompatible channel type\n");
+ return -EINVAL;
+ }
+
+ if (get_channel(iface, channel_id)) {
+ pr_err("channel (%s:%d) is already linked\n",
+ iface->description, channel_id);
+ return -EINVAL;
+ }
+
+ if (cfg->direction == MOST_CH_TX) {
+ playback_count = 1;
+ direction = SNDRV_PCM_STREAM_PLAYBACK;
+ } else {
+ capture_count = 1;
+ direction = SNDRV_PCM_STREAM_CAPTURE;
+ }
+
+ ret = split_arg_list(arg_list, &card_name, &pcm_format);
+ if (ret < 0) {
+ pr_info("PCM format missing\n");
+ return ret;
+ }
+
+ ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
+ sizeof(*channel), &card);
+ if (ret < 0)
+ return ret;
+
+ channel = card->private_data;
+ channel->card = card;
+ channel->cfg = cfg;
+ channel->iface = iface;
+ channel->id = channel_id;
+ init_waitqueue_head(&channel->playback_waitq);
+
+ if (audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg))
+ goto err_free_card;
+
+ snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
+ snprintf(card->shortname, sizeof(card->shortname), "Microchip MOST:%d",
+ card->number);
+ snprintf(card->longname, sizeof(card->longname), "%s at %s, ch %d",
+ card->shortname, iface->description, channel_id);
+
+ ret = snd_pcm_new(card, card_name, 0, playback_count,
+ capture_count, &pcm);
+ if (ret < 0)
+ goto err_free_card;
+
+ pcm->private_data = channel;
+
+ snd_pcm_set_ops(pcm, direction, &pcm_ops);
+
+ ret = snd_card_register(card);
+ if (ret < 0)
+ goto err_free_card;
+
+ list_add_tail(&channel->list, &dev_list);
+
+ return 0;
+
+err_free_card:
+ snd_card_free(card);
+ return ret;
+}
+
+/**
+ * audio_disconnect_channel - function to disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the sound card from ALSA
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_disconnect_channel(struct most_interface *iface,
+ int channel_id)
+{
+ struct channel *channel;
+
+ channel = get_channel(iface, channel_id);
+ if (!channel) {
+ pr_err("sound_disconnect_channel(), invalid channel %d\n",
+ channel_id);
+ return -EINVAL;
+ }
+
+ list_del(&channel->list);
+ snd_card_free(channel->card);
+
+ return 0;
+}
+
+/**
+ * audio_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel this MBO belongs to and copies the data from
+ * the MBO to the ring buffer.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_rx_completion(struct mbo *mbo)
+{
+ struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ bool period_elapsed = false;
+
+ if (!channel) {
+ pr_err("sound_rx_completion(), invalid channel %d\n",
+ mbo->hdm_channel_id);
+ return -EINVAL;
+ }
+
+ if (channel->is_stream_running)
+ period_elapsed = copy_data(channel, mbo);
+
+ most_put_mbo(mbo);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(channel->substream);
+
+ return 0;
+}
+
+/**
+ * audio_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This searches the channel that belongs to this combination of interface
+ * pointer and channel ID and wakes a process sitting in the wait queue of
+ * this channel.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_tx_completion(struct most_interface *iface, int channel_id)
+{
+ struct channel *channel = get_channel(iface, channel_id);
+
+ if (!channel) {
+ pr_err("sound_tx_completion(), invalid channel %d\n",
+ channel_id);
+ return -EINVAL;
+ }
+
+ wake_up_interruptible(&channel->playback_waitq);
+
+ return 0;
+}
+
+/**
+ * Initialization of the struct most_aim
+ */
+static struct most_aim audio_aim = {
+ .name = DRIVER_NAME,
+ .probe_channel = audio_probe_channel,
+ .disconnect_channel = audio_disconnect_channel,
+ .rx_completion = audio_rx_completion,
+ .tx_completion = audio_tx_completion,
+};
+
+static int __init audio_init(void)
+{
+ pr_info("init()\n");
+
+ INIT_LIST_HEAD(&dev_list);
+
+ return most_register_aim(&audio_aim);
+}
+
+static void __exit audio_exit(void)
+{
+ struct channel *channel, *tmp;
+
+ pr_info("exit()\n");
+
+ list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+ list_del(&channel->list);
+ snd_card_free(channel->card);
+ }
+
+ most_deregister_aim(&audio_aim);
+}
+
+module_init(audio_init);
+module_exit(audio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
diff --git a/driver/aim-v4l2/video.c b/driver/aim-v4l2/video.c
new file mode 100644
index 0000000..150dc89
--- /dev/null
+++ b/driver/aim-v4l2/video.c
@@ -0,0 +1,616 @@
+/*
+ * V4L2 AIM - V4L2 Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+
+#include "mostcore.h"
+
+#define V4L2_AIM_MAX_INPUT 1
+
+static struct most_aim aim_info;
+
+struct most_video_dev {
+ struct most_interface *iface;
+ int ch_idx;
+ struct list_head list;
+ bool mute;
+
+ struct list_head pending_mbos;
+ spinlock_t list_lock;
+
+ struct v4l2_device v4l2_dev;
+ atomic_t access_ref;
+ struct video_device *vdev;
+ unsigned int ctrl_input;
+
+ struct mutex lock;
+
+ wait_queue_head_t wait_data;
+};
+
+struct aim_fh {
+ /* must be the first field of this struct! */
+ struct v4l2_fh fh;
+ struct most_video_dev *mdev;
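+ /* byte offset into the MBO currently being consumed by read() */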
+ u32 offs;
+};
+
+static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
+static spinlock_t list_lock;
+
+static inline bool data_ready(struct most_video_dev *mdev)
+{
+ return !list_empty(&mdev->pending_mbos);
+}
+
+static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
+{
+ return list_first_entry(&mdev->pending_mbos, struct mbo, list);
+}
+
+static int aim_vdev_open(struct file *filp)
+{
+ int ret;
+ struct video_device *vdev = video_devdata(filp);
+ struct most_video_dev *mdev = video_drvdata(filp);
+ struct aim_fh *fh;
+
+ v4l2_info(&mdev->v4l2_dev, "aim_vdev_open()\n");
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+
+ if (!atomic_inc_and_test(&mdev->access_ref)) {
+ v4l2_err(&mdev->v4l2_dev, "too many clients\n");
+ ret = -EBUSY;
+ goto err_dec;
+ }
+
+ fh->mdev = mdev;
+ v4l2_fh_init(&fh->fh, vdev);
+ filp->private_data = fh;
+
+ v4l2_fh_add(&fh->fh);
+
+ ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
+ goto err_rm;
+ }
+
+ return 0;
+
+err_rm:
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+
+err_dec:
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return ret;
+}
+
+static int aim_vdev_close(struct file *filp)
+{
+ struct aim_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ struct mbo *mbo, *tmp;
+
+ v4l2_info(&mdev->v4l2_dev, "aim_vdev_close()\n");
+
+ /*
+ * We need to put the MBOs back before most_stop_channel()
+ * deallocates them. On the other hand, mostcore keeps calling
+ * rx_completion() to deliver MBOs until most_stop_channel() is
+ * called, so the mute flag is used to work around this. This
+ * should eventually be handled in the core.
+ */
+
+ spin_lock_irq(&mdev->list_lock);
+ mdev->mute = true;
+ list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
+ list_del(&mbo->list);
+ spin_unlock_irq(&mdev->list_lock);
+ most_put_mbo(mbo);
+ spin_lock_irq(&mdev->list_lock);
+ }
+ spin_unlock_irq(&mdev->list_lock);
+ most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
+ mdev->mute = false;
+
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return 0;
+}
+
+static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct aim_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ int ret = 0;
+
+ if (*pos)
+ return -ESPIPE;
+
+ if (!mdev)
+ return -ENODEV;
+
+ /* wait for the first buffer */
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
+ return -ERESTARTSYS;
+ }
+
+ if (!data_ready(mdev))
+ return -EAGAIN;
+
+ while (count > 0 && data_ready(mdev)) {
+ struct mbo *const mbo = get_top_mbo(mdev);
+ int const rem = mbo->processed_length - fh->offs;
+ int const cnt = rem < count ? rem : count;
+
+ if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
+ v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
+ if (!ret)
+ ret = -EFAULT;
+ return ret;
+ }
+
+ fh->offs += cnt;
+ count -= cnt;
+ buf += cnt;
+ ret += cnt;
+
+ if (cnt >= rem) {
+ fh->offs = 0;
+ spin_lock_irq(&mdev->list_lock);
+ list_del(&mbo->list);
+ spin_unlock_irq(&mdev->list_lock);
+ most_put_mbo(mbo);
+ }
+ }
+ return ret;
+}
+
+static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+{
+ struct aim_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ unsigned int mask = 0;
+
+ /* only wait if no data is available */
+ if (!data_ready(mdev))
+ poll_wait(filp, &mdev->wait_data, wait);
+ if (data_ready(mdev))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static void aim_set_format_struct(struct v4l2_format *f)
+{
+ f->fmt.pix.width = 8;
+ f->fmt.pix.height = 8;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = 188 * 2;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.priv = 0;
+}
+
+static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
+ struct v4l2_format *format)
+{
+ if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
+ return -EINVAL;
+
+ if (cmd == VIDIOC_TRY_FMT)
+ return 0;
+
+ aim_set_format_struct(format);
+
+ return 0;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
+
+ strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
+ strlcpy(cap->card, "MOST", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "%s", mdev->iface->description);
+
+ cap->capabilities =
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER |
+ V4L2_CAP_VIDEO_CAPTURE;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
+
+ if (f->index)
+ return -EINVAL;
+
+ strcpy(f->description, "MPEG");
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED;
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
+
+ aim_set_format_struct(f);
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return aim_set_format(mdev, VIDIOC_S_FMT, f);
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
+
+ *norm = V4L2_STD_UNKNOWN;
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ if (input->index >= V4L2_AIM_MAX_INPUT)
+ return -EINVAL;
+
+ strcpy(input->name, "MOST Video");
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->audioset = 0;
+
+ input->std = mdev->vdev->tvnorms;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+ *i = mdev->ctrl_input;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
+
+ if (index >= V4L2_AIM_MAX_INPUT)
+ return -EINVAL;
+ mdev->ctrl_input = index;
+ return 0;
+}
+
+static const struct v4l2_file_operations aim_fops = {
+ .owner = THIS_MODULE,
+ .open = aim_vdev_open,
+ .release = aim_vdev_close,
+ .read = aim_vdev_read,
+ .poll = aim_vdev_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+};
+
+static const struct video_device aim_videodev_template = {
+ .fops = &aim_fops,
+ .release = video_device_release,
+ .ioctl_ops = &video_ioctl_ops,
+ .tvnorms = V4L2_STD_UNKNOWN,
+};
+
+/**************************************************************************/
+
+static struct most_video_dev *get_aim_dev(
+ struct most_interface *iface, int channel_idx)
+{
+ struct most_video_dev *mdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(mdev, &video_devices, list) {
+ if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ return mdev;
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+ return NULL;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_video_dev *mdev =
+ get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
+
+ if (!mdev)
+ return -EIO;
+
+ spin_lock_irqsave(&mdev->list_lock, flags);
+ if (unlikely(mdev->mute)) {
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
+ return -EIO;
+ }
+
+ list_add_tail(&mbo->list, &mdev->pending_mbos);
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
+ wake_up_interruptible(&mdev->wait_data);
+ return 0;
+}
+
+static int aim_register_videodev(struct most_video_dev *mdev)
+{
+ int ret;
+
+ v4l2_info(&mdev->v4l2_dev, "aim_register_videodev()\n");
+
+ init_waitqueue_head(&mdev->wait_data);
+
+ /* allocate and fill v4l2 video struct */
+ mdev->vdev = video_device_alloc();
+ if (!mdev->vdev)
+ return -ENOMEM;
+
+ /* Fill the video capture device struct */
+ *mdev->vdev = aim_videodev_template;
+ mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
+ mdev->vdev->lock = &mdev->lock;
+ snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
+ mdev->v4l2_dev.name);
+
+ /* Register the v4l2 device */
+ video_set_drvdata(mdev->vdev, mdev);
+ ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
+ ret);
+ video_device_release(mdev->vdev);
+ }
+
+ return ret;
+}
+
+static void aim_unregister_videodev(struct most_video_dev *mdev)
+{
+ v4l2_info(&mdev->v4l2_dev, "aim_unregister_videodev()\n");
+
+ video_unregister_device(mdev->vdev);
+}
+
+static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct most_video_dev *mdev =
+ container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ kfree(mdev);
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg,
+ struct kobject *parent, char *name)
+{
+ int ret;
+ struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+ pr_info("aim_probe_channel(%s)\n", name);
+
+ if (mdev) {
+ pr_err("channel already linked\n");
+ return -EEXIST;
+ }
+
+ if (ccfg->direction != MOST_CH_RX) {
+ pr_err("wrong direction, expect rx\n");
+ return -EINVAL;
+ }
+
+ if (ccfg->data_type != MOST_CH_SYNC &&
+ ccfg->data_type != MOST_CH_ISOC_AVP) {
+ pr_err("wrong channel type, expect sync or isoc_avp\n");
+ return -EINVAL;
+ }
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mutex_init(&mdev->lock);
+ atomic_set(&mdev->access_ref, -1);
+ spin_lock_init(&mdev->list_lock);
+ INIT_LIST_HEAD(&mdev->pending_mbos);
+ mdev->iface = iface;
+ mdev->ch_idx = channel_idx;
+ mdev->v4l2_dev.release = aim_v4l2_dev_release;
+
+ /* Create the v4l2_device */
+ strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
+ ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
+ if (ret) {
+ pr_err("v4l2_device_register() failed\n");
+ kfree(mdev);
+ return ret;
+ }
+
+ ret = aim_register_videodev(mdev);
+ if (ret)
+ goto err_unreg;
+
+ spin_lock_irq(&list_lock);
+ list_add(&mdev->list, &video_devices);
+ spin_unlock_irq(&list_lock);
+ v4l2_info(&mdev->v4l2_dev, "aim_probe_channel() done\n");
+ return 0;
+
+err_unreg:
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return ret;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+ if (!mdev) {
+ pr_err("no such channel is linked\n");
+ return -ENOENT;
+ }
+
+ v4l2_info(&mdev->v4l2_dev, "aim_disconnect_channel()\n");
+
+ spin_lock_irq(&list_lock);
+ list_del(&mdev->list);
+ spin_unlock_irq(&list_lock);
+
+ aim_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return 0;
+}
+
+static struct most_aim aim_info = {
+ .name = "v4l",
+ .probe_channel = aim_probe_channel,
+ .disconnect_channel = aim_disconnect_channel,
+ .rx_completion = aim_rx_data,
+};
+
+static int __init aim_init(void)
+{
+ spin_lock_init(&list_lock);
+ return most_register_aim(&aim_info);
+}
+
+static void __exit aim_exit(void)
+{
+ struct most_video_dev *mdev, *tmp;
+
+ /*
+ * As the mostcore currently doesn't call disconnect_channel()
+ * for linked channels when we call most_deregister_aim(),
+ * we simulate that call here.
+ * This must be fixed in the core.
+ */
+ spin_lock_irq(&list_lock);
+ list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+ list_del(&mdev->list);
+ spin_unlock_irq(&list_lock);
+
+ aim_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ spin_lock_irq(&list_lock);
+ }
+ spin_unlock_irq(&list_lock);
+
+ most_deregister_aim(&aim_info);
+ BUG_ON(!list_empty(&video_devices));
+}
+
+module_init(aim_init);
+module_exit(aim_exit);
+
+MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_LICENSE("GPL");
diff --git a/driver/hdm-dim2/dim2_errors.h b/driver/hdm-dim2/dim2_errors.h
new file mode 100644
index 0000000..66343ba
--- /dev/null
+++ b/driver/hdm-dim2/dim2_errors.h
@@ -0,0 +1,57 @@
+/*
+ * dim2_errors.h - Definitions of errors for DIM2 HAL API
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _MOST_DIM_ERRORS_H
+#define _MOST_DIM_ERRORS_H
+
+/**
+ * MOST DIM errors.
+ */
+enum dim_errors_t {
+ /** Not an error */
+ DIM_NO_ERROR = 0,
+
+ /** Bad base address for DIM2 IP */
+ DIM_INIT_ERR_DIM_ADDR = 0x10,
+
+ /** Bad MediaLB clock */
+ DIM_INIT_ERR_MLB_CLOCK,
+
+ /** Bad channel address */
+ DIM_INIT_ERR_CHANNEL_ADDRESS,
+
+ /** Out of DBR memory */
+ DIM_INIT_ERR_OUT_OF_MEMORY,
+
+ /** DIM API is called while DIM is not initialized successfully */
+ DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
+
+ /**
+ * Configuration does not respect hardware limitations
+ * for isochronous or synchronous channels
+ */
+ DIM_ERR_BAD_CONFIG,
+
+ /**
+ * Buffer size does not respect hardware limitations
+ * for isochronous or synchronous channels
+ */
+ DIM_ERR_BAD_BUFFER_SIZE,
+
+ DIM_ERR_UNDERFLOW,
+
+ DIM_ERR_OVERFLOW,
+};
+
+#endif /* _MOST_DIM_ERRORS_H */
diff --git a/driver/hdm-dim2/dim2_hal.c b/driver/hdm-dim2/dim2_hal.c
new file mode 100644
index 0000000..022df5c
--- /dev/null
+++ b/driver/hdm-dim2/dim2_hal.c
@@ -0,0 +1,890 @@
+/*
+ * dim2_hal.c - DIM2 HAL implementation
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#include "dim2_hal.h"
+#include "dim2_errors.h"
+#include "dim2_reg.h"
+#include <linux/stddef.h>
+
+/*
+ * Size factor for isochronous DBR buffer.
+ * Minimal value is 3.
+ */
+#define ISOC_DBR_FACTOR 3u
+
+/*
+ * Number of 32-bit units for DBR map.
+ *
+ * 1: block size is 512, max allocation is 16K
+ * 2: block size is 256, max allocation is 8K
+ * 4: block size is 128, max allocation is 4K
+ * 8: block size is 64, max allocation is 2K
+ *
+ * Min allocated space is block size.
+ * Max possible allocated space is 32 blocks.
+ */
+#define DBR_MAP_SIZE 2
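+
+/*
+ * Worked example (illustrative): with DBR_MAP_SIZE == 2 and the 16K DBR
+ * defined below, DBR_BLOCK_SIZE evaluates to 16384 / 32 / 2 = 256 bytes,
+ * so the minimal allocation occupies one 256-byte block and the maximal
+ * allocation is 32 blocks, i.e. 8K, matching the table above.
+ */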
+
+/* -------------------------------------------------------------------------- */
+/* not configurable area */
+
+#define CDT 0x00
+#define ADT 0x40
+#define MLB_CAT 0x80
+#define AHB_CAT 0x88
+
+#define DBR_SIZE (16 * 1024) /* specified by IP */
+#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
+
+/* -------------------------------------------------------------------------- */
+/* generic helper functions and macros */
+
+static inline u32 bit_mask(u8 position)
+{
+ return (u32)1 << position;
+}
+
+static inline bool dim_on_error(u8 error_id, const char *error_message)
+{
+ dimcb_on_error(error_id, error_message);
+ return false;
+}
+
+/* -------------------------------------------------------------------------- */
+/* types and local variables */
+
+struct lld_global_vars_t {
+ bool dim_is_initialized;
+ bool mcm_is_initialized;
+ struct dim2_regs __iomem *dim2; /* DIM2 core base address */
+ u32 fcnt;
+ u32 dbr_map[DBR_MAP_SIZE];
+};
+
+static struct lld_global_vars_t g = { false };
+
+/* -------------------------------------------------------------------------- */
+
+static int dbr_get_mask_size(u16 size)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (size <= (DBR_BLOCK_SIZE << i))
+ return 1 << i;
+ return 0;
+}
+
+/**
+ * Allocates DBR memory.
+ * @param size Size of the memory to allocate.
+ * @return Offset in DBR memory on success or DBR_SIZE if out of memory.
+ */
+static int alloc_dbr(u16 size)
+{
+ int mask_size;
+ int i, block_idx = 0;
+
+ if (size <= 0)
+ return DBR_SIZE; /* out of memory */
+
+ mask_size = dbr_get_mask_size(size);
+ if (mask_size == 0)
+ return DBR_SIZE; /* out of memory */
+
+ for (i = 0; i < DBR_MAP_SIZE; i++) {
+ u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+ u32 mask = ~((~(u32)0) << blocks);
+
+ do {
+ if ((g.dbr_map[i] & mask) == 0) {
+ g.dbr_map[i] |= mask;
+ return block_idx * DBR_BLOCK_SIZE;
+ }
+ block_idx += mask_size;
+ /* shift left in two steps to avoid undefined behavior for mask_size == 32 */
+ mask <<= mask_size - 1;
+ } while ((mask <<= 1) != 0);
+ }
+
+ return DBR_SIZE; /* out of memory */
+}
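+
+/*
+ * Usage sketch (illustrative): allocations are rounded up to whole
+ * blocks and are paired with free_dbr().  For example, alloc_dbr(600)
+ * marks 3 blocks (768 bytes) as used and returns their offset, and
+ * free_dbr(offs, 600) clears the same 3 blocks again.
+ */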
+
+static void free_dbr(int offs, int size)
+{
+ int block_idx = offs / DBR_BLOCK_SIZE;
+ u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+ u32 mask = ~((~(u32)0) << blocks);
+
+ mask <<= block_idx % 32;
+ g.dbr_map[block_idx / 32] &= ~mask;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+{
+ dimcb_io_write(&g.dim2->MADR, ctr_addr);
+
+ /* wait till transfer is completed */
+ while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
+ continue;
+
+ dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+
+ return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
+}
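+
+/*
+ * Illustrative example: service_channel() below reads the second 32-bit
+ * word of a channel's ADT entry with dim2_read_ctr(ADT + ch_addr, 1) in
+ * order to test the DNE (done) bit.
+ */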
+
+static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
+{
+ enum { MADR_WNR_BIT = 31 };
+
+ dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+
+ if (mask[0] != 0)
+ dimcb_io_write(&g.dim2->MDAT0, value[0]);
+ if (mask[1] != 0)
+ dimcb_io_write(&g.dim2->MDAT1, value[1]);
+ if (mask[2] != 0)
+ dimcb_io_write(&g.dim2->MDAT2, value[2]);
+ if (mask[3] != 0)
+ dimcb_io_write(&g.dim2->MDAT3, value[3]);
+
+ dimcb_io_write(&g.dim2->MDWE0, mask[0]);
+ dimcb_io_write(&g.dim2->MDWE1, mask[1]);
+ dimcb_io_write(&g.dim2->MDWE2, mask[2]);
+ dimcb_io_write(&g.dim2->MDWE3, mask[3]);
+
+ dimcb_io_write(&g.dim2->MADR, bit_mask(MADR_WNR_BIT) | ctr_addr);
+
+ /* wait till transfer is completed */
+ while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
+ continue;
+
+ dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+}
+
+static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
+{
+ u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static inline void dim2_clear_ctr(u32 ctr_addr)
+{
+ u32 const value[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(ctr_addr, value);
+}
+
+static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
+ bool read_not_write, bool sync_mfe)
+{
+ u16 const cat =
+ (read_not_write << CAT_RNW_BIT) |
+ (ch_type << CAT_CT_SHIFT) |
+ (ch_addr << CAT_CL_SHIFT) |
+ (sync_mfe << CAT_MFE_BIT) |
+ (false << CAT_MT_BIT) |
+ (true << CAT_CE_BIT);
+ u8 const ctr_addr = cat_base + ch_addr / 8;
+ u8 const idx = (ch_addr % 8) / 2;
+ u8 const shift = (ch_addr % 2) * 16;
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 value[4] = { 0, 0, 0, 0 };
+
+ mask[idx] = (u32)0xFFFF << shift;
+ value[idx] = cat << shift;
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
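+
+/*
+ * Worked example (illustrative): for ch_addr == 4 the 16-bit CAT word is
+ * written with ctr_addr == cat_base, idx == 2 and shift == 0, i.e. into
+ * the lower half of MDAT2.
+ */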
+
+static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
+{
+ u8 const ctr_addr = cat_base + ch_addr / 8;
+ u8 const idx = (ch_addr % 8) / 2;
+ u8 const shift = (ch_addr % 2) * 16;
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 value[4] = { 0, 0, 0, 0 };
+
+ mask[idx] = (u32)0xFFFF << shift;
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
+ u16 packet_length)
+{
+ u32 cdt[4] = { 0, 0, 0, 0 };
+
+ if (packet_length)
+ cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
+
+ cdt[3] =
+ ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
+ (dbr_address << CDT3_BA_SHIFT);
+ dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_clear_cdt(u8 ch_addr)
+{
+ u32 cdt[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_configure_adt(u8 ch_addr)
+{
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ adt[0] =
+ (true << ADT0_CE_BIT) |
+ (true << ADT0_LE_BIT) |
+ (0 << ADT0_PG_BIT);
+
+ dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_clear_adt(u8 ch_addr)
+{
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
+ u16 buffer_size)
+{
+ u8 const shift = idx * 16;
+
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_PS_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift) |
+ (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+ adt[1] =
+ (true << (ADT1_PS_BIT + shift)) |
+ (true << (ADT1_RDY_BIT + shift)) |
+ ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+ mask[idx + 2] = 0xFFFFFFFF;
+ adt[idx + 2] = buf_addr;
+
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
+ u16 buffer_size)
+{
+ u8 const shift = idx * 16;
+
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_RDY_BIT + shift) |
+ (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+ adt[1] =
+ (true << (ADT1_RDY_BIT + shift)) |
+ ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+ mask[idx + 2] = 0xFFFFFFFF;
+ adt[idx + 2] = buf_addr;
+
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_clear_ctram(void)
+{
+ u32 ctr_addr;
+
+ for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
+ dim2_clear_ctr(ctr_addr);
+}
+
+static void dim2_configure_channel(
+ u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
+ u16 packet_length, bool sync_mfe)
+{
+ dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
+ dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);
+
+ dim2_configure_adt(ch_addr);
+ dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);
+
+ /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
+ dimcb_io_write(&g.dim2->ACMR0,
+ dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
+}
+
+static void dim2_clear_channel(u8 ch_addr)
+{
+ /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
+ dimcb_io_write(&g.dim2->ACMR0,
+ dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
+
+ dim2_clear_cat(AHB_CAT, ch_addr);
+ dim2_clear_adt(ch_addr);
+
+ dim2_clear_cat(MLB_CAT, ch_addr);
+ dim2_clear_cdt(ch_addr);
+
+ /* clear channel status bit */
+ dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel state helpers */
+
+static void state_init(struct int_ch_state *state)
+{
+ state->request_counter = 0;
+ state->service_counter = 0;
+
+ state->idx1 = 0;
+ state->idx2 = 0;
+ state->level = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* macro helper functions */
+
+static inline bool check_channel_address(u32 ch_address)
+{
+ return ch_address > 0 && (ch_address % 2) == 0 &&
+ (ch_address / 2) <= (u32)CAT_CL_MASK;
+}
+
+static inline bool check_packet_length(u32 packet_length)
+{
+ u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
+
+ if (packet_length <= 0)
+ return false; /* too small */
+
+ if (packet_length > max_size)
+ return false; /* too big */
+
+ if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
+ return false; /* too big */
+
+ return true;
+}
+
+static inline bool check_bytes_per_frame(u32 bytes_per_frame)
+{
+ u16 const bd_factor = g.fcnt + 2;
+ u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
+
+ if (bytes_per_frame <= 0)
+ return false; /* too small */
+
+ if (bytes_per_frame > max_size)
+ return false; /* too big */
+
+ return true;
+}
+
+static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
+{
+ u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
+
+ if (buf_size > max_size)
+ return max_size;
+
+ return buf_size;
+}
+
+static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+ u16 n;
+ u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+
+ if (buf_size > max_size)
+ buf_size = max_size;
+
+ n = buf_size / packet_length;
+
+ if (n < 2u)
+ return 0; /* too small buffer for given packet_length */
+
+ return packet_length * n;
+}
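+
+/*
+ * Worked example (illustrative, assuming the hardware limit is not
+ * exceeded): for packet_length == 188 (one MPEG-TS packet) and
+ * buf_size == 1024, n == 5 and the function returns 940, the largest
+ * multiple of the packet length that fits.
+ */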
+
+static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+ u16 n;
+ u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+ u32 const unit = bytes_per_frame << g.fcnt;
+
+ if (buf_size > max_size)
+ buf_size = max_size;
+
+ n = buf_size / unit;
+
+ if (n < 1u)
+ return 0; /* too small buffer for given bytes_per_frame */
+
+ return unit * n;
+}
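+
+/*
+ * Worked example (illustrative, assuming the hardware limit is not
+ * exceeded): with fcnt == 4 and bytes_per_frame == 4 the unit is
+ * 4 << 4 = 64 bytes, so buf_size == 1000 is trimmed to 15 * 64 = 960.
+ */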
+
+static void dim2_cleanup(void)
+{
+ /* disable MediaLB */
+ dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
+
+ dim2_clear_ctram();
+
+ /* disable mlb_int interrupt */
+ dimcb_io_write(&g.dim2->MIEN, 0);
+
+ /* clear status for all dma channels */
+ dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
+ dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);
+
+ /* mask interrupts for all channels */
+ dimcb_io_write(&g.dim2->ACMR0, 0);
+ dimcb_io_write(&g.dim2->ACMR1, 0);
+}
+
+static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
+{
+ dim2_cleanup();
+
+ /* configure and enable MediaLB */
+ dimcb_io_write(&g.dim2->MLBC0,
+ enable_6pin << MLBC0_MLBPEN_BIT |
+ mlb_clock << MLBC0_MLBCLK_SHIFT |
+ g.fcnt << MLBC0_FCNT_SHIFT |
+ true << MLBC0_MLBEN_BIT);
+
+ /* activate all HBI channels */
+ dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
+ dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);
+
+ /* enable HBI */
+ dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
+
+ /* configure DMA */
+ dimcb_io_write(&g.dim2->ACTL,
+ ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
+ true << ACTL_SCE_BIT);
+}
+
+static bool dim2_is_mlb_locked(void)
+{
+ u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
+ u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
+ bit_mask(MLBC1_LOCKERR_BIT);
+ u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
+ u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
+
+ dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
+ return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
+ (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel helper routines */
+
+static inline bool service_channel(u8 ch_addr, u8 idx)
+{
+ u8 const shift = idx * 16;
+ u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
+
+ if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
+ return false;
+
+ {
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt_w[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_DNE_BIT + shift) |
+ bit_mask(ADT1_ERR_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift);
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
+ }
+
+ /* clear channel status bit */
+ dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+
+ return true;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel init routines */
+
+static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = packet_length;
+ ch->bytes_per_frame = 0;
+ ch->done_sw_buffers_number = 0;
+}
+
+static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = 0;
+ ch->bytes_per_frame = bytes_per_frame;
+ ch->done_sw_buffers_number = 0;
+}
+
+static void channel_init(struct dim_channel *ch, u8 ch_addr)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = 0;
+ ch->bytes_per_frame = 0;
+ ch->done_sw_buffers_number = 0;
+}
+
+/* returns true if channel interrupt state is cleared */
+static bool channel_service_interrupt(struct dim_channel *ch)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (!service_channel(ch->addr, state->idx2))
+ return false;
+
+ state->idx2 ^= 1;
+ state->request_counter++;
+ return true;
+}
+
+static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (buf_size <= 0)
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
+
+ if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
+ buf_size != norm_ctrl_async_buffer_size(buf_size))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad control/async buffer size");
+
+ if (ch->packet_length &&
+ buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad isochronous buffer size");
+
+ if (ch->bytes_per_frame &&
+ buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad synchronous buffer size");
+
+ if (state->level >= 2u)
+ return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
+
+ ++state->level;
+
+ if (ch->packet_length || ch->bytes_per_frame)
+ dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
+ else
+ dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
+ buf_size);
+ state->idx1 ^= 1;
+
+ return true;
+}
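+
+/*
+ * Illustrative note: channel_start() and channel_service_interrupt()
+ * implement double buffering.  idx1 selects the ADT descriptor half used
+ * for the next enqueued buffer, idx2 the half expected to complete next,
+ * and level (0..2) counts the buffers currently owned by the hardware.
+ */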
+
+static u8 channel_service(struct dim_channel *ch)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (state->service_counter != state->request_counter) {
+ state->service_counter++;
+ if (state->level == 0)
+ return DIM_ERR_UNDERFLOW;
+
+ --state->level;
+ ch->done_sw_buffers_number++;
+ }
+
+ return DIM_NO_ERROR;
+}
+
+static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+ if (buffers_number > ch->done_sw_buffers_number)
+ return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
+
+ ch->done_sw_buffers_number -= buffers_number;
+ return true;
+}
+
+/* -------------------------------------------------------------------------- */
+/* API */
+
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock, u32 fcnt)
+{
+ g.dim_is_initialized = false;
+
+ if (!dim_base_address)
+ return DIM_INIT_ERR_DIM_ADDR;
+
+ /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
+ /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
+ if (mlb_clock >= 8)
+ return DIM_INIT_ERR_MLB_CLOCK;
+
+ if (fcnt > MLBC0_FCNT_MAX_VAL)
+ return DIM_INIT_ERR_MLB_CLOCK;
+
+ g.dim2 = dim_base_address;
+ g.fcnt = fcnt;
+ g.dbr_map[0] = 0;
+ g.dbr_map[1] = 0;
+
+ dim2_initialize(mlb_clock >= 3, mlb_clock);
+
+ g.dim_is_initialized = true;
+
+ return DIM_NO_ERROR;
+}
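+
+/*
+ * Usage sketch (illustrative, mirrors the HDM; io_base, is_tx,
+ * ch_address and buf_size are assumed to be provided by the caller):
+ *
+ *	struct dim_channel ch;
+ *	u8 hal_ret = dim_startup(io_base, CLK_3072FS, 4);
+ *
+ *	if (hal_ret == DIM_NO_ERROR)
+ *		hal_ret = dim_init_control(&ch, is_tx, ch_address, buf_size);
+ */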
+
+void dim_shutdown(void)
+{
+ g.dim_is_initialized = false;
+ dim2_cleanup();
+}
+
+bool dim_get_lock_state(void)
+{
+ return dim2_is_mlb_locked();
+}
+
+static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
+ u16 ch_address, u16 hw_buffer_size)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ ch->dbr_size = hw_buffer_size;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ channel_init(ch, ch_address / 2);
+
+ dim2_configure_channel(ch->addr, type, is_tx,
+ ch->dbr_addr, ch->dbr_size, 0, false);
+
+ return DIM_NO_ERROR;
+}
+
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
+{
+ return norm_ctrl_async_buffer_size(buf_size);
+}
+
+/**
+ * Retrieves the maximal possible correct buffer size for the isochronous
+ * data type that conforms to the given packet length and is not bigger
+ * than the given buffer size.
+ *
+ * Returns the non-zero correct buffer size, or zero on error.
+ */
+u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+ if (!check_packet_length(packet_length))
+ return 0;
+
+ return norm_isoc_buffer_size(buf_size, packet_length);
+}
+
+/**
+ * Retrieves the maximal possible correct buffer size for the synchronous
+ * data type that conforms to the given bytes per frame and is not bigger
+ * than the given buffer size.
+ *
+ * Returns the non-zero correct buffer size, or zero on error.
+ */
+u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+ if (!check_bytes_per_frame(bytes_per_frame))
+ return 0;
+
+ return norm_sync_buffer_size(buf_size, bytes_per_frame);
+}
+
+u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size)
+{
+ return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
+ max_buffer_size);
+}
+
+u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size)
+{
+ return init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
+ max_buffer_size);
+}
+
+u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 packet_length)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!check_packet_length(packet_length))
+ return DIM_ERR_BAD_CONFIG;
+
+ ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ isoc_init(ch, ch_address / 2, packet_length);
+
+ dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
+ ch->dbr_size, packet_length, false);
+
+ return DIM_NO_ERROR;
+}
+
+u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 bytes_per_frame)
+{
+ u16 bd_factor = g.fcnt + 2;
+
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!check_bytes_per_frame(bytes_per_frame))
+ return DIM_ERR_BAD_CONFIG;
+
+ ch->dbr_size = bytes_per_frame << bd_factor;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ sync_init(ch, ch_address / 2, bytes_per_frame);
+
+ dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
+ ch->dbr_addr, ch->dbr_size, 0, true);
+
+ return DIM_NO_ERROR;
+}
+
+u8 dim_destroy_channel(struct dim_channel *ch)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ dim2_clear_channel(ch->addr);
+ if (ch->dbr_addr < DBR_SIZE)
+ free_dbr(ch->dbr_addr, ch->dbr_size);
+ ch->dbr_addr = DBR_SIZE;
+
+ return DIM_NO_ERROR;
+}
+
+void dim_service_irq(struct dim_channel *const *channels)
+{
+ bool state_changed;
+
+ if (!g.dim_is_initialized) {
+ dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "DIM is not initialized");
+ return;
+ }
+
+ if (!channels) {
+ dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
+ return;
+ }
+
+ /*
+ * Use a do/while loop and a flag to make sure the age is changed back
+ * at least once, otherwise the interrupt may never come if the CPU
+ * generates an interrupt on changing age.
+ * This loop runs not more than the number of channels, because the
+ * channel_service_interrupt() routine doesn't start the channel again.
+ */
+ do {
+ struct dim_channel *const *ch = channels;
+
+ state_changed = false;
+
+ while (*ch) {
+ state_changed |= channel_service_interrupt(*ch);
+ ++ch;
+ }
+ } while (state_changed);
+
+ /* clear pending Interrupts */
+ dimcb_io_write(&g.dim2->MS0, 0);
+ dimcb_io_write(&g.dim2->MS1, 0);
+}
+
+u8 dim_service_channel(struct dim_channel *ch)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ return channel_service(ch);
+}
+
+struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
+ struct dim_ch_state_t *state_ptr)
+{
+ if (!ch || !state_ptr)
+ return NULL;
+
+ state_ptr->ready = ch->state.level < 2;
+ state_ptr->done_buffers = ch->done_sw_buffers_number;
+
+ return state_ptr;
+}
+
+bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
+ u16 buffer_size)
+{
+ if (!ch)
+ return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "Bad channel");
+
+ return channel_start(ch, buffer_addr, buffer_size);
+}
+
+bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+ if (!ch)
+ return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "Bad channel");
+
+ return channel_detach_buffers(ch, buffers_number);
+}
diff --git a/driver/hdm-dim2/dim2_hal.h b/driver/hdm-dim2/dim2_hal.h
new file mode 100644
index 0000000..453634a
--- /dev/null
+++ b/driver/hdm-dim2/dim2_hal.h
@@ -0,0 +1,107 @@
+/*
+ * dim2_hal.h - DIM2 HAL interface
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _DIM2_HAL_H
+#define _DIM2_HAL_H
+
+#include <linux/types.h>
+#include "dim2_reg.h"
+
+/*
+ * The values below are specified in the hardware specification.
+ * So, they should not be changed until the hardware specification changes.
+ */
+enum mlb_clk_speed {
+ CLK_256FS = 0,
+ CLK_512FS = 1,
+ CLK_1024FS = 2,
+ CLK_2048FS = 3,
+ CLK_3072FS = 4,
+ CLK_4096FS = 5,
+ CLK_6144FS = 6,
+ CLK_8192FS = 7,
+};
+
+struct dim_ch_state_t {
+ bool ready; /* Shows readiness to enqueue next buffer */
+ u16 done_buffers; /* Number of completed buffers */
+};
+
+struct int_ch_state {
+ /* changed only in interrupt context */
+ volatile int request_counter;
+
+ /* changed only in task context */
+ volatile int service_counter;
+
+ u8 idx1;
+ u8 idx2;
+ u8 level; /* [0..2], buffering level */
+};
+
+struct dim_channel {
+ struct int_ch_state state;
+ u8 addr;
+ u16 dbr_addr;
+ u16 dbr_size;
+ u16 packet_length; /**< Isochronous packet length in bytes. */
+ u16 bytes_per_frame; /**< Synchronous bytes per frame. */
+ u16 done_sw_buffers_number; /**< Number of completed software buffers. */
+};
+
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock, u32 fcnt);
+
+void dim_shutdown(void);
+
+bool dim_get_lock_state(void);
+
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size);
+
+u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length);
+
+u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame);
+
+u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size);
+
+u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size);
+
+u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 packet_length);
+
+u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 bytes_per_frame);
+
+u8 dim_destroy_channel(struct dim_channel *ch);
+
+void dim_service_irq(struct dim_channel *const *channels);
+
+u8 dim_service_channel(struct dim_channel *ch);
+
+struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
+ struct dim_ch_state_t *state_ptr);
+
+bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
+ u16 buffer_size);
+
+bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
+
+u32 dimcb_io_read(u32 __iomem *ptr32);
+
+void dimcb_io_write(u32 __iomem *ptr32, u32 value);
+
+void dimcb_on_error(u8 error_id, const char *error_message);
+
+#endif /* _DIM2_HAL_H */
diff --git a/driver/hdm-dim2/dim2_hdm.c b/driver/hdm-dim2/dim2_hdm.c
new file mode 100644
index 0000000..87039d9
--- /dev/null
+++ b/driver/hdm-dim2/dim2_hdm.c
@@ -0,0 +1,913 @@
+/*
+ * dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#include <mostcore.h>
+#include <networking.h>
+#include "dim2_hal.h"
+#include "dim2_hdm.h"
+#include "dim2_errors.h"
+#include "dim2_sysfs.h"
+
+#define DMA_CHANNELS (32 - 1) /* channel 0 is a system channel */
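+
+/*
+ * Illustrative note: channel index i (0..30) is mapped to the even
+ * MediaLB channel address i * 2 + 2 (see configure_channel() below);
+ * address 0 is reserved for the system channel.
+ */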
+
+#define MAX_BUFFERS_PACKET 32
+#define MAX_BUFFERS_STREAMING 32
+#define MAX_BUF_SIZE_PACKET 2048
+#define MAX_BUF_SIZE_STREAMING (8 * 1024)
+
+/* command line parameter to select clock speed */
+static char *clock_speed;
+module_param(clock_speed, charp, 0);
+MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
+
+/*
+ * The parameter representing the number of frames per sub-buffer for
+ * synchronous channels. Valid values: [0 .. 6].
+ *
+ * The values 0, 1, 2, 3, 4, 5, 6 represent corresponding number of frames per
+ * sub-buffer 1, 2, 4, 8, 16, 32, 64.
+ */
+static u8 fcnt = 4; /* (1 << fcnt) frames per subbuffer */
+module_param(fcnt, byte, 0);
+MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
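+
+/*
+ * Worked example (illustrative): the default fcnt == 4 yields
+ * 1 << 4 == 16 frames per sub-buffer; a synchronous channel with
+ * bytes_per_frame == 4 then needs a DBR buffer of
+ * 4 << (4 + 2) == 256 bytes (see dim_init_sync() in the HAL).
+ */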
+
+/*
+ * #############################################################################
+ *
+ * The define below activates a utility function used by HAL-simu
+ * for calling the DIM interrupt handler.
+ * It is used only for TEST PURPOSES and must be commented out before release.
+ *
+ * #############################################################################
+ */
+/* #define ENABLE_HDM_TEST */
+
+static DEFINE_SPINLOCK(dim_lock);
+
+static void dim2_tasklet_fn(unsigned long data);
+static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+
+/**
+ * struct hdm_channel - private structure to keep channel specific data
+ * @name: channel name
+ * @is_initialized: identifier to know whether the channel is initialized
+ * @ch: HAL specific channel data
+ * @pending_list: list to keep MBO's before starting transfer
+ * @started_list: list to keep MBO's after starting transfer
+ * @direction: channel direction (TX or RX)
+ * @data_type: channel data type
+ */
+struct hdm_channel {
+ char name[sizeof "caNNN"];
+ bool is_initialized;
+ struct dim_channel ch;
+ struct list_head pending_list; /* before dim_enqueue_buffer() */
+ struct list_head started_list; /* after dim_enqueue_buffer() */
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+};
+
+/**
+ * struct dim2_hdm - private structure to keep interface specific data
+ * @hch: an array of channel specific data
+ * @capabilities: an array of channel capability data
+ * @most_iface: most interface structure
+ * @name: device name
+ * @io_base: I/O register base address
+ * @irq_ahb0: dim2 AHB0 irq number
+ * @clk_speed: user selectable (through command line parameter) clock speed
+ * @netinfo_task: thread to deliver network status
+ * @netinfo_waitq: waitq for the thread to sleep
+ * @deliver_netinfo: to identify whether network status received
+ * @mac_addrs: INIC MAC address
+ * @link_state: network link state
+ * @atx_idx: index of async tx channel
+ * @bus: medialb bus object used by the sysfs interface
+ */
+struct dim2_hdm {
+ struct hdm_channel hch[DMA_CHANNELS];
+ struct most_channel_capability capabilities[DMA_CHANNELS];
+ struct most_interface most_iface;
+ char name[16 + sizeof "dim2-"];
+ void __iomem *io_base;
+ unsigned int irq_ahb0;
+ int clk_speed;
+ struct task_struct *netinfo_task;
+ wait_queue_head_t netinfo_waitq;
+ int deliver_netinfo;
+ unsigned char mac_addrs[6];
+ unsigned char link_state;
+ int atx_idx;
+ struct medialb_bus bus;
+};
+
+#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
+
+/* Macro to identify a network status message */
+#define PACKET_IS_NET_INFO(p) \
+ (((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
+ ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
+
+#if defined(ENABLE_HDM_TEST)
+static struct dim2_hdm *test_dev;
+#endif
+
+bool dim2_sysfs_get_state_cb(void)
+{
+ bool state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ state = dim_get_lock_state();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return state;
+}
+
+/**
+ * dimcb_io_read - callback from HAL to read an I/O register
+ * @ptr32: register address
+ */
+u32 dimcb_io_read(u32 __iomem *ptr32)
+{
+ return readl(ptr32);
+}
+
+/**
+ * dimcb_io_write - callback from HAL to write value to an I/O register
+ * @ptr32: register address
+ * @value: value to write
+ */
+void dimcb_io_write(u32 __iomem *ptr32, u32 value)
+{
+ writel(value, ptr32);
+}
+
+/**
+ * dimcb_on_error - callback from HAL to report miscommunication between
+ * HDM and HAL
+ * @error_id: Error ID
+ * @error_message: Error message. Some text in a free format
+ */
+void dimcb_on_error(u8 error_id, const char *error_message)
+{
+ pr_err("dimcb_on_error: error_id - %d, error_message - %s\n", error_id,
+ error_message);
+}
+
+/**
+ * startup_dim - initialize the dim2 interface
+ * @pdev: platform device
+ *
+ * Get the value of command line parameter "clock_speed" if given or use the
+ * default value, enable the clock and PLL, and initialize the dim2 interface.
+ */
+static int startup_dim(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ struct dim2_platform_data *pdata = pdev->dev.platform_data;
+ u8 hal_ret;
+
+ dev->clk_speed = -1;
+
+ if (clock_speed) {
+ if (!strcmp(clock_speed, "256fs"))
+ dev->clk_speed = CLK_256FS;
+ else if (!strcmp(clock_speed, "512fs"))
+ dev->clk_speed = CLK_512FS;
+ else if (!strcmp(clock_speed, "1024fs"))
+ dev->clk_speed = CLK_1024FS;
+ else if (!strcmp(clock_speed, "2048fs"))
+ dev->clk_speed = CLK_2048FS;
+ else if (!strcmp(clock_speed, "3072fs"))
+ dev->clk_speed = CLK_3072FS;
+ else if (!strcmp(clock_speed, "4096fs"))
+ dev->clk_speed = CLK_4096FS;
+ else if (!strcmp(clock_speed, "6144fs"))
+ dev->clk_speed = CLK_6144FS;
+ else if (!strcmp(clock_speed, "8192fs"))
+ dev->clk_speed = CLK_8192FS;
+ }
+
+ if (dev->clk_speed == -1) {
+ pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
+ dev->clk_speed = CLK_3072FS;
+ } else {
+ pr_info("Selected clock speed: %s\n", clock_speed);
+ }
+ if (pdata && pdata->init) {
+ int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
+
+ if (ret)
+ return ret;
+ }
+
+ pr_info("sync: num of frames per sub-buffer: %u\n", fcnt);
+ hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
+ if (hal_ret != DIM_NO_ERROR) {
+ pr_err("dim_startup failed: %d\n", hal_ret);
+ if (pdata && pdata->destroy)
+ pdata->destroy(pdata);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * try_start_dim_transfer - try to transfer a buffer on a channel
+ * @hdm_ch: channel specific data
+ *
+ * Transfer a buffer from pending_list if the channel is ready
+ */
+static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
+{
+ u16 buf_size;
+ struct list_head *head = &hdm_ch->pending_list;
+ struct mbo *mbo;
+ unsigned long flags;
+ struct dim_ch_state_t st;
+
+ BUG_ON(!hdm_ch);
+ BUG_ON(!hdm_ch->is_initialized);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ mbo = list_first_entry(head, struct mbo, list);
+ buf_size = mbo->buffer_length;
+
+ BUG_ON(mbo->bus_address == 0);
+ if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ mbo->complete(mbo);
+ return -EFAULT;
+ }
+
+ list_move_tail(head->next, &hdm_ch->started_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * deliver_netinfo_thread - thread to deliver network status to mostcore
+ * @data: private data
+ *
+ * Wait for network status and deliver it to mostcore once it is received
+ */
+static int deliver_netinfo_thread(void *data)
+{
+ struct dim2_hdm *dev = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(dev->netinfo_waitq,
+ dev->deliver_netinfo ||
+ kthread_should_stop());
+
+ if (dev->deliver_netinfo) {
+ dev->deliver_netinfo--;
+ most_deliver_netinfo(&dev->most_iface, dev->link_state,
+ dev->mac_addrs);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * retrieve_netinfo - retrieve network status from received buffer
+ * @dev: private data
+ * @mbo: received MBO
+ *
+ * Parse the message in buffer and get node address, link state, MAC address.
+ * Wake up a thread to deliver this status to mostcore
+ */
+static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
+{
+ u8 *data = mbo->virt_address;
+ u8 *mac = dev->mac_addrs;
+
+ pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
+ dev->link_state = data[18];
+ pr_info("NIState: %d\n", dev->link_state);
+ memcpy(mac, data + 19, 6);
+ pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ dev->deliver_netinfo++;
+ wake_up_interruptible(&dev->netinfo_waitq);
+}
+
+/**
+ * service_done_flag - handle completed buffers
+ * @dev: private data
+ * @ch_idx: channel index
+ *
+ * Return back the completed buffers to mostcore, using completion callback
+ */
+static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
+{
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ struct dim_ch_state_t st;
+ struct list_head *head;
+ struct mbo *mbo;
+ int done_buffers;
+ unsigned long flags;
+ u8 *data;
+
+ BUG_ON(!hdm_ch);
+ BUG_ON(!hdm_ch->is_initialized);
+
+ spin_lock_irqsave(&dim_lock, flags);
+
+ done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
+ if (!done_buffers) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return;
+ }
+
+ if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ head = &hdm_ch->started_list;
+
+ while (done_buffers) {
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
+ break;
+ }
+
+ mbo = list_first_entry(head, struct mbo, list);
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ data = mbo->virt_address;
+
+ if (hdm_ch->data_type == MOST_CH_ASYNC &&
+ hdm_ch->direction == MOST_CH_RX &&
+ PACKET_IS_NET_INFO(data)) {
+ retrieve_netinfo(dev, mbo);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ list_add_tail(&mbo->list, &hdm_ch->pending_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+ } else {
+ if (hdm_ch->data_type == MOST_CH_CONTROL ||
+ hdm_ch->data_type == MOST_CH_ASYNC) {
+ u32 const data_size =
+ (u32)data[0] * 256 + data[1] + 2;
+
+ mbo->processed_length =
+ min_t(u32, data_size,
+ mbo->buffer_length);
+ } else {
+ mbo->processed_length = mbo->buffer_length;
+ }
+ mbo->status = MBO_SUCCESS;
+ mbo->complete(mbo);
+ }
+
+ done_buffers--;
+ }
+}
+
+static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
+ struct dim_channel **buffer)
+{
+ int idx = 0;
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+ if (dev->hch[ch_idx].is_initialized)
+ buffer[idx++] = &dev->hch[ch_idx].ch;
+ }
+ buffer[idx++] = NULL;
+
+ return buffer;
+}
+
+/**
+ * dim2_tasklet_fn - tasklet function
+ * @data: private data
+ *
+ * Service each initialized channel, if needed
+ */
+static void dim2_tasklet_fn(unsigned long data)
+{
+ struct dim2_hdm *dev = (struct dim2_hdm *)data;
+ unsigned long flags;
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+ if (!dev->hch[ch_idx].is_initialized)
+ continue;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_service_channel(&dev->hch[ch_idx].ch);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ service_done_flag(dev, ch_idx);
+ while (!try_start_dim_transfer(dev->hch + ch_idx))
+ continue;
+ }
+}
+
+/**
+ * dim2_ahb_isr - interrupt service routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Acknowledge the interrupt and schedule a tasklet to service channels.
+ * Return IRQ_HANDLED.
+ */
+static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
+{
+ struct dim2_hdm *dev = _dev;
+ struct dim_channel *buffer[DMA_CHANNELS + 1];
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_service_irq(get_active_channels(dev, buffer));
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+#if !defined(ENABLE_HDM_TEST)
+ dim2_tasklet.data = (unsigned long)dev;
+ tasklet_schedule(&dim2_tasklet);
+#else
+ dim2_tasklet_fn((unsigned long)dev);
+#endif
+ return IRQ_HANDLED;
+}
+
+#if defined(ENABLE_HDM_TEST)
+
+/*
+ * Utility function used by HAL-simu for calling DIM interrupt handler.
+ * It is used only for TEST PURPOSE.
+ */
+void raise_dim_interrupt(void)
+{
+ (void)dim2_ahb_isr(0, test_dev);
+}
+#endif
+
+/**
+ * complete_all_mbos - complete MBO's in a list
+ * @head: list head
+ *
+ * Delete all entries from the list and return the MBO's to mostcore
+ * using the completion callback.
+ */
+static void complete_all_mbos(struct list_head *head)
+{
+ unsigned long flags;
+ struct mbo *mbo;
+
+ for (;;) {
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ break;
+ }
+
+ mbo = list_first_entry(head, struct mbo, list);
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_CLOSE;
+ mbo->complete(mbo);
+ }
+}
+
+/**
+ * configure_channel - initialize a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: index of the channel to be configured
+ * @ccfg: structure that holds the configuration information
+ *
+ * Receives configuration information from mostcore and initializes
+ * the corresponding channel. Return 0 on success, negative on failure.
+ */
+static int configure_channel(struct most_interface *most_iface, int ch_idx,
+ struct most_channel_config *ccfg)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ bool const is_tx = ccfg->direction == MOST_CH_TX;
+ u16 const sub_size = ccfg->subbuffer_size;
+ u16 const buf_size = ccfg->buffer_size;
+ u16 new_size;
+ unsigned long flags;
+ u8 hal_ret;
+ int const ch_addr = ch_idx * 2 + 2;
+ struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (hdm_ch->is_initialized)
+ return -EPERM;
+
+ switch (ccfg->data_type) {
+ case MOST_CH_CONTROL:
+ new_size = dim_norm_ctrl_async_buffer_size(buf_size);
+ if (new_size == 0) {
+ pr_err("%s: too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
+ new_size);
+ break;
+ case MOST_CH_ASYNC:
+ new_size = dim_norm_ctrl_async_buffer_size(buf_size);
+ if (new_size == 0) {
+ pr_err("%s: too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr, new_size);
+ break;
+ case MOST_CH_ISOC_AVP:
+ new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
+ if (new_size == 0) {
+ pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+ hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+ break;
+ case MOST_CH_SYNC:
+ new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
+ if (new_size == 0) {
+ pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+ hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+ break;
+ default:
+ pr_err("%s: configure failed, bad channel type: %d\n",
+ hdm_ch->name, ccfg->data_type);
+ return -EINVAL;
+ }
+
+ if (hal_ret != DIM_NO_ERROR) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
+ hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
+ return -ENODEV;
+ }
+
+ hdm_ch->data_type = ccfg->data_type;
+ hdm_ch->direction = ccfg->direction;
+ hdm_ch->is_initialized = true;
+
+ if (hdm_ch->data_type == MOST_CH_ASYNC &&
+ hdm_ch->direction == MOST_CH_TX &&
+ dev->atx_idx < 0)
+ dev->atx_idx = ch_idx;
+
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * enqueue - enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: index of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Push the buffer into pending_list and try to transfer one buffer from
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int enqueue(struct most_interface *most_iface, int ch_idx,
+ struct mbo *mbo)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ unsigned long flags;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (!hdm_ch->is_initialized)
+ return -EPERM;
+
+ if (mbo->bus_address == 0)
+ return -EFAULT;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ list_add_tail(&mbo->list, &hdm_ch->pending_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ (void)try_start_dim_transfer(hdm_ch);
+
+ return 0;
+}
+
+/**
+ * request_netinfo - triggers retrieving of network info
+ * @most_iface: pointer to the interface
+ * @ch_idx: corresponding channel index
+ *
+ * Send a command to the INIC that triggers retrieving of network info by
+ * means of "Message exchange over MDP/MEP".
+ */
+static void request_netinfo(struct most_interface *most_iface, int ch_idx)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct mbo *mbo;
+ u8 *data;
+
+ if (dev->atx_idx < 0) {
+ pr_err("Async Tx Not initialized\n");
+ return;
+ }
+
+ mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
+ if (!mbo)
+ return;
+
+ mbo->buffer_length = 5;
+
+ data = mbo->virt_address;
+
+ data[0] = 0x00; /* PML High byte */
+ data[1] = 0x03; /* PML Low byte */
+ data[2] = 0x02; /* PMHL */
+ data[3] = 0x08; /* FPH */
+ data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
+
+ most_submit_mbo(mbo);
+}
+
+/**
+ * poison_channel - poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel index
+ *
+ * Destroy a channel and complete all the buffers in both started_list &
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int poison_channel(struct most_interface *most_iface, int ch_idx)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ unsigned long flags;
+ u8 hal_ret;
+ int ret = 0;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (!hdm_ch->is_initialized)
+ return -EPERM;
+
+ tasklet_disable(&dim2_tasklet);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = dim_destroy_channel(&hdm_ch->ch);
+ hdm_ch->is_initialized = false;
+ if (ch_idx == dev->atx_idx)
+ dev->atx_idx = -1;
+ spin_unlock_irqrestore(&dim_lock, flags);
+ tasklet_enable(&dim2_tasklet);
+ if (hal_ret != DIM_NO_ERROR) {
+ pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
+ ret = -EFAULT;
+ }
+
+ complete_all_mbos(&hdm_ch->started_list);
+ complete_all_mbos(&hdm_ch->pending_list);
+
+ return ret;
+}
+
+/**
+ * dim2_probe - dim2 probe handler
+ * @pdev: platform device structure
+ *
+ * Register the dim2 interface with mostcore and initialize it.
+ * Return 0 on success, negative on failure.
+ */
+static int dim2_probe(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev;
+ struct resource *res;
+ int ret, i;
+ struct kobject *kobj;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->atx_idx = -1;
+
+ platform_set_drvdata(pdev, dev);
+#if defined(ENABLE_HDM_TEST)
+ test_dev = dev;
+#else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->io_base))
+ return PTR_ERR(dev->io_base);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ dev->irq_ahb0 = ret;
+
+ ret = devm_request_irq(&pdev->dev, dev->irq_ahb0, dim2_ahb_isr, 0,
+ "mlb_ahb0", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d, err: %d\n",
+ dev->irq_ahb0, ret);
+ return ret;
+ }
+#endif
+ init_waitqueue_head(&dev->netinfo_waitq);
+ dev->deliver_netinfo = 0;
+ dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
+ "dim2_netinfo");
+ if (IS_ERR(dev->netinfo_task))
+ return PTR_ERR(dev->netinfo_task);
+
+ for (i = 0; i < DMA_CHANNELS; i++) {
+ struct most_channel_capability *cap = dev->capabilities + i;
+ struct hdm_channel *hdm_ch = dev->hch + i;
+
+ INIT_LIST_HEAD(&hdm_ch->pending_list);
+ INIT_LIST_HEAD(&hdm_ch->started_list);
+ hdm_ch->is_initialized = false;
+ snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
+
+ cap->name_suffix = hdm_ch->name;
+ cap->direction = MOST_CH_RX | MOST_CH_TX;
+ cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+ MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+ cap->num_buffers_packet = MAX_BUFFERS_PACKET;
+ cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
+ cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
+ cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
+ }
+
+ snprintf(dev->name, sizeof(dev->name), "dim2-%016llx",
+ (unsigned long long)res->start);
+
+ dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
+ dev->most_iface.description = dev->name;
+ dev->most_iface.num_channels = DMA_CHANNELS;
+ dev->most_iface.channel_vector = dev->capabilities;
+ dev->most_iface.configure = configure_channel;
+ dev->most_iface.enqueue = enqueue;
+ dev->most_iface.poison_channel = poison_channel;
+ dev->most_iface.request_netinfo = request_netinfo;
+
+ kobj = most_register_interface(&dev->most_iface);
+ if (IS_ERR(kobj)) {
+ ret = PTR_ERR(kobj);
+ dev_err(&pdev->dev, "failed to register MOST interface\n");
+ goto err_stop_thread;
+ }
+
+ ret = dim2_sysfs_probe(&dev->bus, kobj);
+ if (ret)
+ goto err_unreg_iface;
+
+ ret = startup_dim(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize DIM2\n");
+ goto err_destroy_bus;
+ }
+
+ return 0;
+
+err_destroy_bus:
+ dim2_sysfs_destroy(&dev->bus);
+err_unreg_iface:
+ most_deregister_interface(&dev->most_iface);
+err_stop_thread:
+ kthread_stop(dev->netinfo_task);
+
+ return ret;
+}
+
+/**
+ * dim2_remove - dim2 remove handler
+ * @pdev: platform device structure
+ *
+ * Unregister the interface from mostcore
+ */
+static int dim2_remove(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ struct dim2_platform_data *pdata = pdev->dev.platform_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_shutdown();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ if (pdata && pdata->destroy)
+ pdata->destroy(pdata);
+
+ dim2_sysfs_destroy(&dev->bus);
+ most_deregister_interface(&dev->most_iface);
+ kthread_stop(dev->netinfo_task);
+
+ /*
+ * break the link to the local platform_device_id struct
+ * to prevent a crash when the platform device module is unloaded
+ */
+ pdev->id_entry = NULL;
+
+ return 0;
+}
+
+static const struct platform_device_id dim2_id[] = {
+ { "medialb_dim2" },
+ { }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(platform, dim2_id);
+
+static struct platform_driver dim2_driver = {
+ .probe = dim2_probe,
+ .remove = dim2_remove,
+ .id_table = dim2_id,
+ .driver = {
+ .name = "hdm_dim2",
+ },
+};
+
+module_platform_driver(dim2_driver);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/driver/hdm-dim2/dim2_hdm.h b/driver/hdm-dim2/dim2_hdm.h
new file mode 100644
index 0000000..4050e7c
--- /dev/null
+++ b/driver/hdm-dim2/dim2_hdm.h
@@ -0,0 +1,27 @@
+/*
+ * dim2_hdm.h - MediaLB DIM2 HDM Header
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_HDM_H
+#define DIM2_HDM_H
+
+struct device;
+
+/* platform dependent data for dim2 interface */
+struct dim2_platform_data {
+ int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
+ int clk_speed);
+ void (*destroy)(struct dim2_platform_data *pd);
+ void *priv;
+};
+
+#endif /* DIM2_HDM_H */
diff --git a/driver/hdm-dim2/dim2_reg.h b/driver/hdm-dim2/dim2_reg.h
new file mode 100644
index 0000000..3b1c200
--- /dev/null
+++ b/driver/hdm-dim2/dim2_reg.h
@@ -0,0 +1,159 @@
+/*
+ * dim2_reg.h - Definitions for registers of DIM2
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_OS62420_H
+#define DIM2_OS62420_H
+
+#include <linux/types.h>
+
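+/*
+ * The offsets annotated below are 32-bit word indices into the register
+ * file; the byte offset of a register is four times its index.
+ */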
+struct dim2_regs {
+ /* 0x00 */ u32 MLBC0;
+ /* 0x01 */ u32 rsvd0[1];
+ /* 0x02 */ u32 MLBPC0;
+ /* 0x03 */ u32 MS0;
+ /* 0x04 */ u32 rsvd1[1];
+ /* 0x05 */ u32 MS1;
+ /* 0x06 */ u32 rsvd2[2];
+ /* 0x08 */ u32 MSS;
+ /* 0x09 */ u32 MSD;
+ /* 0x0A */ u32 rsvd3[1];
+ /* 0x0B */ u32 MIEN;
+ /* 0x0C */ u32 rsvd4[1];
+ /* 0x0D */ u32 MLBPC2;
+ /* 0x0E */ u32 MLBPC1;
+ /* 0x0F */ u32 MLBC1;
+ /* 0x10 */ u32 rsvd5[0x10];
+ /* 0x20 */ u32 HCTL;
+ /* 0x21 */ u32 rsvd6[1];
+ /* 0x22 */ u32 HCMR0;
+ /* 0x23 */ u32 HCMR1;
+ /* 0x24 */ u32 HCER0;
+ /* 0x25 */ u32 HCER1;
+ /* 0x26 */ u32 HCBR0;
+ /* 0x27 */ u32 HCBR1;
+ /* 0x28 */ u32 rsvd7[8];
+ /* 0x30 */ u32 MDAT0;
+ /* 0x31 */ u32 MDAT1;
+ /* 0x32 */ u32 MDAT2;
+ /* 0x33 */ u32 MDAT3;
+ /* 0x34 */ u32 MDWE0;
+ /* 0x35 */ u32 MDWE1;
+ /* 0x36 */ u32 MDWE2;
+ /* 0x37 */ u32 MDWE3;
+ /* 0x38 */ u32 MCTL;
+ /* 0x39 */ u32 MADR;
+ /* 0x3A */ u32 rsvd8[0xB6];
+ /* 0xF0 */ u32 ACTL;
+ /* 0xF1 */ u32 rsvd9[3];
+ /* 0xF4 */ u32 ACSR0;
+ /* 0xF5 */ u32 ACSR1;
+ /* 0xF6 */ u32 ACMR0;
+ /* 0xF7 */ u32 ACMR1;
+};
+
+#define DIM2_MASK(n) (~((~(u32)0) << (n)))
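+/* e.g. DIM2_MASK(6) == 0x3f: the low six bits, as used by CAT_CL_MASK below */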
+
+enum {
+ MLBC0_MLBLK_BIT = 7,
+
+ MLBC0_MLBPEN_BIT = 5,
+
+ MLBC0_MLBCLK_SHIFT = 2,
+ MLBC0_MLBCLK_VAL_256FS = 0,
+ MLBC0_MLBCLK_VAL_512FS = 1,
+ MLBC0_MLBCLK_VAL_1024FS = 2,
+ MLBC0_MLBCLK_VAL_2048FS = 3,
+
+ MLBC0_FCNT_SHIFT = 15,
+ MLBC0_FCNT_MASK = 7,
+ MLBC0_FCNT_MAX_VAL = 6,
+
+ MLBC0_MLBEN_BIT = 0,
+
+ MIEN_CTX_BREAK_BIT = 29,
+ MIEN_CTX_PE_BIT = 28,
+ MIEN_CTX_DONE_BIT = 27,
+
+ MIEN_CRX_BREAK_BIT = 26,
+ MIEN_CRX_PE_BIT = 25,
+ MIEN_CRX_DONE_BIT = 24,
+
+ MIEN_ATX_BREAK_BIT = 22,
+ MIEN_ATX_PE_BIT = 21,
+ MIEN_ATX_DONE_BIT = 20,
+
+ MIEN_ARX_BREAK_BIT = 19,
+ MIEN_ARX_PE_BIT = 18,
+ MIEN_ARX_DONE_BIT = 17,
+
+ MIEN_SYNC_PE_BIT = 16,
+
+ MIEN_ISOC_BUFO_BIT = 1,
+ MIEN_ISOC_PE_BIT = 0,
+
+ MLBC1_NDA_SHIFT = 8,
+ MLBC1_NDA_MASK = 0xFF,
+
+ MLBC1_CLKMERR_BIT = 7,
+ MLBC1_LOCKERR_BIT = 6,
+
+ ACTL_DMA_MODE_BIT = 2,
+ ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
+ ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
+ ACTL_SCE_BIT = 0,
+
+ HCTL_EN_BIT = 15
+};
+
+enum {
+ CDT1_BS_ISOC_SHIFT = 0,
+ CDT1_BS_ISOC_MASK = DIM2_MASK(9),
+
+ CDT3_BD_SHIFT = 0,
+ CDT3_BD_MASK = DIM2_MASK(12),
+ CDT3_BD_ISOC_MASK = DIM2_MASK(13),
+ CDT3_BA_SHIFT = 16,
+
+ ADT0_CE_BIT = 15,
+ ADT0_LE_BIT = 14,
+ ADT0_PG_BIT = 13,
+
+ ADT1_RDY_BIT = 15,
+ ADT1_DNE_BIT = 14,
+ ADT1_ERR_BIT = 13,
+ ADT1_PS_BIT = 12,
+ ADT1_MEP_BIT = 11,
+ ADT1_BD_SHIFT = 0,
+ ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
+ ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
+
+ CAT_MFE_BIT = 14,
+
+ CAT_MT_BIT = 13,
+
+ CAT_RNW_BIT = 12,
+
+ CAT_CE_BIT = 11,
+
+ CAT_CT_SHIFT = 8,
+ CAT_CT_VAL_SYNC = 0,
+ CAT_CT_VAL_CONTROL = 1,
+ CAT_CT_VAL_ASYNC = 2,
+ CAT_CT_VAL_ISOC = 3,
+
+ CAT_CL_SHIFT = 0,
+ CAT_CL_MASK = DIM2_MASK(6)
+};
+
+#endif /* DIM2_OS62420_H */
diff --git a/driver/hdm-dim2/dim2_sysfs.c b/driver/hdm-dim2/dim2_sysfs.c
new file mode 100644
index 0000000..2b28e4a
--- /dev/null
+++ b/driver/hdm-dim2/dim2_sysfs.c
@@ -0,0 +1,115 @@
+/*
+ * dim2_sysfs.c - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include "dim2_sysfs.h"
+
+struct bus_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct medialb_bus *bus, char *buf);
+ ssize_t (*store)(struct medialb_bus *bus, const char *buf,
+ size_t count);
+};
+
+static ssize_t state_show(struct medialb_bus *bus, char *buf)
+{
+ bool state = dim2_sysfs_get_state_cb();
+
+ return sprintf(buf, "%s\n", state ? "locked" : "");
+}
+
+static struct bus_attr state_attr = __ATTR_RO(state);
+
+static struct attribute *bus_default_attrs[] = {
+ &state_attr.attr,
+ NULL,
+};
+
+static struct attribute_group bus_attr_group = {
+ .attrs = bus_default_attrs,
+};
+
+static void bus_kobj_release(struct kobject *kobj)
+{
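+ /*
+ * Nothing to free here: the kobject is embedded in struct medialb_bus,
+ * whose storage belongs to the owning dim2_hdm device.
+ */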
+}
+
+static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct medialb_bus *bus =
+ container_of(kobj, struct medialb_bus, kobj_group);
+ struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+ if (!xattr->show)
+ return -EIO;
+
+ return xattr->show(bus, buf);
+}
+
+static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct medialb_bus *bus =
+ container_of(kobj, struct medialb_bus, kobj_group);
+ struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+ if (!xattr->store)
+ return -EIO;
+
+ return xattr->store(bus, buf, count);
+}
+
+static const struct sysfs_ops bus_kobj_sysfs_ops = {
+ .show = bus_kobj_attr_show,
+ .store = bus_kobj_attr_store,
+};
+
+static struct kobj_type bus_ktype = {
+ .release = bus_kobj_release,
+ .sysfs_ops = &bus_kobj_sysfs_ops,
+};
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
+{
+ int err;
+
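+ /* creates the sysfs node <parent>/bus carrying the "state" attribute */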
+ kobject_init(&bus->kobj_group, &bus_ktype);
+ err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
+ if (err) {
+ pr_err("kobject_add() failed: %d\n", err);
+ goto err_kobject_add;
+ }
+
+ err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group() failed: %d\n", err);
+ goto err_create_group;
+ }
+
+ return 0;
+
+err_create_group:
+ kobject_put(&bus->kobj_group);
+
+err_kobject_add:
+ return err;
+}
+
+void dim2_sysfs_destroy(struct medialb_bus *bus)
+{
+ kobject_put(&bus->kobj_group);
+}
diff --git a/driver/hdm-dim2/dim2_sysfs.h b/driver/hdm-dim2/dim2_sysfs.h
new file mode 100644
index 0000000..b71dd02
--- /dev/null
+++ b/driver/hdm-dim2/dim2_sysfs.h
@@ -0,0 +1,36 @@
+/*
+ * dim2_sysfs.h - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#ifndef DIM2_SYSFS_H
+#define DIM2_SYSFS_H
+
+#include <linux/kobject.h>
+
+struct medialb_bus {
+ struct kobject kobj_group;
+};
+
+struct dim2_hdm;
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
+void dim2_sysfs_destroy(struct medialb_bus *bus);
+
+/*
+ * callback,
+ * must deliver MediaLB state as true if locked or false if unlocked
+ */
+bool dim2_sysfs_get_state_cb(void);
+
+#endif /* DIM2_SYSFS_H */
diff --git a/driver/hdm-i2c/hdm_i2c.c b/driver/hdm-i2c/hdm_i2c.c
new file mode 100644
index 0000000..ba0263b
--- /dev/null
+++ b/driver/hdm-i2c/hdm_i2c.c
@@ -0,0 +1,430 @@
+/*
+ * hdm_i2c.c - Hardware Dependent Module for I2C Interface
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+
+#include <mostcore.h>
+
+enum { CH_RX, CH_TX, NUM_CHANNELS };
+
+#define MAX_BUFFERS_CONTROL 32
+#define MAX_BUF_SIZE_CONTROL 256
+
+/**
+ * list_first_mbo - get the first mbo from a list
+ * @ptr: the list head to take the mbo from.
+ */
+#define list_first_mbo(ptr) \
+ list_first_entry(ptr, struct mbo, list)
+
+/* IRQ / Polling option */
+static bool polling_req;
+module_param(polling_req, bool, S_IRUGO);
+MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
+
+/* Polling Rate */
+static int scan_rate = 100;
+module_param(scan_rate, int, 0644);
+MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
+
+struct hdm_i2c {
+ bool is_open[NUM_CHANNELS];
+ bool polling_mode;
+ struct most_interface most_iface;
+ struct most_channel_capability capabilities[NUM_CHANNELS];
+ struct i2c_client *client;
+ struct rx {
+ struct delayed_work dwork;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ struct mutex list_mutex;
+ } rx;
+ char name[64];
+};
+
+#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
+
+/**
+ * configure_channel - called from MOST core to configure a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: channel to be configured
+ * @channel_config: structure that holds the configuration information
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Receives configuration information from the MOST core and initializes the
+ * corresponding channel.
+ */
+static int configure_channel(struct most_interface *most_iface,
+ int ch_idx,
+ struct most_channel_config *channel_config)
+{
+ struct hdm_i2c *dev = to_hdm(most_iface);
+
+ BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+ BUG_ON(dev->is_open[ch_idx]);
+
+ if (channel_config->data_type != MOST_CH_CONTROL) {
+ pr_err("bad data type for channel %d\n", ch_idx);
+ return -EPERM;
+ }
+
+ if (channel_config->direction != dev->capabilities[ch_idx].direction) {
+ pr_err("bad direction for channel %d\n", ch_idx);
+ return -EPERM;
+ }
+
+ if ((channel_config->direction == MOST_CH_RX) && (dev->polling_mode)) {
+ schedule_delayed_work(&dev->rx.dwork,
+ msecs_to_jiffies(MSEC_PER_SEC / 4));
+ }
+ dev->is_open[ch_idx] = true;
+
+ return 0;
+}
+
+/**
+ * enqueue - called from MOST core to enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: ID of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Transmit the data over I2C if it is a "write" request or push the buffer
+ * onto the RX list if it is a "read" request.
+ */
+static int enqueue(struct most_interface *most_iface,
+ int ch_idx, struct mbo *mbo)
+{
+ struct hdm_i2c *dev = to_hdm(most_iface);
+ int ret;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+ BUG_ON(!dev->is_open[ch_idx]);
+
+ if (ch_idx == CH_RX) {
+ /* RX */
+ mutex_lock(&dev->rx.list_mutex);
+ list_add_tail(&mbo->list, &dev->rx.list);
+ mutex_unlock(&dev->rx.list_mutex);
+ wake_up_interruptible(&dev->rx.waitq);
+ } else {
+ /* TX */
+ ret = i2c_master_send(dev->client, mbo->virt_address,
+ mbo->buffer_length);
+ if (ret <= 0) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ } else {
+ mbo->processed_length = mbo->buffer_length;
+ mbo->status = MBO_SUCCESS;
+ }
+ mbo->complete(mbo);
+ }
+
+ return 0;
+}
+
+/**
+ * poison_channel - called from MOST core to poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel ID
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * If the channel direction is RX, complete the buffers in the list with
+ * status MBO_E_CLOSE.
+ */
+static int poison_channel(struct most_interface *most_iface,
+ int ch_idx)
+{
+ struct hdm_i2c *dev = to_hdm(most_iface);
+ struct mbo *mbo;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+ BUG_ON(!dev->is_open[ch_idx]);
+
+ dev->is_open[ch_idx] = false;
+
+ if (ch_idx == CH_RX) {
+ mutex_lock(&dev->rx.list_mutex);
+ while (!list_empty(&dev->rx.list)) {
+ mbo = list_first_mbo(&dev->rx.list);
+ list_del(&mbo->list);
+ mutex_unlock(&dev->rx.list_mutex);
+
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_CLOSE;
+ mbo->complete(mbo);
+
+ mutex_lock(&dev->rx.list_mutex);
+ }
+ mutex_unlock(&dev->rx.list_mutex);
+ wake_up_interruptible(&dev->rx.waitq);
+ }
+
+ return 0;
+}
+
+static void request_netinfo(struct most_interface *most_iface,
+ int ch_idx)
+{
+ pr_info("request_netinfo()\n");
+}
+
+static void do_rx_work(struct hdm_i2c *dev)
+{
+ struct mbo *mbo;
+ unsigned char msg[MAX_BUF_SIZE_CONTROL];
+ int ret, ch_idx = CH_RX;
+ u16 pml, data_size;
+
+ /* Read PML (2 bytes) */
+ ret = i2c_master_recv(dev->client, msg, 2);
+ if (ret <= 0) {
+ pr_err("Failed to receive PML\n");
+ return;
+ }
+
+ pml = (msg[0] << 8) | msg[1];
+ if (!pml)
+ return;
+
+ data_size = pml + 2;
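+ /* e.g. msg = { 0x00, 0x03, ... } yields pml = 3 and data_size = 5 */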
+
+ /* Read the whole message, including PML */
+ ret = i2c_master_recv(dev->client, msg, data_size);
+ if (ret <= 0) {
+ pr_err("Failed to receive a Port Message\n");
+ return;
+ }
+
+ for (;;) {
+ /* Conditions to wait for: poisoned channel or free buffer
+ * available for reading
+ */
+ if (wait_event_interruptible(dev->rx.waitq,
+ !dev->is_open[ch_idx] ||
+ !list_empty(&dev->rx.list))) {
+ pr_err("wait_event_interruptible() failed\n");
+ return;
+ }
+
+ if (!dev->is_open[ch_idx])
+ return;
+
+ mutex_lock(&dev->rx.list_mutex);
+
+ /* list may be empty if poison or remove is called */
+ if (!list_empty(&dev->rx.list))
+ break;
+
+ mutex_unlock(&dev->rx.list_mutex);
+ }
+
+ mbo = list_first_mbo(&dev->rx.list);
+ list_del(&mbo->list);
+ mutex_unlock(&dev->rx.list_mutex);
+
+ mbo->processed_length = min(data_size, mbo->buffer_length);
+ memcpy(mbo->virt_address, msg, mbo->processed_length);
+ mbo->status = MBO_SUCCESS;
+ mbo->complete(mbo);
+}
+
+/**
+ * pending_rx_work - Read pending messages through I2C
+ * @work: definition of this work item
+ *
+ * Invoked by the interrupt service routine most_irq_handler() or, in
+ * polling mode, by the delayed work scheduled from configure_channel()
+ */
+static void pending_rx_work(struct work_struct *work)
+{
+ struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
+
+ do_rx_work(dev);
+
+ if (dev->polling_mode) {
+ if (dev->is_open[CH_RX])
+ schedule_delayed_work(&dev->rx.dwork,
+ msecs_to_jiffies(MSEC_PER_SEC
+ / scan_rate));
+ } else {
+ enable_irq(dev->client->irq);
+ }
+}
+
+/*
+ * most_irq_handler - Interrupt Service Routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Schedules a delayed work
+ *
+ * By default the interrupt line is active low. Once the device generates an
+ * interrupt, it holds the line low until the driver clears the interrupt by
+ * reading the PMP message. Since the i2c read is done in a work queue, the
+ * interrupt line must be disabled temporarily to avoid the ISR being called
+ * repeatedly. The interrupt is re-enabled in the work queue after the
+ * message has been read.
+ *
+ * Note: if the interrupt line were used in falling-edge mode, interrupts
+ * could be missed while the ISR is executing.
+ *
+ */
+static irqreturn_t most_irq_handler(int irq, void *_dev)
+{
+ struct hdm_i2c *dev = _dev;
+
+ disable_irq_nosync(irq);
+
+ schedule_delayed_work(&dev->rx.dwork, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * i2c_probe - i2c probe handler
+ * @client: i2c client device structure
+ * @id: i2c client device id
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Register the i2c client device as a MOST interface
+ */
+static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct hdm_i2c *dev;
+ int ret, i;
+ struct kobject *kobj;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* ID format: i2c-<bus>-<address> */
+ snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
+ client->adapter->nr, client->addr);
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ dev->is_open[i] = false;
+ dev->capabilities[i].data_type = MOST_CH_CONTROL;
+ dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
+ dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
+ }
+ dev->capabilities[CH_RX].direction = MOST_CH_RX;
+ dev->capabilities[CH_RX].name_suffix = "rx";
+ dev->capabilities[CH_TX].direction = MOST_CH_TX;
+ dev->capabilities[CH_TX].name_suffix = "tx";
+
+ dev->most_iface.interface = ITYPE_I2C;
+ dev->most_iface.description = dev->name;
+ dev->most_iface.num_channels = NUM_CHANNELS;
+ dev->most_iface.channel_vector = dev->capabilities;
+ dev->most_iface.configure = configure_channel;
+ dev->most_iface.enqueue = enqueue;
+ dev->most_iface.poison_channel = poison_channel;
+ dev->most_iface.request_netinfo = request_netinfo;
+
+ INIT_LIST_HEAD(&dev->rx.list);
+ mutex_init(&dev->rx.list_mutex);
+ init_waitqueue_head(&dev->rx.waitq);
+
+ INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
+
+ dev->client = client;
+ i2c_set_clientdata(client, dev);
+
+ kobj = most_register_interface(&dev->most_iface);
+ if (IS_ERR(kobj)) {
+ pr_err("Failed to register i2c as a MOST interface\n");
+ kfree(dev);
+ return PTR_ERR(kobj);
+ }
+
+ dev->polling_mode = polling_req || client->irq <= 0;
+ if (!dev->polling_mode) {
+ pr_info("Requesting IRQ: %d\n", client->irq);
+ ret = request_irq(client->irq, most_irq_handler, 0,
+ client->name, dev);
+ if (ret) {
+ pr_info("IRQ request failed: %d, falling back to polling\n",
+ ret);
+ dev->polling_mode = true;
+ }
+ }
+
+ if (dev->polling_mode)
+ pr_info("Using polling at rate: %d times/sec\n", scan_rate);
+
+ return 0;
+}
+
+/*
+ * i2c_remove - i2c remove handler
+ * @client: i2c client device structure
+ *
+ * Return 0 on success.
+ *
+ * Unregister the i2c client device as a MOST interface
+ */
+static int i2c_remove(struct i2c_client *client)
+{
+ struct hdm_i2c *dev = i2c_get_clientdata(client);
+ int i;
+
+ if (!dev->polling_mode)
+ free_irq(client->irq, dev);
+
+ most_deregister_interface(&dev->most_iface);
+
+ for (i = 0; i < NUM_CHANNELS; i++)
+ if (dev->is_open[i])
+ poison_channel(&dev->most_iface, i);
+ cancel_delayed_work_sync(&dev->rx.dwork);
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id i2c_id[] = {
+ { "most_i2c", 0 },
+ { }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(i2c, i2c_id);
+
+static struct i2c_driver i2c_driver = {
+ .driver = {
+ .name = "hdm_i2c",
+ },
+ .probe = i2c_probe,
+ .remove = i2c_remove,
+ .id_table = i2c_id,
+};
+
+module_i2c_driver(i2c_driver);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("I2C Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/driver/hdm-usb/hdm_usb.c b/driver/hdm-usb/hdm_usb.c
new file mode 100644
index 0000000..8c88184
--- /dev/null
+++ b/driver/hdm-usb/hdm_usb.c
@@ -0,0 +1,1407 @@
+/*
+ * hdm_usb.c - Hardware dependent module for USB
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include "mostcore.h"
+#include "networking.h"
+
+#define USB_MTU 512
+#define NO_ISOCHRONOUS_URB 0
+#define AV_PACKETS_PER_XACT 2
+#define BUF_CHAIN_SIZE 0xFFFF
+#define MAX_NUM_ENDPOINTS 30
+#define MAX_SUFFIX_LEN 10
+#define MAX_STRING_LEN 80
+#define MAX_BUF_SIZE 0xFFFF
+
+#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
+#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
+#define USB_DEV_ID_OS81118 0xCF18 /* PID: USB OS81118 */
+#define USB_DEV_ID_OS81119 0xCF19 /* PID: USB OS81119 */
+#define USB_DEV_ID_OS81210 0xCF30 /* PID: USB OS81210 */
+/* DRCI Addresses */
+#define DRCI_REG_NI_STATE 0x0100
+#define DRCI_REG_PACKET_BW 0x0101
+#define DRCI_REG_NODE_ADDR 0x0102
+#define DRCI_REG_NODE_POS 0x0103
+#define DRCI_REG_MEP_FILTER 0x0140
+#define DRCI_REG_HASH_TBL0 0x0141
+#define DRCI_REG_HASH_TBL1 0x0142
+#define DRCI_REG_HASH_TBL2 0x0143
+#define DRCI_REG_HASH_TBL3 0x0144
+#define DRCI_REG_HW_ADDR_HI 0x0145
+#define DRCI_REG_HW_ADDR_MI 0x0146
+#define DRCI_REG_HW_ADDR_LO 0x0147
+#define DRCI_REG_BASE 0x1100
+#define DRCI_COMMAND 0x02
+#define DRCI_READ_REQ 0xA0
+#define DRCI_WRITE_REQ 0xA1
+
+/**
+ * struct buf_anchor - used to create a list of pending URBs
+ * @urb: pointer to USB request block
+ * @list: linked list
+ */
+struct buf_anchor {
+ struct urb *urb;
+ struct list_head list;
+};
+
+/**
+ * struct most_dci_obj - Direct Communication Interface
+ * @kobj: position in sysfs
+ * @usb_device: pointer to the usb device
+ */
+struct most_dci_obj {
+ struct kobject kobj;
+ struct usb_device *usb_device;
+};
+
+#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
+
+struct most_dev;
+
+struct clear_hold_work {
+ struct work_struct ws;
+ struct most_dev *mdev;
+ unsigned int channel;
+ int pipe;
+};
+
+#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
+
+/**
+ * struct most_dev - holds all usb interface specific stuff
+ * @parent: parent object in sysfs
+ * @usb_device: pointer to usb device
+ * @iface: hardware interface
+ * @cap: channel capabilities
+ * @conf: channel configuration
+ * @dci: direct communication interface of hardware
+ * @hw_addr: MAC address of hardware
+ * @ep_address: endpoint address table
+ * @link_stat: link status of hardware
+ * @description: device description
+ * @suffix: suffix for channel name
+ * @anchor_list_lock: locks list access
+ * @padding_active: indicates channel uses padding
+ * @is_channel_healthy: health status table of each channel
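+ * @clear_work: deferred clear_halt work items, one per endpoint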
+ * @anchor_list: list of anchored items
+ * @io_mutex: synchronize I/O with disconnect
+ * @link_stat_timer: timer for link status reports
+ * @poll_work_obj: work for polling link status
+ */
+struct most_dev {
+ struct kobject *parent;
+ struct usb_device *usb_device;
+ struct most_interface iface;
+ struct most_channel_capability *cap;
+ struct most_channel_config *conf;
+ struct most_dci_obj *dci;
+ u8 hw_addr[6];
+ u8 *ep_address;
+ u16 link_stat;
+ char description[MAX_STRING_LEN];
+ char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
+ spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
+ bool padding_active[MAX_NUM_ENDPOINTS];
+ bool is_channel_healthy[MAX_NUM_ENDPOINTS];
+ struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
+ struct list_head *anchor_list;
+ struct mutex io_mutex;
+ struct timer_list link_stat_timer;
+ struct work_struct poll_work_obj;
+};
+
+#define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
+
+static void wq_clear_halt(struct work_struct *wq_obj);
+static void wq_netinfo(struct work_struct *wq_obj);
+
+/**
+ * drci_rd_reg - read a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @buf: buffer to store data
+ *
+ * This reads data from the INIC's direct register communication interface.
+ */
+static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
+{
+ int retval;
+ u16 *dma_buf = kzalloc(sizeof(u16), GFP_KERNEL);
+ u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+
+ if (!dma_buf)
+ return -ENOMEM;
+
+ retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ DRCI_READ_REQ, req_type,
+ 0x0000,
+ reg, dma_buf, sizeof(u16), 5 * HZ);
+ *buf = le16_to_cpu(*dma_buf);
+ kfree(dma_buf);
+
+ return retval;
+}
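+
+/*
+ * Usage sketch (illustrative only): read the 16-bit node address via DCI.
+ *
+ *	u16 addr;
+ *	int err = drci_rd_reg(usb_device, DRCI_REG_NODE_ADDR, &addr);
+ *
+ *	if (err < 0)
+ *		dev_err(&usb_device->dev, "DCI read failed: %d\n", err);
+ */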
+
+/**
+ * drci_wr_reg - write a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @data: data to write
+ *
+ * This writes data to the INIC's direct register communication interface.
+ */
+static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
+{
+ return usb_control_msg(dev,
+ usb_sndctrlpipe(dev, 0),
+ DRCI_WRITE_REQ,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ data,
+ reg,
+ NULL,
+ 0,
+ 5 * HZ);
+}
+
+/**
+ * free_anchored_buffers - free device's anchored items
+ * @mdev: the device
+ * @channel: channel ID
+ * @status: status of MBO termination
+ */
+static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
+ enum mbo_status_flags status)
+{
+ struct mbo *mbo;
+ struct buf_anchor *anchor, *tmp;
+ spinlock_t *lock = mdev->anchor_list_lock + channel;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel],
+ list) {
+ struct urb *urb = anchor->urb;
+
+ spin_unlock_irqrestore(lock, flags);
+ if (likely(urb)) {
+ mbo = urb->context;
+ usb_kill_urb(urb);
+ if (mbo && mbo->complete) {
+ mbo->status = status;
+ mbo->processed_length = 0;
+ mbo->complete(mbo);
+ }
+ usb_free_urb(urb);
+ }
+ spin_lock_irqsave(lock, flags);
+ list_del(&anchor->list);
+ kfree(anchor);
+ }
+ spin_unlock_irqrestore(lock, flags);
+}
+
+/**
+ * get_stream_frame_size - calculate frame size of current configuration
+ * @cfg: channel configuration
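+ *
+ * Example: for MOST_CH_SYNC with subbuffer_size 4 and packets_per_xact 2,
+ * the frame size is 2 * 4 = 8 bytes; with packets_per_xact 0xFF, frames
+ * are packed up to the USB_MTU boundary: (512 / 4) * 4 = 512 bytes.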
+ */
+static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
+{
+ unsigned int frame_size = 0;
+ unsigned int sub_size = cfg->subbuffer_size;
+
+ if (!sub_size) {
+ pr_warn("Misconfig: Subbuffer size zero.\n");
+ return frame_size;
+ }
+ switch (cfg->data_type) {
+ case MOST_CH_ISOC_AVP:
+ frame_size = AV_PACKETS_PER_XACT * sub_size;
+ break;
+ case MOST_CH_SYNC:
+ if (cfg->packets_per_xact == 0) {
+ pr_warn("Misconfig: Packets per XACT zero\n");
+ frame_size = 0;
+ } else if (cfg->packets_per_xact == 0xFF) {
+ frame_size = (USB_MTU / sub_size) * sub_size;
+ } else {
+ frame_size = cfg->packets_per_xact * sub_size;
+ }
+ break;
+ default:
+ pr_warn("Query frame size of non-streaming channel\n");
+ break;
+ }
+ return frame_size;
+}
+
+/**
+ * hdm_poison_channel - mark buffers of this channel as invalid
+ * @iface: pointer to the interface
+ * @channel: channel ID
+ *
+ * This unlinks all URBs submitted to the HCD,
+ * calls the associated completion function of the core and removes
+ * them from the list.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int hdm_poison_channel(struct most_interface *iface, int channel)
+{
+ struct most_dev *mdev;
+ unsigned long flags;
+ spinlock_t *lock;
+
+ if (unlikely(!iface)) {
+ pr_warn("Poison: Bad interface.\n");
+ return -EIO;
+ }
+ mdev = to_mdev(iface);
+ if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+ dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
+ return -ECHRNG;
+ }
+
+ lock = mdev->anchor_list_lock + channel;
+ spin_lock_irqsave(lock, flags);
+ mdev->is_channel_healthy[channel] = false;
+ spin_unlock_irqrestore(lock, flags);
+
+ cancel_work_sync(&mdev->clear_work[channel].ws);
+
+ mutex_lock(&mdev->io_mutex);
+ free_anchored_buffers(mdev, channel, MBO_E_CLOSE);
+ if (mdev->padding_active[channel])
+ mdev->padding_active[channel] = false;
+
+ if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
+ del_timer_sync(&mdev->link_stat_timer);
+ cancel_work_sync(&mdev->poll_work_obj);
+ }
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
+/**
+ * hdm_add_padding - add padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This inserts the INIC hardware specific padding bytes into a streaming
+ * channel's buffer
+ */
+static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
+{
+ struct most_channel_config *conf = &mdev->conf[channel];
+ unsigned int j, num_frames, frame_size;
+ u16 rd_addr, wr_addr;
+
+ frame_size = get_stream_frame_size(conf);
+ if (!frame_size)
+ return -EIO;
+ num_frames = mbo->buffer_length / frame_size;
+
+ if (num_frames < 1) {
+ dev_err(&mdev->usb_device->dev,
+ "Missed minimal transfer unit.\n");
+ return -EIO;
+ }
+
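+ /*
+ * Spread the tightly packed frames out to USB_MTU-aligned slots,
+ * moving back to front so no frame is overwritten before it has
+ * been copied.
+ */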
+ for (j = 1; j < num_frames; j++) {
+ wr_addr = (num_frames - j) * USB_MTU;
+ rd_addr = (num_frames - j) * frame_size;
+ memmove(mbo->virt_address + wr_addr,
+ mbo->virt_address + rd_addr,
+ frame_size);
+ }
+ mbo->buffer_length = num_frames * USB_MTU;
+ return 0;
+}
+
+/**
+ * hdm_remove_padding - remove padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This takes the INIC hardware specific padding bytes off a streaming
+ * channel's buffer.
+ */
+static int hdm_remove_padding(struct most_dev *mdev, int channel,
+ struct mbo *mbo)
+{
+ unsigned int j, num_frames, frame_size;
+ struct most_channel_config *const conf = &mdev->conf[channel];
+
+ frame_size = get_stream_frame_size(conf);
+ if (!frame_size)
+ return -EIO;
+ num_frames = mbo->processed_length / USB_MTU;
+
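+ /* compact the USB_MTU-aligned frames back into a contiguous buffer */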
+ for (j = 1; j < num_frames; j++)
+ memmove(mbo->virt_address + frame_size * j,
+ mbo->virt_address + USB_MTU * j,
+ frame_size);
+
+ mbo->processed_length = frame_size * num_frames;
+ return 0;
+}
+
+/**
+ * hdm_write_completion - completion function for submitted Tx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. If the URB was unlinked or the
+ * channel was poisoned, the handler returns at once: the code that unlinked
+ * the URB frees it and completes the MBO. On any other error the MBO status
+ * is set accordingly. On success it frees allocated resources and calls
+ * the completion function.
+ *
+ * Context: interrupt!
+ */
+static void hdm_write_completion(struct urb *urb)
+{
+ struct mbo *mbo = urb->context;
+ struct buf_anchor *anchor = mbo->priv;
+ struct most_dev *mdev = to_mdev(mbo->ifp);
+ unsigned int channel = mbo->hdm_channel_id;
+ struct device *dev = &mdev->usb_device->dev;
+ spinlock_t *lock = mdev->anchor_list_lock + channel;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ !mdev->is_channel_healthy[channel]) {
+ spin_unlock_irqrestore(lock, flags);
+ return;
+ }
+
+ if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
+ mbo->processed_length = 0;
+ switch (urb->status) {
+ case -EPIPE:
+ dev_warn(dev, "Broken OUT pipe detected\n");
+ mdev->is_channel_healthy[channel] = false;
+ spin_unlock_irqrestore(lock, flags);
+ mdev->clear_work[channel].pipe = urb->pipe;
+ schedule_work(&mdev->clear_work[channel].ws);
+ return;
+ case -ENODEV:
+ case -EPROTO:
+ mbo->status = MBO_E_CLOSE;
+ break;
+ default:
+ mbo->status = MBO_E_INVAL;
+ break;
+ }
+ } else {
+ mbo->status = MBO_SUCCESS;
+ mbo->processed_length = urb->actual_length;
+ }
+
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(lock, flags);
+ kfree(anchor);
+
+ if (likely(mbo->complete))
+ mbo->complete(mbo);
+ usb_free_urb(urb);
+}
+
+/**
+ * hdm_read_completion - completion function for submitted Rx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. If the URB was unlinked or the
+ * channel was poisoned, the handler returns at once: the code that unlinked
+ * the URB frees it and completes the MBO. On any other error the MBO status
+ * is set accordingly. On success it frees allocated resources, removes
+ * padding bytes (if necessary) and calls the completion function.
+ *
+ * Context: interrupt!
+ *
+ * **************************************************************************
+ * Error codes returned by in urb->status
+ * or in iso_frame_desc[n].status (for ISO)
+ * *************************************************************************
+ *
+ * USB device drivers may only test urb status values in completion handlers.
+ * This is because otherwise there would be a race between HCDs updating
+ * these values on one CPU, and device drivers testing them on another CPU.
+ *
+ * A transfer's actual_length may be positive even when an error has been
+ * reported. That's because transfers often involve several packets, so that
+ * one or more packets could finish before an error stops further endpoint I/O.
+ *
+ * For isochronous URBs, the urb status value is non-zero only if the URB is
+ * unlinked, the device is removed, the host controller is disabled or the total
+ * transferred length is less than the requested length and the URB_SHORT_NOT_OK
+ * flag is set. Completion handlers for isochronous URBs should only see
+ * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+ * Individual frame descriptor status fields may report more status codes.
+ *
+ *
+ * 0 Transfer completed successfully
+ *
+ * -ENOENT URB was synchronously unlinked by usb_unlink_urb
+ *
+ * -EINPROGRESS URB still pending, no results yet
+ * (That is, if drivers see this it's a bug.)
+ *
+ * -EPROTO (*, **) a) bitstuff error
+ * b) no response packet received within the
+ * prescribed bus turn-around time
+ * c) unknown USB error
+ *
+ * -EILSEQ (*, **) a) CRC mismatch
+ * b) no response packet received within the
+ * prescribed bus turn-around time
+ * c) unknown USB error
+ *
+ * Note that often the controller hardware does not
+ * distinguish among cases a), b), and c), so a
+ * driver cannot tell whether there was a protocol
+ * error, a failure to respond (often caused by
+ * device disconnect), or some other fault.
+ *
+ * -ETIME (**) No response packet received within the prescribed
+ * bus turn-around time. This error may instead be
+ * reported as -EPROTO or -EILSEQ.
+ *
+ * -ETIMEDOUT Synchronous USB message functions use this code
+ * to indicate timeout expired before the transfer
+ * completed, and no other error was reported by HC.
+ *
+ * -EPIPE (**) Endpoint stalled. For non-control endpoints,
+ * reset this status with usb_clear_halt().
+ *
+ * -ECOMM During an IN transfer, the host controller
+ * received data from an endpoint faster than it
+ * could be written to system memory
+ *
+ * -ENOSR During an OUT transfer, the host controller
+ * could not retrieve data from system memory fast
+ * enough to keep up with the USB data rate
+ *
+ * -EOVERFLOW (*) The amount of data returned by the endpoint was
+ * greater than either the max packet size of the
+ * endpoint or the remaining buffer size. "Babble".
+ *
+ * -EREMOTEIO The data read from the endpoint did not fill the
+ * specified buffer, and URB_SHORT_NOT_OK was set in
+ * urb->transfer_flags.
+ *
+ * -ENODEV Device was removed. Often preceded by a burst of
+ * other errors, since the hub driver doesn't detect
+ * device removal events immediately.
+ *
+ * -EXDEV ISO transfer only partially completed
+ * (only set in iso_frame_desc[n].status, not urb->status)
+ *
+ * -EINVAL ISO madness, if this happens: Log off and go home
+ *
+ * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
+ *
+ * -ESHUTDOWN The device or host controller has been disabled due
+ * to some problem that could not be worked around,
+ * such as a physical disconnect.
+ *
+ *
+ * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
+ * hardware problems such as bad devices (including firmware) or cables.
+ *
+ * (**) This is also one of several codes that different kinds of host
+ * controller use to indicate a transfer has failed because of device
+ * disconnect. In the interval before the hub driver starts disconnect
+ * processing, devices may receive such fault reports for every request.
+ *
+ * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
+ */
+static void hdm_read_completion(struct urb *urb)
+{
+ struct mbo *mbo = urb->context;
+ struct buf_anchor *anchor = mbo->priv;
+ struct most_dev *mdev = to_mdev(mbo->ifp);
+ unsigned int channel = mbo->hdm_channel_id;
+ struct device *dev = &mdev->usb_device->dev;
+ spinlock_t *lock = mdev->anchor_list_lock + channel;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ !mdev->is_channel_healthy[channel]) {
+ spin_unlock_irqrestore(lock, flags);
+ return;
+ }
+
+ if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
+ mbo->processed_length = 0;
+ switch (urb->status) {
+ case -EPIPE:
+ dev_warn(dev, "Broken IN pipe detected\n");
+ mdev->is_channel_healthy[channel] = false;
+ spin_unlock_irqrestore(lock, flags);
+ mdev->clear_work[channel].pipe = urb->pipe;
+ schedule_work(&mdev->clear_work[channel].ws);
+ return;
+ case -ENODEV:
+ case -EPROTO:
+ mbo->status = MBO_E_CLOSE;
+ break;
+ case -EOVERFLOW:
+ dev_warn(dev, "Babble on IN pipe detected\n");
+ default:
+ mbo->status = MBO_E_INVAL;
+ break;
+ }
+ } else {
+ mbo->processed_length = urb->actual_length;
+ mbo->status = MBO_SUCCESS;
+ if (mdev->padding_active[channel] &&
+ hdm_remove_padding(mdev, channel, mbo)) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ }
+ }
+
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(lock, flags);
+ kfree(anchor);
+
+ if (likely(mbo->complete))
+ mbo->complete(mbo);
+ usb_free_urb(urb);
+}
+
+/**
+ * hdm_enqueue - receive a buffer to be used for data transfer
+ * @iface: interface to enqueue to
+ * @channel: ID of the channel
+ * @mbo: pointer to the buffer object
+ *
+ * This allocates a new URB and fills it according to the channel
+ * that is being used for transmission of data. Before the URB is
+ * submitted it is stored in the private anchor list.
+ *
+ * Returns 0 on success. On any error the URB is freed and an error code
+ * is returned.
+ *
+ * Context: Could in _some_ cases be interrupt!
+ */
+static int hdm_enqueue(struct most_interface *iface, int channel,
+ struct mbo *mbo)
+{
+ struct most_dev *mdev;
+ struct buf_anchor *anchor;
+ struct most_channel_config *conf;
+ struct device *dev;
+ int retval = 0;
+ struct urb *urb;
+ unsigned long flags;
+ unsigned long length;
+ void *virt_address;
+ spinlock_t *lock;
+
+ if (unlikely(!iface || !mbo))
+ return -EIO;
+ if (unlikely(iface->num_channels <= channel || channel < 0))
+ return -ECHRNG;
+
+ mdev = to_mdev(iface);
+ conf = &mdev->conf[channel];
+
+ if (!mdev->usb_device)
+ return -ENODEV;
+
+ dev = &mdev->usb_device->dev;
+
+ urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
+ if (!urb) {
+ dev_err(dev, "Failed to allocate URB\n");
+ return -ENOMEM;
+ }
+
+ anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
+ if (!anchor) {
+ retval = -ENOMEM;
+ goto _error;
+ }
+
+ anchor->urb = urb;
+ mbo->priv = anchor;
+
+ if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
+ hdm_add_padding(mdev, channel, mbo)) {
+ retval = -EIO;
+ goto _error;
+ }
+
+ urb->transfer_dma = mbo->bus_address;
+ virt_address = mbo->virt_address;
+ length = mbo->buffer_length;
+
+ if (conf->direction & MOST_CH_TX) {
+ usb_fill_bulk_urb(urb, mdev->usb_device,
+ usb_sndbulkpipe(mdev->usb_device,
+ mdev->ep_address[channel]),
+ virt_address,
+ length,
+ hdm_write_completion,
+ mbo);
+ if (conf->data_type != MOST_CH_ISOC_AVP)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ } else {
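+ /*
+ * RX: request length + extra_len so the INIC padding fits;
+ * hdm_remove_padding() strips it again on completion.
+ */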
+ usb_fill_bulk_urb(urb, mdev->usb_device,
+ usb_rcvbulkpipe(mdev->usb_device,
+ mdev->ep_address[channel]),
+ virt_address,
+ length + conf->extra_len,
+ hdm_read_completion,
+ mbo);
+ }
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ lock = mdev->anchor_list_lock + channel;
+ spin_lock_irqsave(lock, flags);
+ list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
+ spin_unlock_irqrestore(lock, flags);
+
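+ /* GFP_ATOMIC, since enqueue may be called in interrupt context (see above) */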
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval) {
+ dev_err(dev, "URB submit failed with error %d.\n", retval);
+ goto _error_1;
+ }
+ return 0;
+
+_error_1:
+ spin_lock_irqsave(lock, flags);
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(lock, flags);
+ kfree(anchor);
+_error:
+ usb_free_urb(urb);
+ return retval;
+}
+
+/**
+ * hdm_configure_channel - receive channel configuration from core
+ * @iface: interface
+ * @channel: channel ID
+ * @conf: structure that holds the configuration information
+ */
+static int hdm_configure_channel(struct most_interface *iface, int channel,
+ struct most_channel_config *conf)
+{
+ unsigned int num_frames;
+ unsigned int frame_size;
+ unsigned int temp_size;
+ unsigned int tail_space;
+ struct most_dev *mdev;
+ struct device *dev;
+
+ if (unlikely(!iface || !conf)) {
+ pr_err("Bad interface or config pointer.\n");
+ return -EINVAL;
+ }
+ mdev = to_mdev(iface);
+ dev = &mdev->usb_device->dev;
+
+ if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+ dev_err(dev, "Channel ID out of range.\n");
+ return -EINVAL;
+ }
+ if (!conf->num_buffers || !conf->buffer_size) {
+ dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
+ return -EINVAL;
+ }
+
+ mdev->is_channel_healthy[channel] = true;
+ mdev->clear_work[channel].channel = channel;
+ mdev->clear_work[channel].mdev = mdev;
+ INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
+
+ if (conf->data_type != MOST_CH_SYNC &&
+ !(conf->data_type == MOST_CH_ISOC_AVP &&
+ conf->packets_per_xact != 0xFF)) {
+ mdev->padding_active[channel] = false;
+ goto exit;
+ }
+
+ mdev->padding_active[channel] = true;
+ temp_size = conf->buffer_size;
+
+ frame_size = get_stream_frame_size(conf);
+ if (frame_size == 0 || frame_size > USB_MTU) {
+ dev_warn(dev, "Misconfig: frame size wrong\n");
+ return -EINVAL;
+ }
+
+ if (conf->buffer_size % frame_size) {
+ u16 tmp_val;
+
+ tmp_val = conf->buffer_size / frame_size;
+ conf->buffer_size = tmp_val * frame_size;
+ dev_notice(dev,
+ "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
+ channel,
+ conf->buffer_size,
+ temp_size);
+ }
+
+ num_frames = conf->buffer_size / frame_size;
+ tail_space = num_frames * (USB_MTU - frame_size);
+ temp_size += tail_space;
+
+ /* calculate extra length to comply w/ HW padding */
+ conf->extra_len = (DIV_ROUND_UP(temp_size, USB_MTU) * USB_MTU)
+ - conf->buffer_size;
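+ /*
+ * Example: frame_size 6 and buffer_size 30 give num_frames 5,
+ * tail_space 5 * (512 - 6) = 2530, temp_size 2560 and thus
+ * extra_len 2560 - 30 = 2530 bytes of headroom.
+ */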
+exit:
+ mdev->conf[channel] = *conf;
+ return 0;
+}
+
+/**
+ * hdm_update_netinfo - retrieve latest networking information
+ * @mdev: device interface
+ *
+ * This triggers the USB vendor requests to read the hardware address and
+ * the current link status of the attached device.
+ */
+static int hdm_update_netinfo(struct most_dev *mdev)
+{
+ struct usb_device *usb_device = mdev->usb_device;
+ struct device *dev = &usb_device->dev;
+ u16 hi, mi, lo, link;
+
+ if (!is_valid_ether_addr(mdev->hw_addr)) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+ dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
+ return -1;
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+ dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
+ return -1;
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+ dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
+ return -1;
+ }
+
+ mutex_lock(&mdev->io_mutex);
+ mdev->hw_addr[0] = hi >> 8;
+ mdev->hw_addr[1] = hi;
+ mdev->hw_addr[2] = mi >> 8;
+ mdev->hw_addr[3] = mi;
+ mdev->hw_addr[4] = lo >> 8;
+ mdev->hw_addr[5] = lo;
+ mutex_unlock(&mdev->io_mutex);
+ }
+
+ if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+ dev_err(dev, "Vendor request \"link status\" failed\n");
+ return -1;
+ }
+
+ mutex_lock(&mdev->io_mutex);
+ mdev->link_stat = link;
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
+/**
+ * hdm_request_netinfo - request network information
+ * @iface: pointer to interface
+ * @channel: channel ID
+ *
+ * This is used as trigger to set up the link status timer that
+ * polls for the NI state of the INIC every 2 seconds.
+ *
+ */
+static void hdm_request_netinfo(struct most_interface *iface, int channel)
+{
+ struct most_dev *mdev;
+
+ BUG_ON(!iface);
+ mdev = to_mdev(iface);
+ mod_timer(&mdev->link_stat_timer, jiffies + HZ);
+}
+
+/**
+ * link_stat_timer_handler - add work to link_stat work queue
+ * @data: pointer to USB device instance
+ *
+ * The handler runs in interrupt context. That's why we need to defer the
+ * tasks to a work queue.
+ */
+static void link_stat_timer_handler(unsigned long data)
+{
+ struct most_dev *mdev = (struct most_dev *)data;
+
+ schedule_work(&mdev->poll_work_obj);
+ mod_timer(&mdev->link_stat_timer, jiffies + 2 * HZ);
+}
+
+/**
+ * wq_netinfo - work queue function
+ * @wq_obj: object that holds data for our deferred work to do
+ *
+ * This retrieves the network interface status of the USB INIC
+ * and compares it with the current status. If the status has
+ * changed, it updates the status of the core.
+ */
+static void wq_netinfo(struct work_struct *wq_obj)
+{
+ struct most_dev *mdev = to_mdev_from_work(wq_obj);
+ u16 prev_link_stat = mdev->link_stat;
+ u8 prev_hw_addr[6];
+
+ memcpy(prev_hw_addr, mdev->hw_addr, sizeof(prev_hw_addr));
+
+ if (hdm_update_netinfo(mdev) < 0)
+ return;
+ if (prev_link_stat != mdev->link_stat ||
+ memcmp(prev_hw_addr, mdev->hw_addr, sizeof(prev_hw_addr)))
+ most_deliver_netinfo(&mdev->iface, mdev->link_stat,
+ mdev->hw_addr);
+}
+
+/**
+ * wq_clear_halt - work queue function
+ * @wq_obj: work_struct object to execute
+ *
+ * This sends a clear_halt to the given USB pipe.
+ */
+static void wq_clear_halt(struct work_struct *wq_obj)
+{
+ struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
+ struct most_dev *mdev = clear_work->mdev;
+ unsigned int channel = clear_work->channel;
+ int pipe = clear_work->pipe;
+
+ mutex_lock(&mdev->io_mutex);
+ most_stop_enqueue(&mdev->iface, channel);
+ free_anchored_buffers(mdev, channel, MBO_E_INVAL);
+ if (usb_clear_halt(mdev->usb_device, pipe))
+ dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
+
+ mdev->is_channel_healthy[channel] = true;
+ most_resume_enqueue(&mdev->iface, channel);
+ mutex_unlock(&mdev->io_mutex);
+}
+
+/**
+ * hdm_usb_fops - file operation table for USB driver
+ */
+static const struct file_operations hdm_usb_fops = {
+ .owner = THIS_MODULE,
+};
+
+/**
+ * usb_device_id - ID table for HCD device probing
+ */
+static const struct usb_device_id usbid[] = {
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
+ { } /* Terminating entry */
+};
+
+#define MOST_DCI_RO_ATTR(_name) \
+ struct most_dci_attribute most_dci_attr_##_name = \
+ __ATTR(_name, S_IRUGO, show_value, NULL)
+
+#define MOST_DCI_ATTR(_name) \
+ struct most_dci_attribute most_dci_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
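+
+/*
+ * e.g. MOST_DCI_RO_ATTR(ni_state) defines most_dci_attr_ni_state, a
+ * read-only sysfs attribute named "ni_state" backed by show_value().
+ */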
+
+/**
+ * struct most_dci_attribute - to access the attributes of a dci object
+ * @attr: attributes of a dci object
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_dci_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct most_dci_obj *d,
+ struct most_dci_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct most_dci_obj *d,
+ struct most_dci_attribute *attr,
+ const char *buf,
+ size_t count);
+};
+
+#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
+
+/**
+ * dci_attr_show - show function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+ struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+ if (!dci_attr->show)
+ return -EIO;
+
+ return dci_attr->show(dci_obj, dci_attr, buf);
+}
+
+/**
+ * dci_attr_store - store function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t dci_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+ struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+ if (!dci_attr->store)
+ return -EIO;
+
+ return dci_attr->store(dci_obj, dci_attr, buf, len);
+}
+
+static const struct sysfs_ops most_dci_sysfs_ops = {
+ .show = dci_attr_show,
+ .store = dci_attr_store,
+};
+
+/**
+ * most_dci_release - release function for dci object
+ * @kobj: pointer to kobject
+ *
+ * This frees the memory allocated for the dci object
+ */
+static void most_dci_release(struct kobject *kobj)
+{
+ struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+ kfree(dci_obj);
+}
+
+static ssize_t show_value(struct most_dci_obj *dci_obj,
+ struct most_dci_attribute *attr, char *buf)
+{
+ u16 tmp_val;
+ u16 reg_addr;
+ int err;
+
+ if (!strcmp(attr->attr.name, "ni_state"))
+ reg_addr = DRCI_REG_NI_STATE;
+ else if (!strcmp(attr->attr.name, "packet_bandwidth"))
+ reg_addr = DRCI_REG_PACKET_BW;
+ else if (!strcmp(attr->attr.name, "node_address"))
+ reg_addr = DRCI_REG_NODE_ADDR;
+ else if (!strcmp(attr->attr.name, "node_position"))
+ reg_addr = DRCI_REG_NODE_POS;
+ else if (!strcmp(attr->attr.name, "mep_filter"))
+ reg_addr = DRCI_REG_MEP_FILTER;
+ else if (!strcmp(attr->attr.name, "mep_hash0"))
+ reg_addr = DRCI_REG_HASH_TBL0;
+ else if (!strcmp(attr->attr.name, "mep_hash1"))
+ reg_addr = DRCI_REG_HASH_TBL1;
+ else if (!strcmp(attr->attr.name, "mep_hash2"))
+ reg_addr = DRCI_REG_HASH_TBL2;
+ else if (!strcmp(attr->attr.name, "mep_hash3"))
+ reg_addr = DRCI_REG_HASH_TBL3;
+ else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
+ reg_addr = DRCI_REG_HW_ADDR_HI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
+ reg_addr = DRCI_REG_HW_ADDR_MI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
+ reg_addr = DRCI_REG_HW_ADDR_LO;
+ else
+ return -EIO;
+
+ err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
+ if (err < 0)
+ return err;
+
+ return snprintf(buf, PAGE_SIZE, "%04x\n", tmp_val);
+}
+
+static ssize_t store_value(struct most_dci_obj *dci_obj,
+ struct most_dci_attribute *attr,
+ const char *buf, size_t count)
+{
+ u16 val;
+ u16 reg_addr;
+ int err;
+
+ if (!strcmp(attr->attr.name, "mep_filter"))
+ reg_addr = DRCI_REG_MEP_FILTER;
+ else if (!strcmp(attr->attr.name, "mep_hash0"))
+ reg_addr = DRCI_REG_HASH_TBL0;
+ else if (!strcmp(attr->attr.name, "mep_hash1"))
+ reg_addr = DRCI_REG_HASH_TBL1;
+ else if (!strcmp(attr->attr.name, "mep_hash2"))
+ reg_addr = DRCI_REG_HASH_TBL2;
+ else if (!strcmp(attr->attr.name, "mep_hash3"))
+ reg_addr = DRCI_REG_HASH_TBL3;
+ else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
+ reg_addr = DRCI_REG_HW_ADDR_HI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
+ reg_addr = DRCI_REG_HW_ADDR_MI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
+ reg_addr = DRCI_REG_HW_ADDR_LO;
+ else
+ return -EIO;
+
+ err = kstrtou16(buf, 16, &val);
+ if (err)
+ return err;
+
+ err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static MOST_DCI_RO_ATTR(ni_state);
+static MOST_DCI_RO_ATTR(packet_bandwidth);
+static MOST_DCI_RO_ATTR(node_address);
+static MOST_DCI_RO_ATTR(node_position);
+static MOST_DCI_ATTR(mep_filter);
+static MOST_DCI_ATTR(mep_hash0);
+static MOST_DCI_ATTR(mep_hash1);
+static MOST_DCI_ATTR(mep_hash2);
+static MOST_DCI_ATTR(mep_hash3);
+static MOST_DCI_ATTR(mep_eui48_hi);
+static MOST_DCI_ATTR(mep_eui48_mi);
+static MOST_DCI_ATTR(mep_eui48_lo);
+
+/**
+ * most_dci_def_attrs - array of default attribute files of the dci object
+ */
+static struct attribute *most_dci_def_attrs[] = {
+ &most_dci_attr_ni_state.attr,
+ &most_dci_attr_packet_bandwidth.attr,
+ &most_dci_attr_node_address.attr,
+ &most_dci_attr_node_position.attr,
+ &most_dci_attr_mep_filter.attr,
+ &most_dci_attr_mep_hash0.attr,
+ &most_dci_attr_mep_hash1.attr,
+ &most_dci_attr_mep_hash2.attr,
+ &most_dci_attr_mep_hash3.attr,
+ &most_dci_attr_mep_eui48_hi.attr,
+ &most_dci_attr_mep_eui48_mi.attr,
+ &most_dci_attr_mep_eui48_lo.attr,
+ NULL,
+};
+
+/**
+ * DCI ktype
+ */
+static struct kobj_type most_dci_ktype = {
+ .sysfs_ops = &most_dci_sysfs_ops,
+ .release = most_dci_release,
+ .default_attrs = most_dci_def_attrs,
+};
+
+/**
+ * create_most_dci_obj - allocates a dci object
+ * @parent: parent kobject
+ *
+ * This creates a dci object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_dci_obj *create_most_dci_obj(struct kobject *parent)
+{
+ struct most_dci_obj *most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
+ int retval;
+
+ if (!most_dci)
+ return NULL;
+
+ retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
+ "dci");
+ if (retval) {
+ kobject_put(&most_dci->kobj);
+ return NULL;
+ }
+ return most_dci;
+}
+
+/**
+ * destroy_most_dci_obj - destroys a DCI object
+ * @p: pointer to dci object
+ *
+ * This drops the reference to the DCI object's kobject. Once the
+ * reference count turns zero, its release function is called.
+ */
+static void destroy_most_dci_obj(struct most_dci_obj *p)
+{
+ kobject_put(&p->kobj);
+}
+
+/**
+ * hdm_probe - probe function of USB device driver
+ * @interface: Interface of the attached USB device
+ * @id: Pointer to the matching entry of the USB ID table
+ *
+ * This allocates and initializes the device instance, adds the new
+ * entry to the internal list, scans the USB descriptors and registers
+ * the interface with the core.
+ * Additionally, the DCI objects are created and the hardware is
+ * synchronized.
+ *
+ * Returns 0 on success or a negative error code otherwise.
+ */
+static int
+hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
+{
+ struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
+ struct usb_device *usb_dev = interface_to_usbdev(interface);
+ struct device *dev = &usb_dev->dev;
+ struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ unsigned int i;
+ unsigned int num_endpoints;
+ struct most_channel_capability *tmp_cap;
+ struct usb_endpoint_descriptor *ep_desc;
+ int ret = 0;
+ int err;
+
+ if (!mdev)
+ goto exit_ENOMEM;
+
+ usb_set_intfdata(interface, mdev);
+ num_endpoints = usb_iface_desc->desc.bNumEndpoints;
+ mutex_init(&mdev->io_mutex);
+ INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
+ setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
+ (unsigned long)mdev);
+
+ mdev->usb_device = usb_dev;
+ mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+
+ mdev->iface.mod = hdm_usb_fops.owner;
+ mdev->iface.interface = ITYPE_USB;
+ mdev->iface.configure = hdm_configure_channel;
+ mdev->iface.request_netinfo = hdm_request_netinfo;
+ mdev->iface.enqueue = hdm_enqueue;
+ mdev->iface.poison_channel = hdm_poison_channel;
+ mdev->iface.description = mdev->description;
+ mdev->iface.num_channels = num_endpoints;
+
+ snprintf(mdev->description, sizeof(mdev->description),
+ "usb_device %d-%s:%d.%d",
+ usb_dev->bus->busnum,
+ usb_dev->devpath,
+ usb_dev->config->desc.bConfigurationValue,
+ usb_iface_desc->desc.bInterfaceNumber);
+
+ mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
+ if (!mdev->conf)
+ goto exit_free;
+
+ mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
+ if (!mdev->cap)
+ goto exit_free1;
+
+ mdev->iface.channel_vector = mdev->cap;
+ mdev->iface.priv = NULL;
+
+ mdev->ep_address =
+ kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
+ if (!mdev->ep_address)
+ goto exit_free2;
+
+ mdev->anchor_list =
+ kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
+ if (!mdev->anchor_list)
+ goto exit_free3;
+
+ tmp_cap = mdev->cap;
+ for (i = 0; i < num_endpoints; i++) {
+ ep_desc = &usb_iface_desc->endpoint[i].desc;
+ mdev->ep_address[i] = ep_desc->bEndpointAddress;
+ mdev->padding_active[i] = false;
+ mdev->is_channel_healthy[i] = true;
+
+ snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
+ mdev->ep_address[i]);
+
+ tmp_cap->name_suffix = &mdev->suffix[i][0];
+ tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
+ tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
+ tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
+ tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
+ tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+ MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+ if (usb_endpoint_dir_in(ep_desc))
+ tmp_cap->direction = MOST_CH_RX;
+ else
+ tmp_cap->direction = MOST_CH_TX;
+ tmp_cap++;
+ INIT_LIST_HEAD(&mdev->anchor_list[i]);
+ spin_lock_init(&mdev->anchor_list_lock[i]);
+ err = drci_wr_reg(usb_dev,
+ DRCI_REG_BASE + DRCI_COMMAND +
+ ep_desc->bEndpointAddress * 16,
+ 1);
+ if (err < 0)
+			pr_warn("DCI Sync for EP %02x failed\n",
+ ep_desc->bEndpointAddress);
+ }
+ dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
+ le16_to_cpu(usb_dev->descriptor.idVendor),
+ le16_to_cpu(usb_dev->descriptor.idProduct),
+ usb_dev->bus->busnum,
+ usb_dev->devnum);
+
+ dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
+ usb_dev->bus->busnum,
+ usb_dev->devpath,
+ usb_dev->config->desc.bConfigurationValue,
+ usb_iface_desc->desc.bInterfaceNumber);
+
+ mdev->parent = most_register_interface(&mdev->iface);
+ if (IS_ERR(mdev->parent)) {
+ ret = PTR_ERR(mdev->parent);
+ goto exit_free4;
+ }
+
+ mutex_lock(&mdev->io_mutex);
+ if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
+ le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
+ le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
+ /* this increments the reference count of the instance
+ * object of the core
+ */
+ mdev->dci = create_most_dci_obj(mdev->parent);
+ if (!mdev->dci) {
+ mutex_unlock(&mdev->io_mutex);
+ most_deregister_interface(&mdev->iface);
+ ret = -ENOMEM;
+ goto exit_free4;
+ }
+
+ kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
+ mdev->dci->usb_device = mdev->usb_device;
+ }
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+
+exit_free4:
+ kfree(mdev->anchor_list);
+exit_free3:
+ kfree(mdev->ep_address);
+exit_free2:
+ kfree(mdev->cap);
+exit_free1:
+ kfree(mdev->conf);
+exit_free:
+ kfree(mdev);
+exit_ENOMEM:
+ if (ret == 0 || ret == -ENOMEM) {
+ ret = -ENOMEM;
+ dev_err(dev, "out of memory\n");
+ }
+ return ret;
+}
+
+/**
+ * hdm_disconnect - disconnect function of USB device driver
+ * @interface: Interface of the attached USB device
+ *
+ * This deregisters the interface with the core, removes the kernel timer
+ * and frees resources.
+ *
+ * Context: hub kernel thread
+ */
+static void hdm_disconnect(struct usb_interface *interface)
+{
+ struct most_dev *mdev = usb_get_intfdata(interface);
+
+ mutex_lock(&mdev->io_mutex);
+ usb_set_intfdata(interface, NULL);
+ mdev->usb_device = NULL;
+ mutex_unlock(&mdev->io_mutex);
+
+ del_timer_sync(&mdev->link_stat_timer);
+ cancel_work_sync(&mdev->poll_work_obj);
+
+ destroy_most_dci_obj(mdev->dci);
+ most_deregister_interface(&mdev->iface);
+
+ kfree(mdev->anchor_list);
+ kfree(mdev->cap);
+ kfree(mdev->conf);
+ kfree(mdev->ep_address);
+ kfree(mdev);
+}
+
+static struct usb_driver hdm_usb = {
+ .name = "hdm_usb",
+ .id_table = usbid,
+ .probe = hdm_probe,
+ .disconnect = hdm_disconnect,
+};
+
+static int __init hdm_usb_init(void)
+{
+ pr_info("hdm_usb_init()\n");
+ if (usb_register(&hdm_usb)) {
+ pr_err("could not register hdm_usb driver\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit hdm_usb_exit(void)
+{
+ pr_info("hdm_usb_exit()\n");
+ usb_deregister(&hdm_usb);
+}
+
+module_init(hdm_usb_init);
+module_exit(hdm_usb_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("HDM_4_USB");
diff --git a/driver/mostcore/core.c b/driver/mostcore/core.c
new file mode 100644
index 0000000..81dbac9
--- /dev/null
+++ b/driver/mostcore/core.c
@@ -0,0 +1,1961 @@
+/*
+ * core.c - Implementation of core module of MOST Linux driver stack
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/sysfs.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include "mostcore.h"
+
+#define MAX_CHANNELS 64
+#define STRING_SIZE 80
+
+static struct class *most_class;
+static struct device *core_dev;
+static struct ida mdev_id;
+static int dummy_num_buffers;
+
+struct most_c_aim_obj {
+ struct most_aim *ptr;
+ int refs;
+ int num_buffers;
+};
+
+struct most_c_obj {
+ struct kobject kobj;
+ struct completion cleanup;
+ atomic_t mbo_ref;
+ atomic_t mbo_nq_level;
+ u16 channel_id;
+ bool is_poisoned;
+ struct mutex start_mutex;
+ struct mutex nq_mutex;
+ int is_starving;
+ struct most_interface *iface;
+ struct most_inst_obj *inst;
+ struct most_channel_config cfg;
+ bool keep_mbo;
+ bool enqueue_halt;
+ struct list_head fifo;
+ spinlock_t fifo_lock;
+ struct list_head halt_fifo;
+ struct list_head list;
+ struct most_c_aim_obj aim0;
+ struct most_c_aim_obj aim1;
+ struct list_head trash_fifo;
+ struct task_struct *hdm_enqueue_task;
+ wait_queue_head_t hdm_fifo_wq;
+};
+
+#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
+
+struct most_inst_obj {
+ int dev_id;
+ struct most_interface *iface;
+ struct list_head channel_list;
+ struct most_c_obj *channel[MAX_CHANNELS];
+ struct kobject kobj;
+ struct list_head list;
+};
+
+static const struct {
+ int most_ch_data_type;
+ char *name;
+} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" },
+ { MOST_CH_ASYNC, "async\n" },
+ { MOST_CH_SYNC, "sync\n" },
+ { MOST_CH_ISOC_AVP, "isoc_avp\n"} };
+
+#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
+
+/**
+ * list_pop_mbo - retrieves the first MBO of the list and removes it
+ * @ptr: the list head to grab the MBO from.
+ */
+#define list_pop_mbo(ptr) \
+({ \
+ struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
+ list_del(&_mbo->list); \
+ _mbo; \
+})
+
+/* ___ ___
+ * ___C H A N N E L___
+ */
+
+/**
+ * struct most_c_attr - to access the attributes of a channel object
+ * @attr: attributes of a channel
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_c_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct most_c_obj *d,
+ struct most_c_attr *attr,
+ char *buf);
+ ssize_t (*store)(struct most_c_obj *d,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count);
+};
+
+#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
+
+#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
+ struct most_c_attr most_chnl_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+/**
+ * channel_attr_show - show function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ */
+static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct most_c_attr *channel_attr = to_channel_attr(attr);
+ struct most_c_obj *c_obj = to_c_obj(kobj);
+
+ if (!channel_attr->show)
+ return -EIO;
+
+ return channel_attr->show(c_obj, channel_attr, buf);
+}
+
+/**
+ * channel_attr_store - store function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t channel_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_c_attr *channel_attr = to_channel_attr(attr);
+ struct most_c_obj *c_obj = to_c_obj(kobj);
+
+ if (!channel_attr->store)
+ return -EIO;
+ return channel_attr->store(c_obj, channel_attr, buf, len);
+}
+
+static const struct sysfs_ops most_channel_sysfs_ops = {
+ .show = channel_attr_show,
+ .store = channel_attr_store,
+};
+
+/**
+ * most_free_mbo_coherent - free an MBO and its coherent buffer
+ * @mbo: buffer to be released
+ *
+ */
+static void most_free_mbo_coherent(struct mbo *mbo)
+{
+ struct most_c_obj *c = mbo->context;
+ u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+ dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
+ mbo->bus_address);
+ kfree(mbo);
+ if (atomic_sub_and_test(1, &c->mbo_ref))
+ complete(&c->cleanup);
+}
+
+/**
+ * flush_channel_fifos - clear the channel fifos
+ * @c: pointer to channel object
+ */
+static void flush_channel_fifos(struct most_c_obj *c)
+{
+ unsigned long flags, hf_flags;
+ struct mbo *mbo, *tmp;
+
+ if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
+ return;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ spin_lock_irqsave(&c->fifo_lock, hf_flags);
+ list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, hf_flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+
+	if (unlikely(!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))
+		pr_info("WARN: fifo | halt fifo not empty\n");
+}
+
+/**
+ * flush_trash_fifo - clear the trash fifo
+ * @c: pointer to channel object
+ */
+static int flush_trash_fifo(struct most_c_obj *c)
+{
+ struct mbo *mbo, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return 0;
+}
+
+/**
+ * most_channel_release - release function of channel object
+ * @kobj: pointer to channel's kobject
+ */
+static void most_channel_release(struct kobject *kobj)
+{
+ struct most_c_obj *c = to_c_obj(kobj);
+
+ kfree(c);
+}
+
+static ssize_t show_available_directions(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ strcpy(buf, "");
+ if (c->iface->channel_vector[i].direction & MOST_CH_RX)
+ strcat(buf, "dir_rx ");
+ if (c->iface->channel_vector[i].direction & MOST_CH_TX)
+ strcat(buf, "dir_tx ");
+ strcat(buf, "\n");
+ return strlen(buf) + 1;
+}
+
+static ssize_t show_available_datatypes(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ strcpy(buf, "");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
+ strcat(buf, "control ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
+ strcat(buf, "async ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
+ strcat(buf, "sync ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
+ strcat(buf, "isoc_avp ");
+ strcat(buf, "\n");
+ return strlen(buf) + 1;
+}
+
+static
+ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].num_buffers_packet);
+}
+
+static
+ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].num_buffers_streaming);
+}
+
+static
+ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].buffer_size_packet);
+}
+
+static
+ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].buffer_size_streaming);
+}
+
+static ssize_t show_channel_starving(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
+}
+
+#define create_show_channel_attribute(val) \
+ static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
+
+create_show_channel_attribute(available_directions);
+create_show_channel_attribute(available_datatypes);
+create_show_channel_attribute(number_of_packet_buffers);
+create_show_channel_attribute(number_of_stream_buffers);
+create_show_channel_attribute(size_of_stream_buffer);
+create_show_channel_attribute(size_of_packet_buffer);
+create_show_channel_attribute(channel_starving);
+
+static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
+}
+
+static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t show_set_buffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
+}
+
+static ssize_t store_set_buffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t show_set_direction(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ if (c->cfg.direction & MOST_CH_TX)
+ return snprintf(buf, PAGE_SIZE, "dir_tx\n");
+ else if (c->cfg.direction & MOST_CH_RX)
+ return snprintf(buf, PAGE_SIZE, "dir_rx\n");
+ return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t store_set_direction(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ if (!strcmp(buf, "dir_rx\n")) {
+ c->cfg.direction = MOST_CH_RX;
+ } else if (!strcmp(buf, "dir_tx\n")) {
+ c->cfg.direction = MOST_CH_TX;
+ } else {
+ pr_info("WARN: invalid attribute settings\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t show_set_datatype(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
+			return snprintf(buf, PAGE_SIZE, "%s",
+					ch_data_type[i].name);
+ }
+ return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t store_set_datatype(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (!strcmp(buf, ch_data_type[i].name)) {
+ c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ch_data_type)) {
+ pr_info("WARN: invalid attribute settings\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
+}
+
+static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
+}
+
+static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+#define create_channel_attribute(value) \
+ static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
+ show_##value, \
+ store_##value)
+
+create_channel_attribute(set_buffer_size);
+create_channel_attribute(set_number_of_buffers);
+create_channel_attribute(set_direction);
+create_channel_attribute(set_datatype);
+create_channel_attribute(set_subbuffer_size);
+create_channel_attribute(set_packets_per_xact);
+
+/**
+ * most_channel_def_attrs - array of default attributes of channel object
+ */
+static struct attribute *most_channel_def_attrs[] = {
+ &most_chnl_attr_available_directions.attr,
+ &most_chnl_attr_available_datatypes.attr,
+ &most_chnl_attr_number_of_packet_buffers.attr,
+ &most_chnl_attr_number_of_stream_buffers.attr,
+ &most_chnl_attr_size_of_packet_buffer.attr,
+ &most_chnl_attr_size_of_stream_buffer.attr,
+ &most_chnl_attr_set_number_of_buffers.attr,
+ &most_chnl_attr_set_buffer_size.attr,
+ &most_chnl_attr_set_direction.attr,
+ &most_chnl_attr_set_datatype.attr,
+ &most_chnl_attr_set_subbuffer_size.attr,
+ &most_chnl_attr_set_packets_per_xact.attr,
+ &most_chnl_attr_channel_starving.attr,
+ NULL,
+};
+
+static struct kobj_type most_channel_ktype = {
+ .sysfs_ops = &most_channel_sysfs_ops,
+ .release = most_channel_release,
+ .default_attrs = most_channel_def_attrs,
+};
+
+static struct kset *most_channel_kset;
+
+/**
+ * create_most_c_obj - allocates a channel object
+ * @name: name of the channel object
+ * @parent: parent kobject
+ *
+ * This creates a channel object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_c_obj *
+create_most_c_obj(const char *name, struct kobject *parent)
+{
+ struct most_c_obj *c;
+ int retval;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return NULL;
+ c->kobj.kset = most_channel_kset;
+ retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
+ "%s", name);
+ if (retval) {
+ kobject_put(&c->kobj);
+ return NULL;
+ }
+ kobject_uevent(&c->kobj, KOBJ_ADD);
+ return c;
+}
+
+/* ___ ___
+ * ___I N S T A N C E___
+ */
+#define MOST_INST_ATTR(_name, _mode, _show, _store) \
+ struct most_inst_attribute most_inst_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static struct list_head instance_list;
+
+/**
+ * struct most_inst_attribute - to access the attributes of instance object
+ * @attr: attributes of an instance
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_inst_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct most_inst_obj *d,
+ struct most_inst_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct most_inst_obj *d,
+ struct most_inst_attribute *attr,
+ const char *buf,
+ size_t count);
+};
+
+#define to_instance_attr(a) \
+ container_of(a, struct most_inst_attribute, attr)
+
+/**
+ * instance_attr_show - show function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct most_inst_attribute *instance_attr;
+ struct most_inst_obj *instance_obj;
+
+ instance_attr = to_instance_attr(attr);
+ instance_obj = to_inst_obj(kobj);
+
+ if (!instance_attr->show)
+ return -EIO;
+
+ return instance_attr->show(instance_obj, instance_attr, buf);
+}
+
+/**
+ * instance_attr_store - store function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t instance_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_inst_attribute *instance_attr;
+ struct most_inst_obj *instance_obj;
+
+ instance_attr = to_instance_attr(attr);
+ instance_obj = to_inst_obj(kobj);
+
+ if (!instance_attr->store)
+ return -EIO;
+
+ return instance_attr->store(instance_obj, instance_attr, buf, len);
+}
+
+static const struct sysfs_ops most_inst_sysfs_ops = {
+ .show = instance_attr_show,
+ .store = instance_attr_store,
+};
+
+/**
+ * most_inst_release - release function for instance object
+ * @kobj: pointer to instance's kobject
+ *
+ * This frees the allocated memory for the instance object
+ */
+static void most_inst_release(struct kobject *kobj)
+{
+ struct most_inst_obj *inst = to_inst_obj(kobj);
+
+ kfree(inst);
+}
+
+static ssize_t show_description(struct most_inst_obj *instance_obj,
+ struct most_inst_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ instance_obj->iface->description);
+}
+
+static ssize_t show_interface(struct most_inst_obj *instance_obj,
+ struct most_inst_attribute *attr,
+ char *buf)
+{
+ switch (instance_obj->iface->interface) {
+ case ITYPE_LOOPBACK:
+ return snprintf(buf, PAGE_SIZE, "loopback\n");
+ case ITYPE_I2C:
+ return snprintf(buf, PAGE_SIZE, "i2c\n");
+ case ITYPE_I2S:
+ return snprintf(buf, PAGE_SIZE, "i2s\n");
+ case ITYPE_TSI:
+ return snprintf(buf, PAGE_SIZE, "tsi\n");
+ case ITYPE_HBI:
+ return snprintf(buf, PAGE_SIZE, "hbi\n");
+ case ITYPE_MEDIALB_DIM:
+ return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
+ case ITYPE_MEDIALB_DIM2:
+ return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
+ case ITYPE_USB:
+ return snprintf(buf, PAGE_SIZE, "usb\n");
+ case ITYPE_PCIE:
+ return snprintf(buf, PAGE_SIZE, "pcie\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+#define create_inst_attribute(value) \
+ static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
+
+create_inst_attribute(description);
+create_inst_attribute(interface);
+
+static struct attribute *most_inst_def_attrs[] = {
+ &most_inst_attr_description.attr,
+ &most_inst_attr_interface.attr,
+ NULL,
+};
+
+static struct kobj_type most_inst_ktype = {
+ .sysfs_ops = &most_inst_sysfs_ops,
+ .release = most_inst_release,
+ .default_attrs = most_inst_def_attrs,
+};
+
+static struct kset *most_inst_kset;
+
+/**
+ * create_most_inst_obj - creates an instance object
+ * @name: name of the object to be created
+ *
+ * This allocates memory for an instance structure, assigns the proper kset
+ * and registers it with sysfs.
+ *
+ * Returns a pointer to the instance object or NULL when something went wrong.
+ */
+static struct most_inst_obj *create_most_inst_obj(const char *name)
+{
+ struct most_inst_obj *inst;
+ int retval;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return NULL;
+ inst->kobj.kset = most_inst_kset;
+ retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
+ "%s", name);
+ if (retval) {
+ kobject_put(&inst->kobj);
+ return NULL;
+ }
+ kobject_uevent(&inst->kobj, KOBJ_ADD);
+ return inst;
+}
+
+/**
+ * destroy_most_inst_obj - MOST instance release function
+ * @inst: pointer to the instance object
+ *
+ * This releases all channels of the instance, then decrements the
+ * reference counter of the instance object itself. If the reference
+ * count turns zero, its release function is called.
+ */
+static void destroy_most_inst_obj(struct most_inst_obj *inst)
+{
+ struct most_c_obj *c, *tmp;
+
+ list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+ kobject_put(&c->kobj);
+ }
+ kobject_put(&inst->kobj);
+}
+
+/* ___ ___
+ * ___A I M___
+ */
+struct most_aim_obj {
+ struct kobject kobj;
+ struct list_head list;
+ struct most_aim *driver;
+ char add_link[STRING_SIZE];
+ char remove_link[STRING_SIZE];
+};
+
+#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
+
+static struct list_head aim_list;
+
+/**
+ * struct most_aim_attribute - to access the attributes of AIM object
+ * @attr: attributes of an AIM
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_aim_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct most_aim_obj *d,
+ struct most_aim_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct most_aim_obj *d,
+ struct most_aim_attribute *attr,
+ const char *buf,
+ size_t count);
+};
+
+#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
+
+/**
+ * aim_attr_show - show function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t aim_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct most_aim_attribute *aim_attr;
+ struct most_aim_obj *aim_obj;
+
+ aim_attr = to_aim_attr(attr);
+ aim_obj = to_aim_obj(kobj);
+
+ if (!aim_attr->show)
+ return -EIO;
+
+ return aim_attr->show(aim_obj, aim_attr, buf);
+}
+
+/**
+ * aim_attr_store - store function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t aim_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_aim_attribute *aim_attr;
+ struct most_aim_obj *aim_obj;
+
+ aim_attr = to_aim_attr(attr);
+ aim_obj = to_aim_obj(kobj);
+
+ if (!aim_attr->store)
+ return -EIO;
+ return aim_attr->store(aim_obj, aim_attr, buf, len);
+}
+
+static const struct sysfs_ops most_aim_sysfs_ops = {
+ .show = aim_attr_show,
+ .store = aim_attr_store,
+};
+
+/**
+ * most_aim_release - AIM release function
+ * @kobj: pointer to AIM's kobject
+ */
+static void most_aim_release(struct kobject *kobj)
+{
+ struct most_aim_obj *aim_obj = to_aim_obj(kobj);
+
+ kfree(aim_obj);
+}
+
+static ssize_t show_add_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
+}
+
+/**
+ * split_string - parses and modifies the string in the buffer buf and
+ *                splits it into two mandatory substrings and one
+ *                optional substring.
+ *
+ * @buf: complete string from attribute 'add_channel'
+ * @a: address of pointer to 1st substring (=instance name)
+ * @b: address of pointer to 2nd substring (=channel name)
+ * @c: optional address of pointer to 3rd substring (=user defined name)
+ *
+ * Examples:
+ *
+ * Input: "mdev0:ch0@ep_81:my_channel\n" or
+ * "mdev0:ch0@ep_81:my_channel"
+ *
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
+ *
+ * Input: "mdev0:ch0@ep_81\n"
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
+ *
+ * Input: "mdev0:ch0@ep_81"
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
+ */
+static int split_string(char *buf, char **a, char **b, char **c)
+{
+ *a = strsep(&buf, ":");
+ if (!*a)
+ return -EIO;
+
+ *b = strsep(&buf, ":\n");
+ if (!*b)
+ return -EIO;
+
+ if (c)
+ *c = strsep(&buf, ":\n");
+
+ return 0;
+}
+
+/**
+ * get_channel_by_name - get pointer to channel object
+ * @mdev: name of the device instance
+ * @mdev_ch: name of the respective channel
+ *
+ * This retrieves the pointer to a channel object.
+ */
+static struct most_c_obj *
+get_channel_by_name(char *mdev, char *mdev_ch)
+{
+ struct most_c_obj *c, *tmp;
+ struct most_inst_obj *i, *i_tmp;
+ int found = 0;
+
+ list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+ if (!strcmp(kobject_name(&i->kobj), mdev)) {
+ found++;
+ break;
+ }
+ }
+ if (unlikely(!found))
+ return ERR_PTR(-EIO);
+
+ list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+ if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
+ found++;
+ break;
+ }
+ }
+ if (unlikely(found < 2))
+ return ERR_PTR(-EIO);
+ return c;
+}
+
+/**
+ * store_add_link - store() function for add_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * This parses the string given by buf and splits it into
+ * three substrings. Note: the third substring is optional. In case a
+ * cdev AIM is loaded, the optional third substring makes up the name
+ * of the device node in the /dev directory. If omitted, the device node will
+ * inherit the channel's name within sysfs.
+ *
+ * Searches for a pair of device and channel and probes the AIM
+ *
+ * Example:
+ * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
+ * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
+ *
+ * (1) would create the device node /dev/my_rxchannel
+ * (2) would create the device node /dev/mdev0-ch0@ep_81
+ */
+static ssize_t store_add_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_c_obj *c;
+ struct most_aim **aim_ptr;
+ char buffer[STRING_SIZE];
+ char *mdev;
+ char *mdev_ch;
+ char *mdev_devnod;
+ char devnod_buf[STRING_SIZE];
+ int ret;
+ size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+ strlcpy(buffer, buf, max_len);
+ strlcpy(aim_obj->add_link, buf, max_len);
+
+ ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
+ if (ret)
+ return ret;
+
+ if (!mdev_devnod || *mdev_devnod == 0) {
+ snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
+ mdev_ch);
+ mdev_devnod = devnod_buf;
+ }
+
+ c = get_channel_by_name(mdev, mdev_ch);
+ if (IS_ERR(c))
+ return -ENODEV;
+
+ if (!c->aim0.ptr)
+ aim_ptr = &c->aim0.ptr;
+ else if (!c->aim1.ptr)
+ aim_ptr = &c->aim1.ptr;
+ else
+ return -ENOSPC;
+
+ *aim_ptr = aim_obj->driver;
+ ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
+ &c->cfg, &c->kobj, mdev_devnod);
+ if (ret) {
+ *aim_ptr = NULL;
+ return ret;
+ }
+
+ return len;
+}
+
+static struct most_aim_attribute most_aim_attr_add_link =
+ __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
+
+static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
+}
+
+/**
+ * store_remove_link - store function for remove_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * Example:
+ * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
+ */
+static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_c_obj *c;
+ char buffer[STRING_SIZE];
+ char *mdev;
+ char *mdev_ch;
+ int ret;
+ size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+ strlcpy(buffer, buf, max_len);
+ strlcpy(aim_obj->remove_link, buf, max_len);
+ ret = split_string(buffer, &mdev, &mdev_ch, NULL);
+ if (ret)
+ return ret;
+
+ c = get_channel_by_name(mdev, mdev_ch);
+ if (IS_ERR(c))
+ return -ENODEV;
+
+ if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+ return -EIO;
+ if (c->aim0.ptr == aim_obj->driver)
+ c->aim0.ptr = NULL;
+ if (c->aim1.ptr == aim_obj->driver)
+ c->aim1.ptr = NULL;
+ return len;
+}
+
+static struct most_aim_attribute most_aim_attr_remove_link =
+ __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
+ store_remove_link);
+
+static struct attribute *most_aim_def_attrs[] = {
+ &most_aim_attr_add_link.attr,
+ &most_aim_attr_remove_link.attr,
+ NULL,
+};
+
+static struct kobj_type most_aim_ktype = {
+ .sysfs_ops = &most_aim_sysfs_ops,
+ .release = most_aim_release,
+ .default_attrs = most_aim_def_attrs,
+};
+
+static struct kset *most_aim_kset;
+
+/**
+ * create_most_aim_obj - creates an AIM object
+ * @name: name of the AIM
+ *
+ * This creates an AIM object, assigns the proper kset and registers
+ * it with sysfs.
+ * Returns a pointer to the object or NULL if something went wrong.
+ */
+static struct most_aim_obj *create_most_aim_obj(const char *name)
+{
+ struct most_aim_obj *most_aim;
+ int retval;
+
+ most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
+ if (!most_aim)
+ return NULL;
+ most_aim->kobj.kset = most_aim_kset;
+ retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
+ NULL, "%s", name);
+ if (retval) {
+ kobject_put(&most_aim->kobj);
+ return NULL;
+ }
+ kobject_uevent(&most_aim->kobj, KOBJ_ADD);
+ return most_aim;
+}
+
+/**
+ * destroy_most_aim_obj - AIM release function
+ * @p: pointer to AIM object
+ *
+ * This decrements the reference counter of the AIM object. If the
+ * reference count turns zero, its release function will be called.
+ */
+static void destroy_most_aim_obj(struct most_aim_obj *p)
+{
+ kobject_put(&p->kobj);
+}
+
+/* ___ ___
+ * ___C O R E___
+ */
+
+/**
+ * Instantiation of the MOST bus
+ */
+static struct bus_type most_bus = {
+ .name = "most",
+};
+
+/**
+ * Instantiation of the core driver
+ */
+static struct device_driver mostcore = {
+ .name = "mostcore",
+ .bus = &most_bus,
+};
+
+static inline void trash_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_c_obj *c = mbo->context;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add(&mbo->list, &c->trash_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+}
+
+static bool hdm_mbo_ready(struct most_c_obj *c)
+{
+ bool empty;
+
+ if (c->enqueue_halt)
+ return false;
+
+ spin_lock_irq(&c->fifo_lock);
+ empty = list_empty(&c->halt_fifo);
+ spin_unlock_irq(&c->fifo_lock);
+
+ return !empty;
+}
+
+static void nq_hdm_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_c_obj *c = mbo->context;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add_tail(&mbo->list, &c->halt_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ wake_up_interruptible(&c->hdm_fifo_wq);
+}
+
+static int hdm_enqueue_thread(void *data)
+{
+ struct most_c_obj *c = data;
+ struct mbo *mbo;
+ int ret;
+ typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
+
+ while (likely(!kthread_should_stop())) {
+ wait_event_interruptible(c->hdm_fifo_wq,
+ hdm_mbo_ready(c) ||
+ kthread_should_stop());
+
+ mutex_lock(&c->nq_mutex);
+ spin_lock_irq(&c->fifo_lock);
+ if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
+ spin_unlock_irq(&c->fifo_lock);
+ mutex_unlock(&c->nq_mutex);
+ continue;
+ }
+
+ mbo = list_pop_mbo(&c->halt_fifo);
+ spin_unlock_irq(&c->fifo_lock);
+
+ if (c->cfg.direction == MOST_CH_RX)
+ mbo->buffer_length = c->cfg.buffer_size;
+
+ ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
+ mutex_unlock(&c->nq_mutex);
+
+ if (unlikely(ret)) {
+ pr_err("hdm enqueue failed\n");
+ nq_hdm_mbo(mbo);
+ c->hdm_enqueue_task = NULL;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
+{
+ struct task_struct *task =
+ kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
+ channel_id);
+
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ c->hdm_enqueue_task = task;
+ return 0;
+}
+
+/**
+ * arm_mbo - recycle MBO for further usage
+ * @mbo: buffer object
+ *
+ * This puts an MBO back to the list to have it ready for upcoming
+ * tx transactions.
+ *
+ * In case the MBO belongs to a channel that recently has been
+ * poisoned, the MBO is scheduled to be trashed.
+ * Calls the completion handler of an attached AIM.
+ */
+static void arm_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_c_obj *c;
+
+ BUG_ON((!mbo) || (!mbo->context));
+ c = mbo->context;
+
+ if (c->is_poisoned) {
+ trash_mbo(mbo);
+ return;
+ }
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ ++*mbo->num_buffers_ptr;
+ list_add_tail(&mbo->list, &c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ if (c->aim0.refs && c->aim0.ptr->tx_completion)
+ c->aim0.ptr->tx_completion(c->iface, c->channel_id);
+
+ if (c->aim1.refs && c->aim1.ptr->tx_completion)
+ c->aim1.ptr->tx_completion(c->iface, c->channel_id);
+}
+
+/**
+ * arm_mbo_chain - helper function that arms an MBO chain for the HDM
+ * @c: pointer to interface channel
+ * @dir: direction of the channel
+ * @compl: pointer to completion function
+ *
+ * This allocates buffer objects including the containing DMA coherent
+ * buffer and puts them in the fifo.
+ * Buffers of Rx channels are put in the kthread fifo, hence immediately
+ * submitted to the HDM.
+ *
+ * Returns the number of allocated and enqueued MBOs.
+ */
+static int arm_mbo_chain(struct most_c_obj *c, int dir,
+ void (*compl)(struct mbo *))
+{
+ unsigned int i;
+ int retval;
+ struct mbo *mbo;
+ u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+ atomic_set(&c->mbo_nq_level, 0);
+
+ for (i = 0; i < c->cfg.num_buffers; i++) {
+ mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
+ if (!mbo) {
+ retval = i;
+ goto _exit;
+ }
+ mbo->context = c;
+ mbo->ifp = c->iface;
+ mbo->hdm_channel_id = c->channel_id;
+ mbo->virt_address = dma_alloc_coherent(NULL,
+ coherent_buf_size,
+ &mbo->bus_address,
+ GFP_KERNEL);
+ if (!mbo->virt_address) {
+ pr_info("WARN: No DMA coherent buffer.\n");
+ retval = i;
+ goto _error1;
+ }
+ mbo->complete = compl;
+ mbo->num_buffers_ptr = &dummy_num_buffers;
+ if (dir == MOST_CH_RX) {
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+ } else {
+ arm_mbo(mbo);
+ }
+ }
+ return i;
+
+_error1:
+ kfree(mbo);
+_exit:
+ return retval;
+}
+
+/**
+ * most_submit_mbo - submits an MBO to fifo
+ * @mbo: pointer to the MBO
+ *
+ * Returns 0 on success or -EINVAL in case of an invalid MBO.
+ */
+int most_submit_mbo(struct mbo *mbo)
+{
+ if (unlikely((!mbo) || (!mbo->context))) {
+ pr_err("Bad MBO or missing channel reference\n");
+ return -EINVAL;
+ }
+
+ nq_hdm_mbo(mbo);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_submit_mbo);
+
+/**
+ * most_write_completion - write completion handler
+ * @mbo: pointer to MBO
+ *
+ * This recycles the MBO for further usage. In case the channel has been
+ * poisoned, the MBO is scheduled to be trashed.
+ */
+static void most_write_completion(struct mbo *mbo)
+{
+ struct most_c_obj *c;
+
+ BUG_ON((!mbo) || (!mbo->context));
+
+ c = mbo->context;
+ if (mbo->status == MBO_E_INVAL)
+ pr_info("WARN: Tx MBO status: invalid\n");
+ if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
+ trash_mbo(mbo);
+ else
+ arm_mbo(mbo);
+}
+
+/**
+ * get_channel_by_iface - get pointer to channel object
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This retrieves a pointer to a channel of the given interface and channel ID.
+ */
+static struct most_c_obj *
+get_channel_by_iface(struct most_interface *iface, int id)
+{
+ struct most_inst_obj *i;
+
+ if (unlikely(!iface)) {
+ pr_err("Bad interface\n");
+ return NULL;
+ }
+ if (unlikely((id < 0) || (id >= iface->num_channels))) {
+ pr_err("Channel index (%d) out of range\n", id);
+ return NULL;
+ }
+ i = iface->priv;
+ if (unlikely(!i)) {
+ pr_err("interface is not registered\n");
+ return NULL;
+ }
+ return i->channel[id];
+}
+
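+/**
+ * channel_has_mbo - checks if a channel has an MBO available
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @aim: pointer to the requesting AIM
+ *
+ * Returns 1 if an MBO can be fetched with most_get_mbo(), 0 if the
+ * fifo is empty or the requesting AIM's buffer quota is exhausted,
+ * or -EINVAL in case of an invalid channel reference.
+ */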
+int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
+{
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+ unsigned long flags;
+ int empty;
+
+ if (unlikely(!c))
+ return -EINVAL;
+
+ if (c->aim0.refs && c->aim1.refs &&
+ ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+ (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+ return 0;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ empty = list_empty(&c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return !empty;
+}
+EXPORT_SYMBOL_GPL(channel_has_mbo);
+
+/**
+ * most_get_mbo - get pointer to an MBO of pool
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @aim: pointer to the requesting AIM
+ *
+ * This attempts to get a free buffer out of the channel fifo.
+ * Returns a pointer to MBO on success or NULL otherwise.
+ */
+struct mbo *most_get_mbo(struct most_interface *iface, int id,
+ struct most_aim *aim)
+{
+ struct mbo *mbo;
+ struct most_c_obj *c;
+ unsigned long flags;
+ int *num_buffers_ptr;
+
+ c = get_channel_by_iface(iface, id);
+ if (unlikely(!c))
+ return NULL;
+
+ if (c->aim0.refs && c->aim1.refs &&
+ ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+ (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+ return NULL;
+
+ if (aim == c->aim0.ptr)
+ num_buffers_ptr = &c->aim0.num_buffers;
+ else if (aim == c->aim1.ptr)
+ num_buffers_ptr = &c->aim1.num_buffers;
+ else
+ num_buffers_ptr = &dummy_num_buffers;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ if (list_empty(&c->fifo)) {
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return NULL;
+ }
+ mbo = list_pop_mbo(&c->fifo);
+ --*num_buffers_ptr;
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ mbo->num_buffers_ptr = num_buffers_ptr;
+ mbo->buffer_length = c->cfg.buffer_size;
+ return mbo;
+}
+EXPORT_SYMBOL_GPL(most_get_mbo);
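+
+/*
+ * Illustrative AIM-side tx sketch (hypothetical my_aim, data and len;
+ * see the cdev AIM for a complete implementation):
+ *
+ *	struct mbo *mbo = most_get_mbo(iface, channel_id, &my_aim);
+ *
+ *	if (mbo) {
+ *		memcpy(mbo->virt_address, data, len);
+ *		mbo->buffer_length = len;
+ *		most_submit_mbo(mbo);
+ *	}
+ */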
+
+/**
+ * most_put_mbo - return buffer to pool
+ * @mbo: buffer object
+ */
+void most_put_mbo(struct mbo *mbo)
+{
+ struct most_c_obj *c = mbo->context;
+
+ if (c->cfg.direction == MOST_CH_TX) {
+ arm_mbo(mbo);
+ return;
+ }
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+}
+EXPORT_SYMBOL_GPL(most_put_mbo);
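+
+/*
+ * Illustrative AIM rx_completion sketch (hypothetical consume_data()
+ * helper). Returning 0 tells the core the AIM has taken the buffer,
+ * so the AIM returns it to the pool via most_put_mbo() when done:
+ *
+ *	static int my_rx_completion(struct mbo *mbo)
+ *	{
+ *		consume_data(mbo->virt_address, mbo->processed_length);
+ *		most_put_mbo(mbo);
+ *		return 0;
+ *	}
+ */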
+
+/**
+ * most_read_completion - read completion handler
+ * @mbo: pointer to MBO
+ *
+ * This function is called by the HDM when data has been received from the
+ * hardware and copied to the buffer of the MBO.
+ *
+ * In case the channel has been poisoned it puts the buffer in the trash queue.
+ * Otherwise, it passes the buffer to an AIM for further processing.
+ */
+static void most_read_completion(struct mbo *mbo)
+{
+ struct most_c_obj *c = mbo->context;
+
+ if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
+ trash_mbo(mbo);
+ return;
+ }
+
+ if (mbo->status == MBO_E_INVAL) {
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+ return;
+ }
+
+ if (atomic_sub_and_test(1, &c->mbo_nq_level))
+ c->is_starving = 1;
+
+ if (c->aim0.refs && c->aim0.ptr->rx_completion &&
+ c->aim0.ptr->rx_completion(mbo) == 0)
+ return;
+
+ if (c->aim1.refs && c->aim1.ptr->rx_completion &&
+ c->aim1.ptr->rx_completion(mbo) == 0)
+ return;
+
+ most_put_mbo(mbo);
+}
+
+/**
+ * most_start_channel - prepares a channel for communication
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @aim: pointer to the AIM that requests the start
+ *
+ * This prepares the channel for usage. Cross-checks whether the
+ * channel's been properly configured.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+int most_start_channel(struct most_interface *iface, int id,
+ struct most_aim *aim)
+{
+ int num_buffer;
+ int ret;
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+ if (unlikely(!c))
+ return -EINVAL;
+
+ mutex_lock(&c->start_mutex);
+ if (c->aim0.refs + c->aim1.refs > 0)
+ goto out; /* already started by other aim */
+
+ if (!try_module_get(iface->mod)) {
+ pr_info("failed to acquire HDM lock\n");
+ mutex_unlock(&c->start_mutex);
+ return -ENOLCK;
+ }
+
+ c->cfg.extra_len = 0;
+ if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
+ pr_info("channel configuration failed. Go check settings...\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ init_waitqueue_head(&c->hdm_fifo_wq);
+
+ if (c->cfg.direction == MOST_CH_RX)
+ num_buffer = arm_mbo_chain(c, c->cfg.direction,
+ most_read_completion);
+ else
+ num_buffer = arm_mbo_chain(c, c->cfg.direction,
+ most_write_completion);
+ if (unlikely(!num_buffer)) {
+ pr_info("failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = run_enqueue_thread(c, id);
+ if (ret)
+ goto error;
+
+ c->is_starving = 0;
+ c->aim0.num_buffers = c->cfg.num_buffers / 2;
+ c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
+ atomic_set(&c->mbo_ref, num_buffer);
+
+out:
+ if (aim == c->aim0.ptr)
+ c->aim0.refs++;
+ if (aim == c->aim1.ptr)
+ c->aim1.refs++;
+ mutex_unlock(&c->start_mutex);
+ return 0;
+
+error:
+ module_put(iface->mod);
+ mutex_unlock(&c->start_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(most_start_channel);
+
+/**
+ * most_stop_channel - stops a running channel
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @aim: pointer to the AIM that requests the stop
+ */
+int most_stop_channel(struct most_interface *iface, int id,
+ struct most_aim *aim)
+{
+ struct most_c_obj *c;
+
+ if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
+ pr_err("Bad interface or index out of range\n");
+ return -EINVAL;
+ }
+ c = get_channel_by_iface(iface, id);
+ if (unlikely(!c))
+ return -EINVAL;
+
+ mutex_lock(&c->start_mutex);
+ if (c->aim0.refs + c->aim1.refs >= 2)
+ goto out;
+
+ if (c->hdm_enqueue_task)
+ kthread_stop(c->hdm_enqueue_task);
+ c->hdm_enqueue_task = NULL;
+
+ if (iface->mod)
+ module_put(iface->mod);
+
+ c->is_poisoned = true;
+ if (c->iface->poison_channel(c->iface, c->channel_id)) {
+ pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
+ c->iface->description);
+ mutex_unlock(&c->start_mutex);
+ return -EAGAIN;
+ }
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+
+#ifdef CMPL_INTERRUPTIBLE
+ if (wait_for_completion_interruptible(&c->cleanup)) {
+		pr_info("Interrupted while cleaning up ch %d\n", c->channel_id);
+ mutex_unlock(&c->start_mutex);
+ return -EINTR;
+ }
+#else
+ wait_for_completion(&c->cleanup);
+#endif
+ c->is_poisoned = false;
+
+out:
+ if (aim == c->aim0.ptr)
+ c->aim0.refs--;
+ if (aim == c->aim1.ptr)
+ c->aim1.refs--;
+ mutex_unlock(&c->start_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_stop_channel);
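+
+/*
+ * Illustrative start/stop pairing as used from an AIM's probe and
+ * disconnect paths (hypothetical my_aim):
+ *
+ *	ret = most_start_channel(iface, channel_id, &my_aim);
+ *	...
+ *	ret = most_stop_channel(iface, channel_id, &my_aim);
+ */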
+
+/**
+ * most_register_aim - registers an AIM (driver) with the core
+ * @aim: instance of AIM to be registered
+ */
+int most_register_aim(struct most_aim *aim)
+{
+ struct most_aim_obj *aim_obj;
+
+ if (!aim) {
+ pr_err("Bad driver\n");
+ return -EINVAL;
+ }
+ aim_obj = create_most_aim_obj(aim->name);
+ if (!aim_obj) {
+ pr_info("failed to alloc driver object\n");
+ return -ENOMEM;
+ }
+ aim_obj->driver = aim;
+ aim->context = aim_obj;
+ pr_info("registered new application interfacing module %s\n",
+ aim->name);
+ list_add_tail(&aim_obj->list, &aim_list);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_register_aim);
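+
+/*
+ * Illustrative AIM registration sketch (hypothetical callbacks; the
+ * fields correspond to the struct most_aim members used above):
+ *
+ *	static struct most_aim my_aim = {
+ *		.name = "my_aim",
+ *		.probe_channel = my_probe_channel,
+ *		.disconnect_channel = my_disconnect_channel,
+ *		.rx_completion = my_rx_completion,
+ *		.tx_completion = my_tx_completion,
+ *	};
+ *
+ *	ret = most_register_aim(&my_aim);
+ */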
+
+/**
+ * most_deregister_aim - deregisters an AIM (driver) with the core
+ * @aim: AIM to be removed
+ */
+int most_deregister_aim(struct most_aim *aim)
+{
+ struct most_aim_obj *aim_obj;
+ struct most_c_obj *c, *tmp;
+ struct most_inst_obj *i, *i_tmp;
+
+ if (!aim) {
+ pr_err("Bad driver\n");
+ return -EINVAL;
+ }
+
+ aim_obj = aim->context;
+ if (!aim_obj) {
+ pr_info("driver not registered.\n");
+ return -EINVAL;
+ }
+ list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+ list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+ if (c->aim0.ptr == aim || c->aim1.ptr == aim)
+ aim->disconnect_channel(
+ c->iface, c->channel_id);
+ if (c->aim0.ptr == aim)
+ c->aim0.ptr = NULL;
+ if (c->aim1.ptr == aim)
+ c->aim1.ptr = NULL;
+ }
+ }
+ list_del(&aim_obj->list);
+ destroy_most_aim_obj(aim_obj);
+ pr_info("deregistering application interfacing module %s\n", aim->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_deregister_aim);
+
+/**
+ * most_register_interface - registers an interface with core
+ * @iface: pointer to the instance of the interface description.
+ *
+ * Allocates and initializes a new interface instance and all of its channels.
+ * Returns a pointer to kobject or an error pointer.
+ */
+struct kobject *most_register_interface(struct most_interface *iface)
+{
+ unsigned int i;
+ int id;
+ char name[STRING_SIZE];
+ char channel_name[STRING_SIZE];
+ struct most_c_obj *c;
+ struct most_inst_obj *inst;
+
+ if (!iface || !iface->enqueue || !iface->configure ||
+ !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
+ pr_err("Bad interface or channel overflow\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ pr_info("Failed to alloc mdev ID\n");
+ return ERR_PTR(id);
+ }
+ snprintf(name, STRING_SIZE, "mdev%d", id);
+
+ inst = create_most_inst_obj(name);
+ if (!inst) {
+ pr_info("Failed to allocate interface instance\n");
+ ida_simple_remove(&mdev_id, id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iface->priv = inst;
+ INIT_LIST_HEAD(&inst->channel_list);
+ inst->iface = iface;
+ inst->dev_id = id;
+ list_add_tail(&inst->list, &instance_list);
+
+ for (i = 0; i < iface->num_channels; i++) {
+ const char *name_suffix = iface->channel_vector[i].name_suffix;
+
+ if (!name_suffix)
+ snprintf(channel_name, STRING_SIZE, "ch%d", i);
+ else if (name_suffix[0] == '@')
+ snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
+ name_suffix);
+ else
+ snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
+
+ /* this increments the reference count of this instance */
+ c = create_most_c_obj(channel_name, &inst->kobj);
+ if (!c)
+ goto free_instance;
+ inst->channel[i] = c;
+ c->is_starving = 0;
+ c->iface = iface;
+ c->inst = inst;
+ c->channel_id = i;
+ c->keep_mbo = false;
+ c->enqueue_halt = false;
+ c->is_poisoned = false;
+ c->cfg.direction = 0;
+ c->cfg.data_type = 0;
+ c->cfg.num_buffers = 0;
+ c->cfg.buffer_size = 0;
+ c->cfg.subbuffer_size = 0;
+ c->cfg.packets_per_xact = 0;
+ spin_lock_init(&c->fifo_lock);
+ INIT_LIST_HEAD(&c->fifo);
+ INIT_LIST_HEAD(&c->trash_fifo);
+ INIT_LIST_HEAD(&c->halt_fifo);
+ init_completion(&c->cleanup);
+ atomic_set(&c->mbo_ref, 0);
+ mutex_init(&c->start_mutex);
+ mutex_init(&c->nq_mutex);
+ list_add_tail(&c->list, &inst->channel_list);
+ }
+ pr_info("registered new MOST device mdev%d (%s)\n",
+ inst->dev_id, iface->description);
+ return &inst->kobj;
+
+free_instance:
+	pr_info("Failed to allocate channel(s)\n");
+ list_del(&inst->list);
+ ida_simple_remove(&mdev_id, id);
+ destroy_most_inst_obj(inst);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(most_register_interface);
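+
+/*
+ * Illustrative HDM-side registration (cf. hdm_probe() in hdm_usb.c):
+ *
+ *	mdev->parent = most_register_interface(&mdev->iface);
+ *	if (IS_ERR(mdev->parent))
+ *		return PTR_ERR(mdev->parent);
+ */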
+
+/**
+ * most_deregister_interface - deregisters an interface with core
+ * @iface: pointer to the interface instance description.
+ *
+ * Before removing an interface instance from the list, all running
+ * channels are stopped and poisoned.
+ */
+void most_deregister_interface(struct most_interface *iface)
+{
+ struct most_inst_obj *i = iface->priv;
+ struct most_c_obj *c;
+
+ if (unlikely(!i)) {
+ pr_info("Bad Interface\n");
+ return;
+ }
+ pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
+ iface->description);
+
+ list_for_each_entry(c, &i->channel_list, list) {
+ if (c->aim0.ptr)
+ c->aim0.ptr->disconnect_channel(c->iface,
+ c->channel_id);
+ if (c->aim1.ptr)
+ c->aim1.ptr->disconnect_channel(c->iface,
+ c->channel_id);
+ c->aim0.ptr = NULL;
+ c->aim1.ptr = NULL;
+ }
+
+ ida_simple_remove(&mdev_id, i->dev_id);
+ list_del(&i->list);
+ destroy_most_inst_obj(i);
+}
+EXPORT_SYMBOL_GPL(most_deregister_interface);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This is called by an HDM that _cannot_ attend to its duties and
+ * is about to be overrun by the core. The core is not going to
+ * enqueue any further packets unless the flagging HDM calls
+ * most_resume_enqueue().
+ */
+void most_stop_enqueue(struct most_interface *iface, int id)
+{
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+ if (!c)
+ return;
+
+ mutex_lock(&c->nq_mutex);
+ c->enqueue_halt = true;
+ mutex_unlock(&c->nq_mutex);
+}
+EXPORT_SYMBOL_GPL(most_stop_enqueue);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * sitting in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int id)
+{
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+ if (!c)
+ return;
+
+ mutex_lock(&c->nq_mutex);
+ c->enqueue_halt = false;
+ mutex_unlock(&c->nq_mutex);
+
+ wake_up_interruptible(&c->hdm_fifo_wq);
+}
+EXPORT_SYMBOL_GPL(most_resume_enqueue);
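+
+/*
+ * Illustrative HDM-side flow control pairing (sketch only): halt
+ * enqueueing before touching the hardware, resume afterwards.
+ *
+ *	most_stop_enqueue(&mdev->iface, channel_id);
+ *	...reconfigure or drain the hardware...
+ *	most_resume_enqueue(&mdev->iface, channel_id);
+ */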
+
+static int __init most_init(void)
+{
+ int err;
+
+ pr_info("init()\n");
+ INIT_LIST_HEAD(&instance_list);
+ INIT_LIST_HEAD(&aim_list);
+ ida_init(&mdev_id);
+
+ err = bus_register(&most_bus);
+ if (err) {
+ pr_info("Cannot register most bus\n");
+ return err;
+ }
+
+ most_class = class_create(THIS_MODULE, "most");
+ if (IS_ERR(most_class)) {
+ pr_info("No udev support.\n");
+ err = PTR_ERR(most_class);
+ goto exit_bus;
+ }
+
+ err = driver_register(&mostcore);
+ if (err) {
+ pr_info("Cannot register core driver\n");
+ goto exit_class;
+ }
+
+ core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
+ if (IS_ERR(core_dev)) {
+ err = PTR_ERR(core_dev);
+ goto exit_driver;
+ }
+
+ most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
+ if (!most_aim_kset) {
+ err = -ENOMEM;
+ goto exit_class_container;
+ }
+
+ most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
+ if (!most_inst_kset) {
+ err = -ENOMEM;
+ goto exit_driver_kset;
+ }
+
+ return 0;
+
+exit_driver_kset:
+ kset_unregister(most_aim_kset);
+exit_class_container:
+ device_destroy(most_class, 0);
+exit_driver:
+ driver_unregister(&mostcore);
+exit_class:
+ class_destroy(most_class);
+exit_bus:
+ bus_unregister(&most_bus);
+ return err;
+}
+
+static void __exit most_exit(void)
+{
+ struct most_inst_obj *i, *i_tmp;
+ struct most_aim_obj *d, *d_tmp;
+
+ pr_info("exit core module\n");
+ list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
+ destroy_most_aim_obj(d);
+ }
+
+ list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+ list_del(&i->list);
+ destroy_most_inst_obj(i);
+ }
+ kset_unregister(most_inst_kset);
+ kset_unregister(most_aim_kset);
+ device_destroy(most_class, 0);
+ driver_unregister(&mostcore);
+ class_destroy(most_class);
+ bus_unregister(&most_bus);
+ ida_destroy(&mdev_id);
+}
+
+module_init(most_init);
+module_exit(most_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/driver/mostcore/mostcore.h b/driver/mostcore/mostcore.h
new file mode 100644
index 0000000..60e018e
--- /dev/null
+++ b/driver/mostcore/mostcore.h
@@ -0,0 +1,320 @@
+/*
+ * mostcore.h - Interface between MostCore,
+ * Hardware Dependent Module (HDM) and Application Interface Module (AIM).
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/*
+ * Authors:
+ * Andrey Shvetsov <andrey.shvetsov@k2l.de>
+ * Christian Gromm <christian.gromm@microchip.com>
+ * Sebastian Graf
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+
+struct kobject;
+struct module;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+ ITYPE_LOOPBACK = 1,
+ ITYPE_I2C,
+ ITYPE_I2S,
+ ITYPE_TSI,
+ ITYPE_HBI,
+ ITYPE_MEDIALB_DIM,
+ ITYPE_MEDIALB_DIM2,
+ ITYPE_USB,
+ ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+ MOST_CH_RX = 1 << 0,
+ MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+ MOST_CH_CONTROL = 1 << 0,
+ MOST_CH_ASYNC = 1 << 1,
+ MOST_CH_ISOC_AVP = 1 << 2,
+ MOST_CH_SYNC = 1 << 5,
+};
+
+enum mbo_status_flags {
+	/* MBO was processed successfully (data was sent or received) */
+ MBO_SUCCESS = 0,
+ /* The MBO contains wrong or missing information. */
+ MBO_E_INVAL,
+	/* MBO was completed because the HDM channel will be closed */
+ MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is a bitwise OR combination of the values from the
+ * enumeration most_channel_direction. Zero is an allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is a bitwise OR combination of the values from the
+ * enumeration most_channel_data_type. Zero is an allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async, Control, QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async, Control, QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync, AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync, AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is appended to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MostCore channel like supported data types
+ * and directions. This information is provided by an HDM to the MostCore.
+ *
+ * The Core creates read-only sysfs attribute files in
+ * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the
+ * following attributes:
+ * -available_directions
+ * -available_datatypes
+ * -number_of_packet_buffers
+ * -number_of_stream_buffers
+ * -size_of_packet_buffer
+ * -size_of_stream_buffer
+ * where the content of each file is a string listing all supported properties
+ * of that channel attribute.
+ */
+struct most_channel_capability {
+ u16 direction;
+ u16 data_type;
+ u16 num_buffers_packet;
+ u16 buffer_size_packet;
+ u16 num_buffers_streaming;
+ u16 buffer_size_streaming;
+ char *name_suffix;
+};
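+
+/*
+ * Example (illustrative sketch, not part of this patch): an HDM exposing
+ * one packet channel and one streaming channel could describe them as
+ * follows. All numbers are made up; real limits come from the hardware.
+ *
+ *	static struct most_channel_capability my_caps[] = {
+ *		{
+ *			.direction = MOST_CH_RX | MOST_CH_TX,
+ *			.data_type = MOST_CH_CONTROL | MOST_CH_ASYNC,
+ *			.num_buffers_packet = 32,
+ *			.buffer_size_packet = 2048,
+ *		},
+ *		{
+ *			.direction = MOST_CH_RX,
+ *			.data_type = MOST_CH_SYNC,
+ *			.num_buffers_streaming = 8,
+ *			.buffer_size_streaming = 8192,
+ *		},
+ *	};
+ */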
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for the AIM.
+ * The buffer size may be cut down by the HDM in a configure callback
+ * to match a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by the HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ * packet. This is USB-specific.
+ *
+ * Describes the configuration for a MostCore channel. This information is
+ * provided by the MostCore to an HDM (like the Medusa PCIe interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+ u16 num_buffers;
+ u16 buffer_size;
+ u16 extra_len;
+ u16 subbuffer_size;
+ u16 packets_per_xact;
+};
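+
+/*
+ * Sketch of a configure callback (illustrative only, not part of this
+ * patch): an HDM may cut down buffer_size and request padding through
+ * extra_len. MY_HW_MAX_BUF and MY_HW_PAD are hypothetical constants.
+ *
+ *	static int my_hdm_configure(struct most_interface *iface, int ch_idx,
+ *				    struct most_channel_config *cfg)
+ *	{
+ *		if (cfg->buffer_size > MY_HW_MAX_BUF)
+ *			cfg->buffer_size = MY_HW_MAX_BUF;
+ *		cfg->extra_len = MY_HW_PAD;	// internal padding, if any
+ *		return 0;
+ *	}
+ */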
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ * public: documented fields that are used for the communication
+ * between the MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The MostCore allocates and initializes the MBO.
+ *
+ * The HDM receives an MBO for transfer from the MostCore with the call to
+ * enqueue(). The HDM copies the data to or from the buffer depending on the
+ * configured channel direction, sets "processed_length" and "status", and
+ * completes the transfer procedure by calling the completion routine.
+ *
+ * Finally, the MostCore deallocates the MBO or recycles it for further
+ * transfers for the same or a different HDM.
+ *
+ * Directions of usage:
+ * I.
+ * The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ * II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this rule
+ * is violated, memory leaks will occur, since the core driver does _not_
+ * track MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+ void *context;
+ void *priv;
+ struct list_head list;
+ struct most_interface *ifp;
+ int *num_buffers_ptr;
+ u16 hdm_channel_id;
+ void *virt_address;
+ dma_addr_t bus_address;
+ u16 buffer_length;
+ u16 processed_length;
+ enum mbo_status_flags status;
+ void (*complete)(struct mbo *);
+};
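+
+/*
+ * Completion sketch (illustrative only, not part of this patch): once the
+ * hardware is done with a buffer, the HDM fills in the "out" fields and
+ * returns ownership of the MBO to the MostCore. The function name is
+ * hypothetical.
+ *
+ *	static void my_hdm_xfer_done(struct mbo *mbo, u16 done_len)
+ *	{
+ *		mbo->processed_length = done_len;
+ *		mbo->status = MBO_SUCCESS;
+ *		mbo->complete(mbo);	// ownership goes back to the core
+ *	}
+ */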
+
+/**
+ * Interface instance description.
+ *
+ * Describes one instance of an interface like Medusa PCIe or Vantage USB.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @interface Interface type. \sa most_interface_type.
+ * @description PRELIMINARY.
+ * Unique description of the device instance from the point of view of the
+ * interface, in free text form (ASCII).
+ * It may be a hexadecimal representation of the memory address for the
+ * MediaLB IP, or the USB device ID with USB properties for a USB interface,
+ * etc.
+ * @num_channels Number of channels and size of the channel_vector.
+ * @channel_vector Properties of the channels.
+ * The array index represents the channel ID used by the driver.
+ * @configure Callback to change the data type for a channel of the
+ * interface instance. May be zero if the instance of the interface is not
+ * configurable. Parameter channel_config describes direction and data
+ * type for the channel, configured by the higher level. The content of
+ * channel_config may be adjusted by the HDM (e.g. buffer_size and
+ * extra_len, see struct most_channel_config).
+ * @enqueue Delivers an MBO to the HDM for processing.
+ * After the HDM completes an Rx or Tx operation, the processed MBO shall
+ * be returned to the MostCore using the completion routine.
+ * An MBO may still be delivered by the MostCore after the channel has
+ * been poisoned; this happens when the application re-opens the channel.
+ * In this case the HDM shall hold the MBOs and service the channel as usual.
+ * The HDM must be able to hold at least one MBO for each channel.
+ * The callback returns a negative value on error, otherwise 0.
+ * @poison_channel Informs the HDM that the channel is being closed. The HDM
+ * shall cancel all transfers and return all MBOs enqueued for this channel,
+ * synchronously or asynchronously, using the completion routine.
+ * The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieval of network info from the HDM by
+ * means of "Message exchange over MDP/MEP"
+ * @priv Private field used by the mostcore to store context information.
+ */
+struct most_interface {
+ struct module *mod;
+ enum most_interface_type interface;
+ const char *description;
+ int num_channels;
+ struct most_channel_capability *channel_vector;
+ int (*configure)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *channel_config);
+ int (*enqueue)(struct most_interface *iface, int channel_idx,
+ struct mbo *mbo);
+ int (*poison_channel)(struct most_interface *iface, int channel_idx);
+ void (*request_netinfo)(struct most_interface *iface, int channel_idx);
+ void *priv;
+};
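+
+/*
+ * Registration sketch (illustrative only, not part of this patch): an HDM
+ * fills in the interface description and registers it with the MostCore.
+ * my_caps and the my_hdm_* callbacks are hypothetical (see the sketches
+ * above).
+ *
+ *	static struct most_interface my_iface = {
+ *		.interface = ITYPE_MEDIALB_DIM2,
+ *		.description = "my-hdm",
+ *		.num_channels = ARRAY_SIZE(my_caps),
+ *		.channel_vector = my_caps,
+ *		.configure = my_hdm_configure,
+ *		.enqueue = my_hdm_enqueue,
+ *		.poison_channel = my_hdm_poison_channel,
+ *	};
+ *
+ *	kobj = most_register_interface(&my_iface);
+ */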
+
+/**
+ * struct most_aim - identifies MOST device driver to mostcore
+ * @name: Driver name
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: callback function to disconnect a certain channel
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ * @context: context pointer to be used by mostcore
+ */
+struct most_aim {
+ const char *name;
+ int (*probe_channel)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *cfg,
+ struct kobject *parent, char *name);
+ int (*disconnect_channel)(struct most_interface *iface,
+ int channel_idx);
+ int (*rx_completion)(struct mbo *mbo);
+ int (*tx_completion)(struct most_interface *iface, int channel_idx);
+ void *context;
+};
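+
+/*
+ * AIM sketch (illustrative only, not part of this patch): a minimal AIM
+ * registers its callbacks with the core in its module init; all my_*
+ * names are hypothetical.
+ *
+ *	static struct most_aim my_aim = {
+ *		.name = "my_aim",
+ *		.probe_channel = my_probe_channel,
+ *		.disconnect_channel = my_disconnect_channel,
+ *		.rx_completion = my_rx_completion,
+ *		.tx_completion = my_tx_completion,
+ *	};
+ *
+ *	static int __init my_aim_init(void)
+ *	{
+ *		return most_register_aim(&my_aim);
+ *	}
+ */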
+
+/**
+ * most_register_interface - Registers an instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns a pointer to the kobject of the generated instance.
+ *
+ * Note: The HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+struct kobject *most_register_interface(struct most_interface *iface);
+
+/**
+ * most_deregister_interface - Deregisters an instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ */
+void most_deregister_interface(struct most_interface *iface);
+int most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents the core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and wakes up the channel's enqueue
+ * thread to submit the MBOs waiting in the fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_aim(struct most_aim *aim);
+int most_deregister_aim(struct most_aim *aim);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+			 struct most_aim *aim);
+void most_put_mbo(struct mbo *mbo);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+ struct most_aim *aim);
+int most_start_channel(struct most_interface *iface, int channel_idx,
+		       struct most_aim *aim);
+int most_stop_channel(struct most_interface *iface, int channel_idx,
+		      struct most_aim *aim);
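+
+/*
+ * TX sketch for an AIM (illustrative only, not part of this patch):
+ * fetch a free MBO, fill it, and submit it for transmission; an MBO that
+ * ends up unused is handed back with most_put_mbo(). The function and
+ * aim names are hypothetical.
+ *
+ *	static int my_aim_send(struct most_interface *iface, int ch_idx,
+ *			       const void *data, u16 len)
+ *	{
+ *		struct mbo *mbo = most_get_mbo(iface, ch_idx, &my_aim);
+ *
+ *		if (!mbo)
+ *			return -EAGAIN;		// no free buffer right now
+ *		memcpy(mbo->virt_address, data, len);
+ *		mbo->buffer_length = len;
+ *		return most_submit_mbo(mbo);	// hand buffer over for TX
+ *	}
+ */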
+
+#endif /* __MOST_CORE_H__ */