Diffstat (limited to 'meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch')
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch  10621
1 files changed, 10621 insertions, 0 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch
new file mode 100644
index 0000000000..32879a2486
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch
@@ -0,0 +1,10621 @@
+diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
+index 49dbfdf..9da6f01 100644
+--- a/arch/x86/include/asm/gpio.h
++++ b/arch/x86/include/asm/gpio.h
+@@ -43,7 +43,7 @@ static inline int gpio_cansleep(unsigned int gpio)
+ */
+ static inline int gpio_to_irq(unsigned int gpio)
+ {
+- return -ENOSYS;
++ return __gpio_to_irq(gpio);
+ }
+
+ static inline int irq_to_gpio(unsigned int irq)
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index e02d74b..ab2dcd3 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -126,6 +126,13 @@ config AMCC_PPC440SPE_ADMA
+ help
+ Enable support for the AMCC PPC440SPe RAID engines.
+
++config TIMB_DMA
++ tristate "Timberdale FPGA DMA support"
++ depends on MFD_TIMBERDALE || HAS_IOMEM
++ select DMA_ENGINE
++ help
++ Enable support for the Timberdale FPGA DMA engine.
++
+ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
+
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index 807053d..33a94ec 100644
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -12,3 +12,4 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+ obj-$(CONFIG_SH_DMAE) += shdma.o
+ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
++obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
+new file mode 100644
+index 0000000..74b37aa
+--- /dev/null
++++ b/drivers/dma/timb_dma.c
+@@ -0,0 +1,875 @@
++/*
++ * timb_dma.c timberdale FPGA DMA driver
++ * Copyright (c) 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA DMA engine
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++#include <linux/timb_dma.h>
++
++#define DRIVER_NAME "timb-dma"
++
++/* Global DMA registers */
++#define TIMBDMA_ACR 0x34
++#define TIMBDMA_32BIT_ADDR 0x01
++
++#define TIMBDMA_ISR 0x080000
++#define TIMBDMA_IPR 0x080004
++#define TIMBDMA_IER 0x080008
++
++/* Channel specific registers */
++/* RX instances base addresses are 0x00, 0x40, 0x80 ...
++ * TX instances base addresses are 0x18, 0x58, 0x98 ...
++ */
++#define TIMBDMA_INSTANCE_OFFSET 0x40
++#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
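++
++/* Illustrative note, not part of the original patch: td_probe() below
++ * derives each channel's register base from these two constants as
++ *
++ *   membase + (chan_id / 2) * TIMBDMA_INSTANCE_OFFSET
++ *           + (rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET)
++ *
++ * Even channel ids are RX and odd ids are TX, so channels 0/1 share the
++ * instance at 0x00/0x18, channels 2/3 the one at 0x40/0x58, and so on.
++ */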
++
++/* RX registers, relative to the instance base */
++#define TIMBDMA_OFFS_RX_DHAR 0x00
++#define TIMBDMA_OFFS_RX_DLAR 0x04
++#define TIMBDMA_OFFS_RX_LR 0x0C
++#define TIMBDMA_OFFS_RX_BLR 0x10
++#define TIMBDMA_OFFS_RX_ER 0x14
++#define TIMBDMA_RX_EN 0x01
++/* Bytes per row; a video-specific register
++ * which is placed after the TX registers...
++ */
++#define TIMBDMA_OFFS_RX_BPRR 0x30
++
++/* TX registers, relative to the instance base */
++#define TIMBDMA_OFFS_TX_DHAR 0x00
++#define TIMBDMA_OFFS_TX_DLAR 0x04
++#define TIMBDMA_OFFS_TX_BLR 0x0C
++#define TIMBDMA_OFFS_TX_LR 0x14
++
++
++#define TIMB_DMA_DESC_SIZE 8
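++
++/* Illustrative note, not part of the original patch: td_fill_desc() below
++ * packs each 8-byte hardware descriptor as
++ *
++ *   byte  0     control; 0x21 = transfer + valid, bit 1 (0x02) is set on
++ *               the last descriptor of a chain
++ *   byte  1     reserved, written as 0x00
++ *   bytes 2-3   transfer length in bytes, little endian
++ *   bytes 4-7   DMA bus address, little endian
++ */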
++
++struct timb_dma_desc {
++ struct list_head desc_node;
++ struct dma_async_tx_descriptor txd;
++ u8 *desc_list;
++ unsigned int desc_list_len;
++ bool interrupt;
++};
++
++struct timb_dma_chan {
++ struct dma_chan chan;
++ void __iomem *membase;
++ spinlock_t lock; /* Used for mutual exclusion */
++ dma_cookie_t last_completed_cookie;
++ bool ongoing;
++ struct list_head active_list;
++ struct list_head queue;
++ struct list_head free_list;
++ unsigned int bytes_per_line;
++ enum dma_data_direction direction;
++ unsigned int descs; /* Descriptors to allocate */
++ unsigned int desc_elems; /* number of elems per descriptor */
++};
++
++struct timb_dma {
++ struct dma_device dma;
++ void __iomem *membase;
++ struct tasklet_struct tasklet;
++ struct timb_dma_chan channels[0];
++};
++
++static struct device *chan2dev(struct dma_chan *chan)
++{
++ return &chan->dev->device;
++}
++static struct device *chan2dmadev(struct dma_chan *chan)
++{
++ return chan2dev(chan)->parent->parent;
++}
++
++static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
++{
++ int id = td_chan->chan.chan_id;
++ return (struct timb_dma *)((u8 *)td_chan -
++ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
++}
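++
++/* Illustrative note, not part of the original patch: the arithmetic above
++ * works because td_probe() allocates the channels as a trailing array of
++ * struct timb_dma, i.e.
++ *
++ *   &td->channels[id] == (struct timb_dma_chan *)((u8 *)td +
++ *           sizeof(struct timb_dma) + id * sizeof(struct timb_dma_chan))
++ *
++ * so stepping back by the same amount recovers the struct timb_dma.
++ */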
++
++/* Must be called with the spinlock held */
++static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
++{
++ int id = td_chan->chan.chan_id;
++ struct timb_dma *td = tdchantotd(td_chan);
++ u32 ier;
++
++ /* enable interrupt for this channel */
++ ier = ioread32(td->membase + TIMBDMA_IER);
++ ier |= 1 << id;
++ dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
++ ier);
++ iowrite32(ier, td->membase + TIMBDMA_IER);
++}
++
++/* Should be called with the spinlock held */
++static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
++{
++ int id = td_chan->chan.chan_id;
++ struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
++ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
++ u32 isr;
++ bool done = false;
++
++ dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
++
++ isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
++ if (isr) {
++ iowrite32(isr, td->membase + TIMBDMA_ISR);
++ done = true;
++ }
++
++ return done;
++}
++
++static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
++ bool single)
++{
++ dma_addr_t addr;
++ int len;
++
++ addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
++ dma_desc[4];
++
++ len = (dma_desc[3] << 8) | dma_desc[2];
++
++ if (single)
++ dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
++ td_chan->direction);
++ else
++ dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
++ td_chan->direction);
++}
++
++static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
++{
++ struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
++ struct timb_dma_chan, chan);
++ u8 *descs;
++
++ for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
++ __td_unmap_desc(td_chan, descs, single);
++ if (descs[0] & 0x02)
++ break;
++ }
++}
++
++static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
++ struct scatterlist *sg, bool last)
++{
++ if (sg_dma_len(sg) > USHORT_MAX) {
++ dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
++ return -EINVAL;
++ }
++
++ /* length must be word aligned */
++ if (sg_dma_len(sg) % sizeof(u32)) {
++ dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
++ sg_dma_len(sg));
++ return -EINVAL;
++ }
++
++ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
++ dma_desc, (void *)(int)sg_dma_address(sg));
++
++ dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
++ dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
++ dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
++ dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
++
++ dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
++ dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
++
++ dma_desc[1] = 0x00;
++ dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
++
++ return 0;
++}
++
++/* Must be called with the spinlock held */
++static void __td_start_dma(struct timb_dma_chan *td_chan)
++{
++ struct timb_dma_desc *td_desc;
++
++ if (td_chan->ongoing) {
++ dev_err(chan2dev(&td_chan->chan),
++ "Transfer already ongoing\n");
++ return;
++ }
++
++ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
++ desc_node);
++
++ dev_dbg(chan2dev(&td_chan->chan),
++ "td_chan: %p, chan: %d, membase: %p\n",
++ td_chan, td_chan->chan.chan_id, td_chan->membase);
++
++ dev_dbg(chan2dev(&td_chan->chan), "Desc: 0x%02x 0x%02x 0x%02x 0x%02x "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ td_desc->desc_list[0], td_desc->desc_list[1],
++ td_desc->desc_list[2], td_desc->desc_list[3],
++ td_desc->desc_list[4], td_desc->desc_list[5],
++ td_desc->desc_list[6], td_desc->desc_list[7]);
++
++ dev_dbg(chan2dev(&td_chan->chan),
++ "Desc: phys: %p\n", (void *)(int)td_desc->txd.phys);
++
++ if (td_chan->direction == DMA_FROM_DEVICE) {
++
++ /* descriptor address */
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
++ iowrite32(td_desc->txd.phys, td_chan->membase +
++ TIMBDMA_OFFS_RX_DLAR);
++ /* Bytes per line */
++ iowrite32(td_chan->bytes_per_line, td_chan->membase +
++ TIMBDMA_OFFS_RX_BPRR);
++ /* enable RX */
++ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
++ } else {
++ /* address high */
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
++ iowrite32(td_desc->txd.phys, td_chan->membase +
++ TIMBDMA_OFFS_TX_DLAR);
++ }
++
++ td_chan->ongoing = true;
++
++ if (td_desc->interrupt)
++ __td_enable_chan_irq(td_chan);
++}
++
++static void __td_finish(struct timb_dma_chan *td_chan)
++{
++ dma_async_tx_callback callback;
++ void *param;
++ struct dma_async_tx_descriptor *txd;
++ struct timb_dma_desc *td_desc;
++
++ /* can happen if the descriptor is canceled */
++ if (list_empty(&td_chan->active_list))
++ return;
++
++ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
++ desc_node);
++ txd = &td_desc->txd;
++
++ dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
++ txd->cookie);
++
++ /* make sure to stop the transfer */
++ if (td_chan->direction == DMA_FROM_DEVICE)
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
++/* Currently no support for stopping DMA transfers
++ else
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
++*/
++ td_chan->last_completed_cookie = txd->cookie;
++ td_chan->ongoing = false;
++
++ callback = txd->callback;
++ param = txd->callback_param;
++
++ list_move(&td_desc->desc_node, &td_chan->free_list);
++
++ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
++ __td_unmap_descs(td_desc,
++ txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
++
++ /*
++ * The API requires that no submissions are done from a
++ * callback, so we don't need to drop the lock here
++ */
++ if (callback)
++ callback(param);
++}
++
++static u32 __td_ier_mask(struct timb_dma *td)
++{
++ int i;
++ u32 ret = 0;
++
++ for (i = 0; i < td->dma.chancnt; i++) {
++ struct timb_dma_chan *td_chan = td->channels + i;
++ if (td_chan->ongoing) {
++ struct timb_dma_desc *td_desc =
++ list_entry(td_chan->active_list.next,
++ struct timb_dma_desc, desc_node);
++ if (td_desc->interrupt)
++ ret |= 1 << i;
++ }
++ }
++
++ return ret;
++}
++
++static void __td_start_next(struct timb_dma_chan *td_chan)
++{
++ struct timb_dma_desc *td_desc;
++
++ BUG_ON(list_empty(&td_chan->queue));
++ BUG_ON(td_chan->ongoing);
++
++ td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
++ desc_node);
++
++ dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
++ __func__, td_desc->txd.cookie);
++
++ list_move(&td_desc->desc_node, &td_chan->active_list);
++ __td_start_dma(td_chan);
++}
++
++static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
++{
++ struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
++ txd);
++ struct timb_dma_chan *td_chan = container_of(txd->chan,
++ struct timb_dma_chan, chan);
++ dma_cookie_t cookie;
++
++ spin_lock_bh(&td_chan->lock);
++
++ cookie = txd->chan->cookie;
++ if (++cookie < 0)
++ cookie = 1;
++ txd->chan->cookie = cookie;
++ txd->cookie = cookie;
++
++ if (list_empty(&td_chan->active_list)) {
++ dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
++ txd->cookie);
++ list_add_tail(&td_desc->desc_node, &td_chan->active_list);
++ __td_start_dma(td_chan);
++ } else {
++ dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
++ txd->cookie);
++
++ list_add_tail(&td_desc->desc_node, &td_chan->queue);
++ }
++
++ spin_unlock_bh(&td_chan->lock);
++
++ return cookie;
++}
++
++static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
++{
++ struct dma_chan *chan = &td_chan->chan;
++ struct timb_dma_desc *td_desc;
++ int err;
++
++ dev_dbg(chan2dev(chan), "Allocating descriptor\n");
++
++ td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
++ if (!td_desc) {
++ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
++ return NULL;
++ }
++
++ td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
++
++ dev_dbg(chan2dev(chan), "Allocating descriptor list\n");
++
++ td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
++ if (!td_desc->desc_list) {
++ dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
++ goto err;
++ }
++
++ dev_dbg(chan2dev(chan), "Initialising txd\n");
++
++ dma_async_tx_descriptor_init(&td_desc->txd, chan);
++ td_desc->txd.tx_submit = td_tx_submit;
++ td_desc->txd.flags = DMA_CTRL_ACK;
++
++ dev_dbg(chan2dev(chan), "Mapping descriptor list\n");
++
++ dev_dbg(chan2dev(chan), "parent: %p, list: %p, len: %d\n",
++ chan2dmadev(chan), td_desc->desc_list, td_desc->desc_list_len);
++ td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
++ td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
++ dev_dbg(chan2dev(chan), "Mapping done, phys: %p\n",
++ (void *)(int)td_desc->txd.phys);
++
++ err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
++ if (err) {
++ dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
++ goto err;
++ }
++
++ dev_dbg(chan2dev(chan), "Allocated desc: %p\n", td_desc);
++ return td_desc;
++err:
++ kfree(td_desc->desc_list);
++ kfree(td_desc);
++
++ return NULL;
++
++}
++
++static void td_free_desc(struct timb_dma_desc *td_desc)
++{
++ dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
++ dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
++ td_desc->desc_list_len, DMA_TO_DEVICE);
++
++ kfree(td_desc->desc_list);
++ kfree(td_desc);
++}
++
++static void td_desc_put(struct timb_dma_chan *td_chan,
++ struct timb_dma_desc *td_desc)
++{
++ dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
++
++ spin_lock_bh(&td_chan->lock);
++ list_add(&td_desc->desc_node, &td_chan->free_list);
++ spin_unlock_bh(&td_chan->lock);
++}
++
++static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
++{
++ struct timb_dma_desc *td_desc, *_td_desc;
++ struct timb_dma_desc *ret = NULL;
++
++ spin_lock_bh(&td_chan->lock);
++ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
++ desc_node) {
++ if (async_tx_test_ack(&td_desc->txd)) {
++ list_del(&td_desc->desc_node);
++ ret = td_desc;
++ break;
++ }
++ dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
++ td_desc);
++ }
++ spin_unlock_bh(&td_chan->lock);
++
++ return ret;
++}
++
++static int td_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ int i;
++
++ dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
++
++ BUG_ON(!list_empty(&td_chan->free_list));
++ for (i = 0; i < td_chan->descs; i++) {
++ struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
++ if (!td_desc) {
++ if (i)
++ break;
++ else {
++ dev_err(chan2dev(chan),
++ "Couldn't allocate any descriptors\n");
++ return -ENOMEM;
++ }
++ }
++
++ td_desc_put(td_chan, td_desc);
++ }
++
++ spin_lock_bh(&td_chan->lock);
++ td_chan->last_completed_cookie = 1;
++ chan->cookie = 1;
++ spin_unlock_bh(&td_chan->lock);
++
++ return 0;
++}
++
++static void td_free_chan_resources(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ struct timb_dma_desc *td_desc, *_td_desc;
++ LIST_HEAD(list);
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++
++ /* check that all descriptors are free */
++ BUG_ON(!list_empty(&td_chan->active_list));
++ BUG_ON(!list_empty(&td_chan->queue));
++
++ spin_lock_bh(&td_chan->lock);
++ list_splice_init(&td_chan->free_list, &list);
++ spin_unlock_bh(&td_chan->lock);
++
++ list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
++ dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
++ td_desc);
++ td_free_desc(td_desc);
++ }
++}
++
++static enum dma_status td_is_tx_complete(struct dma_chan *chan,
++ dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ dma_cookie_t last_used;
++ dma_cookie_t last_complete;
++ int ret;
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++
++ last_complete = td_chan->last_completed_cookie;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++
++ if (done)
++ *done = last_complete;
++ if (used)
++ *used = last_used;
++
++ dev_dbg(chan2dev(chan),
++ "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
++ __func__, ret, last_complete, last_used);
++
++ return ret;
++}
++
++static void td_issue_pending(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++ spin_lock_bh(&td_chan->lock);
++
++ if (!list_empty(&td_chan->active_list))
++ /* transfer ongoing */
++ if (__td_dma_done_ack(td_chan))
++ __td_finish(td_chan);
++
++ if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
++ __td_start_next(td_chan);
++
++ spin_unlock_bh(&td_chan->lock);
++}
++
++static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
++ struct scatterlist *sgl, unsigned int sg_len,
++ enum dma_data_direction direction, unsigned long flags)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ struct timb_dma_desc *td_desc;
++ struct scatterlist *sg;
++ unsigned int i;
++ unsigned int desc_usage = 0;
++
++ if (!sgl || !sg_len) {
++ dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
++ return NULL;
++ }
++
++ /* even channels are for RX, odd for TX */
++ if (td_chan->direction != direction) {
++ dev_err(chan2dev(chan),
++ "Requesting channel in wrong direction\n");
++ return NULL;
++ }
++
++ td_desc = td_desc_get(td_chan);
++ if (!td_desc) {
++ dev_err(chan2dev(chan), "Not enough descriptors available\n");
++ return NULL;
++ }
++
++ td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ int err;
++ if (desc_usage > td_desc->desc_list_len) {
++ dev_err(chan2dev(chan), "No descriptor space\n");
++ return NULL;
++ }
++
++ err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
++ i == (sg_len - 1));
++ if (err) {
++ dev_err(chan2dev(chan), "Failed to update desc: %d\n",
++ err);
++ td_desc_put(td_chan, td_desc);
++ return NULL;
++ }
++ desc_usage += TIMB_DMA_DESC_SIZE;
++ }
++
++ dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
++ td_desc->desc_list_len, DMA_TO_DEVICE);
++
++ return &td_desc->txd;
++}
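++
++/* Illustrative sketch only, not part of this patch: a client would drive
++ * this engine through the generic dmaengine slave API of this kernel
++ * version (see include/linux/dmaengine.h), roughly:
++ *
++ *   dma_cap_mask_t mask;
++ *   dma_cap_zero(mask);
++ *   dma_cap_set(DMA_SLAVE, mask);
++ *   chan = dma_request_channel(mask, filter_fn, filter_param);
++ *   desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
++ *              DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
++ *   desc->callback = done_fn;
++ *   desc->callback_param = ctx;
++ *   cookie = desc->tx_submit(desc);
++ *   chan->device->device_issue_pending(chan);
++ *
++ * filter_fn, filter_param, done_fn and ctx are hypothetical names used
++ * only for this sketch.
++ */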
++
++static void td_terminate_all(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ struct timb_dma_desc *td_desc, *_td_desc;
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++
++ /* first the easy part, put the queue into the free list */
++ spin_lock_bh(&td_chan->lock);
++ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
++ desc_node)
++ list_move(&td_desc->desc_node, &td_chan->free_list);
++
++ /* now tear down the running transfer */
++ __td_finish(td_chan);
++ spin_unlock_bh(&td_chan->lock);
++}
++
++static void td_tasklet(unsigned long data)
++{
++ struct timb_dma *td = (struct timb_dma *)data;
++ u32 isr;
++ u32 ipr;
++ u32 ier;
++ int i;
++
++ isr = ioread32(td->membase + TIMBDMA_ISR);
++ ipr = isr & __td_ier_mask(td);
++
++ /* ack the interrupts */
++ iowrite32(ipr, td->membase + TIMBDMA_ISR);
++
++ for (i = 0; i < td->dma.chancnt; i++)
++ if (ipr & (1 << i)) {
++ struct timb_dma_chan *td_chan = td->channels + i;
++ spin_lock(&td_chan->lock);
++ __td_finish(td_chan);
++ if (!list_empty(&td_chan->queue))
++ __td_start_next(td_chan);
++ spin_unlock(&td_chan->lock);
++ }
++
++ ier = __td_ier_mask(td);
++ iowrite32(ier, td->membase + TIMBDMA_IER);
++}
++
++
++static irqreturn_t td_irq(int irq, void *devid)
++{
++ struct timb_dma *td = devid;
++ u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
++
++ if (ipr) {
++ /* disable interrupts, will be re-enabled in tasklet */
++ iowrite32(0, td->membase + TIMBDMA_IER);
++
++ tasklet_schedule(&td->tasklet);
++
++ return IRQ_HANDLED;
++ } else
++ return IRQ_NONE;
++}
++
++
++static int __devinit td_probe(struct platform_device *pdev)
++{
++ struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
++ struct timb_dma *td;
++ struct resource *iomem;
++ int irq;
++ int err;
++ int i;
++
++ if (!pdata) {
++ dev_err(&pdev->dev, "No platform data\n");
++ return -EINVAL;
++ }
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!iomem)
++ return -EINVAL;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return irq;
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ DRIVER_NAME))
++ return -EBUSY;
++
++ td = kzalloc(sizeof(struct timb_dma) +
++ sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
++ if (!td) {
++ err = -ENOMEM;
++ goto err_release_region;
++ }
++
++ dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
++
++ td->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!td->membase) {
++ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++
++ /* 32bit addressing */
++ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
++
++ /* disable and clear any interrupts */
++ iowrite32(0x0, td->membase + TIMBDMA_IER);
++ iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
++
++ tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
++
++ err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
++ if (err) {
++ dev_err(&pdev->dev, "Failed to request IRQ\n");
++ goto err_tasklet_kill;
++ }
++
++ td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
++ td->dma.device_free_chan_resources = td_free_chan_resources;
++ td->dma.device_is_tx_complete = td_is_tx_complete;
++ td->dma.device_issue_pending = td_issue_pending;
++
++ dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
++ td->dma.device_prep_slave_sg = td_prep_slave_sg;
++ td->dma.device_terminate_all = td_terminate_all;
++
++ td->dma.dev = &pdev->dev;
++
++ INIT_LIST_HEAD(&td->dma.channels);
++
++ for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
++ struct timb_dma_chan *td_chan = &td->channels[i];
++ struct timb_dma_platform_data_channel *pchan =
++ pdata->channels + i;
++
++ /* even channels are RX, odd are TX */
++ if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
++ dev_err(&pdev->dev, "Wrong channel configuration\n");
++ err = -EINVAL;
++ goto err_tasklet_kill;
++ }
++
++ td_chan->chan.device = &td->dma;
++ td_chan->chan.cookie = 1;
++ td_chan->chan.chan_id = i;
++ spin_lock_init(&td_chan->lock);
++ INIT_LIST_HEAD(&td_chan->active_list);
++ INIT_LIST_HEAD(&td_chan->queue);
++ INIT_LIST_HEAD(&td_chan->free_list);
++
++ td_chan->descs = pchan->descriptors;
++ td_chan->desc_elems = pchan->descriptor_elements;
++ td_chan->bytes_per_line = pchan->bytes_per_line;
++ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE;
++
++ td_chan->membase = td->membase +
++ (i / 2) * TIMBDMA_INSTANCE_OFFSET +
++ (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
++
++ dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
++ i, td_chan->membase);
++
++ list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
++ }
++
++ err = dma_async_device_register(&td->dma);
++ if (err) {
++ dev_err(&pdev->dev, "Failed to register async device\n");
++ goto err_free_irq;
++ }
++
++ platform_set_drvdata(pdev, td);
++
++ dev_dbg(&pdev->dev, "Probe result: %d\n", err);
++ return err;
++
++err_free_irq:
++ free_irq(irq, td);
++err_tasklet_kill:
++ tasklet_kill(&td->tasklet);
++ iounmap(td->membase);
++err_free_mem:
++ kfree(td);
++err_release_region:
++ release_mem_region(iomem->start, resource_size(iomem));
++
++ return err;
++
++}
++
++static int __devexit td_remove(struct platform_device *pdev)
++{
++ struct timb_dma *td = platform_get_drvdata(pdev);
++ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ int irq = platform_get_irq(pdev, 0);
++
++ dma_async_device_unregister(&td->dma);
++ free_irq(irq, td);
++ tasklet_kill(&td->tasklet);
++ iounmap(td->membase);
++ kfree(td);
++ release_mem_region(iomem->start, resource_size(iomem));
++
++ platform_set_drvdata(pdev, NULL);
++
++ dev_dbg(&pdev->dev, "Removed...\n");
++ return 0;
++}
++
++static struct platform_driver td_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = td_probe,
++ .remove = __exit_p(td_remove),
++};
++
++static int __init td_init(void)
++{
++ return platform_driver_register(&td_driver);
++}
++module_init(td_init);
++
++static void __exit td_exit(void)
++{
++ platform_driver_unregister(&td_driver);
++}
++module_exit(td_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Timberdale DMA controller driver");
++MODULE_AUTHOR("Richard Röjfors <richard.rojfors@pelagicore.com>");
++MODULE_ALIAS("platform:"DRIVER_NAME);
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 1f1d88a..b34485f 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -217,7 +217,8 @@ config GPIO_LANGWELL
+
+ config GPIO_TIMBERDALE
+ bool "Support for timberdale GPIO IP"
+- depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
++ default n
++ depends on HAS_IOMEM
+ ---help---
+ Add support for the GPIO IP in the timberdale FPGA.
+
+diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
+index a4d344b..1abc486 100644
+--- a/drivers/gpio/timbgpio.c
++++ b/drivers/gpio/timbgpio.c
+@@ -37,6 +37,8 @@
+ #define TGPIO_ICR 0x14
+ #define TGPIO_FLR 0x18
+ #define TGPIO_LVR 0x1c
++#define TGPIO_VER 0x20
++#define TGPIO_BFLR 0x24
+
+ struct timbgpio {
+ void __iomem *membase;
+@@ -125,17 +127,24 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+ unsigned long flags;
+- u32 lvr, flr;
++ u32 lvr, flr, bflr = 0;
++ u32 ver;
+
+ if (offset < 0 || offset > tgpio->gpio.ngpio)
+ return -EINVAL;
+
++ ver = ioread32(tgpio->membase + TGPIO_VER);
++
+ spin_lock_irqsave(&tgpio->lock, flags);
+
++
+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
+ flr = ioread32(tgpio->membase + TGPIO_FLR);
++ if (ver > 2)
++ bflr = ioread32(tgpio->membase + TGPIO_BFLR);
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
++ bflr &= ~(1 << offset);
+ flr &= ~(1 << offset);
+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
+ lvr |= 1 << offset;
+@@ -143,21 +152,27 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+ lvr &= ~(1 << offset);
+ }
+
+- if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+- return -EINVAL;
+- else {
++ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
++ if (ver < 3)
++ return -EINVAL;
++ else {
++ flr |= 1 << offset;
++ bflr |= 1 << offset;
++ }
++ } else {
++ bflr &= ~(1 << offset);
+ flr |= 1 << offset;
+- /* opposite compared to the datasheet, but it mirrors the
+- * reality
+- */
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+- lvr |= 1 << offset;
+- else
+ lvr &= ~(1 << offset);
++ else
++ lvr |= 1 << offset;
+ }
+
+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
++ if (ver > 2)
++ iowrite32(bflr, tgpio->membase + TGPIO_BFLR);
++
+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 5f318ce..44ff5c8 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -564,6 +564,16 @@ config I2C_VERSATILE
+ This driver can also be built as a module. If so, the module
+ will be called i2c-versatile.
+
++config I2C_XILINX
++ tristate "Xilinx I2C Controller"
++ depends on EXPERIMENTAL && HAS_IOMEM
++ help
++ If you say yes to this option, support will be included for the
++ Xilinx I2C controller.
++
++ This driver can also be built as a module. If so, the module
++ will be called i2c-xiic.
++
+ comment "External I2C/SMBus adapter drivers"
+
+ config I2C_PARPORT
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index 302c551..168f302 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -54,6 +54,7 @@ obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
+ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
+ obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+ obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
++obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
+
+ # External I2C/SMBus adapter drivers
+ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+new file mode 100644
+index 0000000..6946a09
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -0,0 +1,824 @@
++/*
++ * i2c-xiic.c
++ * Copyright (c) 2002-2007 Xilinx Inc.
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *
++ * This code was implemented by Mocean Laboratories AB when porting Linux
++ * to the automotive development board Russellville. The copyright holder
++ * as seen in the header is Intel Corporation.
++ * Mocean Laboratories forked off the GNU/Linux platform work into a
++ * separate company called Pelagicore AB, which committed the code to the
++ * kernel.
++ */
++
++/* Supports:
++ * Xilinx IIC
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/platform_device.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/wait.h>
++#include <linux/i2c-xiic.h>
++#include <linux/io.h>
++
++#define DRIVER_NAME "xiic-i2c"
++
++enum xilinx_i2c_state {
++ STATE_DONE,
++ STATE_ERROR,
++ STATE_START
++};
++
++/**
++ * struct xiic_i2c - Internal representation of the XIIC I2C bus
++ * @base: Memory base of the HW registers
++ * @wait: Wait queue for callers
++ * @adap: Kernel adapter representation
++ * @tx_msg: Messages from above to be sent
++ * @lock: Mutual exclusion
++ * @tx_pos: Current pos in TX message
++ * @nmsgs: Number of messages in tx_msg
++ * @state: See STATE_
++ * @rx_msg: Current RX message
++ * @rx_pos: Position within current RX message
++ */
++struct xiic_i2c {
++ void __iomem *base;
++ wait_queue_head_t wait;
++ struct i2c_adapter adap;
++ struct i2c_msg *tx_msg;
++ spinlock_t lock;
++ unsigned int tx_pos;
++ unsigned int nmsgs;
++ enum xilinx_i2c_state state;
++ struct i2c_msg *rx_msg;
++ int rx_pos;
++};
++
++
++#define XIIC_MSB_OFFSET 0
++#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
++
++/*
++ * Register offsets in bytes from RegisterBase. Three is added to the
++ * base offset to access LSB (IBM style) of the word
++ */
++#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */
++#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */
++#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */
++#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */
++#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */
++#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
++#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
++#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */
++#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
++#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */
++
++/* Control Register masks */
++#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
++#define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */
++#define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */
++#define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */
++#define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */
++#define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */
++#define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */
++
++/* Status Register masks */
++#define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */
++#define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */
++#define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */
++#define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */
++#define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */
++#define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */
++#define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */
++#define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */
++
++/* Interrupt Status Register masks Interrupt occurs when... */
++#define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */
++#define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */
++#define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */
++#define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */
++#define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */
++#define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */
++#define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */
++#define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */
++
++/* The following constants specify the depth of the FIFOs */
++#define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */
++#define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */
++
++/* The following constants specify groups of interrupts that are typically
++ * enabled or disabled at the same time
++ */
++#define XIIC_TX_INTERRUPTS \
++(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)
++
++#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)
++
++/* The following constants are used with the following macros to specify the
++ * operation, a read or write operation.
++ */
++#define XIIC_READ_OPERATION 1
++#define XIIC_WRITE_OPERATION 0
++
++/*
++ * Tx Fifo upper bit masks.
++ */
++#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
++#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
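++
++/* Illustrative note, not part of the original patch: in dynamic mode the
++ * driver writes 16-bit values to the DTR with these flags in the upper
++ * byte; xiic_start_recv() below starts a read transfer with
++ *
++ *   xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
++ *                 (msg->addr << 1) | XIIC_READ_OPERATION |
++ *                 XIIC_TX_DYN_START_MASK);
++ *
++ * and then writes the byte count, with XIIC_TX_DYN_STOP_MASK set when a
++ * STOP condition should follow the transfer.
++ */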
++
++/*
++ * The following constants define the register offsets for the Interrupt
++ * registers. There are some holes in the memory map for reserved addresses
++ * to allow other registers to be added and still match the memory map of the
++ * interrupt controller registers
++ */
++#define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */
++#define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */
++#define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */
++#define XIIC_RESETR_OFFSET 0x40 /* Reset Register */
++
++#define XIIC_RESET_MASK 0xAUL
++
++/*
++ * The following constant is used for the device global interrupt enable
++ * register, to enable all interrupts for the device, this is the only bit
++ * in the register
++ */
++#define XIIC_GINTR_ENABLE_MASK 0x80000000UL
++
++#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
++#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
++
++static void xiic_start_xfer(struct xiic_i2c *i2c);
++static void __xiic_start_xfer(struct xiic_i2c *i2c);
++
++static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
++{
++ iowrite8(value, i2c->base + reg);
++}
++
++static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
++{
++ return ioread8(i2c->base + reg);
++}
++
++static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
++{
++ iowrite16(value, i2c->base + reg);
++}
++
++static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
++{
++ iowrite32(value, i2c->base + reg);
++}
++
++static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
++{
++ return ioread32(i2c->base + reg);
++}
++
++static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
++{
++ u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
++ xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
++}
++
++static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
++{
++ u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
++ xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
++}
++
++static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
++{
++ u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
++ xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
++}
++
++static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
++{
++ xiic_irq_clr(i2c, mask);
++ xiic_irq_en(i2c, mask);
++}
++
++static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
++{
++ u8 sr;
++ for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
++ !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
++ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
++ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
++}
++
++static void xiic_reinit(struct xiic_i2c *i2c)
++{
++ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
++
++ /* Set receive Fifo depth to maximum (zero based). */
++ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);
++
++ /* Reset Tx Fifo. */
++ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
++
++ /* Enable IIC Device, remove Tx Fifo reset & disable general call. */
++ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
++
++ /* make sure RX fifo is empty */
++ xiic_clear_rx_fifo(i2c);
++
++ /* Enable interrupts */
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
++
++ xiic_irq_clr_en(i2c, XIIC_INTR_AAS_MASK | XIIC_INTR_ARB_LOST_MASK);
++}
++
++static void xiic_deinit(struct xiic_i2c *i2c)
++{
++ u8 cr;
++
++ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
++
++ /* Disable IIC Device. */
++ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
++ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
++}
++
++static void xiic_read_rx(struct xiic_i2c *i2c)
++{
++ u8 bytes_in_fifo;
++ int i;
++
++ bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, bytes in fifo: %d, msg: %d"
++ ", SR: 0x%x, CR: 0x%x\n",
++ __func__, bytes_in_fifo, xiic_rx_space(i2c),
++ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
++ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
++
++ if (bytes_in_fifo > xiic_rx_space(i2c))
++ bytes_in_fifo = xiic_rx_space(i2c);
++
++ for (i = 0; i < bytes_in_fifo; i++)
++ i2c->rx_msg->buf[i2c->rx_pos++] =
++ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
++
++ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
++ (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
++ IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
++}
++
++static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
++{
++ /* return the actual space left in the FIFO */
++ return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
++}
++
++static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
++{
++ u8 fifo_space = xiic_tx_fifo_space(i2c);
++ int len = xiic_tx_space(i2c);
++
++ len = (len > fifo_space) ? fifo_space : len;
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
++ __func__, len, fifo_space);
++
++ while (len--) {
++ u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
++ if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
++ /* last message in transfer -> STOP */
++ data |= XIIC_TX_DYN_STOP_MASK;
++ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
++
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
++ } else
++ xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++}
++
++static void xiic_wakeup(struct xiic_i2c *i2c, int code)
++{
++ i2c->tx_msg = NULL;
++ i2c->rx_msg = NULL;
++ i2c->nmsgs = 0;
++ i2c->state = code;
++ wake_up(&i2c->wait);
++}
++
++static void xiic_process(struct xiic_i2c *i2c)
++{
++ u32 pend, isr, ier;
++ u32 clr = 0;
++
++ /* Get the interrupt Status from the IPIF. There is no clearing of
++ * interrupts in the IPIF. Interrupts must be cleared at the source.
++ * To find which interrupts are pending, AND the pending interrupts
++ * with the interrupts that are enabled.
++ */
++ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
++ ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
++ pend = isr & ier;
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, IER: 0x%x, ISR: 0x%x, "
++ "pend: 0x%x, SR: 0x%x, msg: %p, nmsgs: %d\n",
++ __func__, ier, isr, pend, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
++ i2c->tx_msg, i2c->nmsgs);
++
++ /* Do not process a device's interrupts if the device has no
++ * interrupts pending
++ */
++ if (!pend)
++ return;
++
++ /* Service requesting interrupt */
++ if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
++ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
++ !(pend & XIIC_INTR_RX_FULL_MASK))) {
++ /* bus arbitration lost, or...
++ * Transmit error _OR_ RX completed
++ * if this happens when RX_FULL is not set
++ * this is probably a TX error
++ */
++
++ dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);
++
++ /* dynamic mode seems to suffer from problems if we just flush the
++ * FIFOs and the next message is a TX with len 0 (only addr);
++ * reset the IP instead of just flushing the FIFOs
++ */
++ xiic_reinit(i2c);
++
++ if (i2c->tx_msg)
++ xiic_wakeup(i2c, STATE_ERROR);
++
++ } else if (pend & XIIC_INTR_RX_FULL_MASK) {
++ /* Receive register/FIFO is full */
++
++ clr = XIIC_INTR_RX_FULL_MASK;
++ if (!i2c->rx_msg) {
++ dev_dbg(i2c->adap.dev.parent,
++ "%s unexpected RX IRQ\n", __func__);
++ xiic_clear_rx_fifo(i2c);
++ goto out;
++ }
++
++ xiic_read_rx(i2c);
++ if (xiic_rx_space(i2c) == 0) {
++ /* this is the last part of the message */
++ i2c->rx_msg = NULL;
++
++ /* also clear TX error if there (RX complete) */
++ clr |= (isr & XIIC_INTR_TX_ERROR_MASK);
++
++ dev_dbg(i2c->adap.dev.parent,
++ "%s end of message, nmsgs: %d\n",
++ __func__, i2c->nmsgs);
++
++ /* send next message if this wasn't the last,
++ * otherwise the transfer will be finalised when
++ * receiving the bus not busy interrupt
++ */
++ if (i2c->nmsgs > 1) {
++ i2c->nmsgs--;
++ i2c->tx_msg++;
++ dev_dbg(i2c->adap.dev.parent,
++ "%s will start next...\n", __func__);
++
++ __xiic_start_xfer(i2c);
++ }
++ }
++ } else if (pend & XIIC_INTR_BNB_MASK) {
++ /* IIC bus has transitioned to not busy */
++ clr = XIIC_INTR_BNB_MASK;
++
++ /* The bus is not busy, disable BusNotBusy interrupt */
++ xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
++
++ if (!i2c->tx_msg)
++ goto out;
++
++ if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
++ xiic_tx_space(i2c) == 0)
++ xiic_wakeup(i2c, STATE_DONE);
++ else
++ xiic_wakeup(i2c, STATE_ERROR);
++
++ } else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
++ /* Transmit register/FIFO is empty or ½ empty */
++
++ clr = pend &
++ (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
++
++ if (!i2c->tx_msg) {
++ dev_dbg(i2c->adap.dev.parent,
++ "%s unexpected TX IRQ\n", __func__);
++ goto out;
++ }
++
++ xiic_fill_tx_fifo(i2c);
++
++ /* current message sent and there is space in the fifo */
++ if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
++ dev_dbg(i2c->adap.dev.parent,
++ "%s end of message sent, nmsgs: %d\n",
++ __func__, i2c->nmsgs);
++ if (i2c->nmsgs > 1) {
++ i2c->nmsgs--;
++ i2c->tx_msg++;
++ __xiic_start_xfer(i2c);
++ } else {
++ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
++
++ dev_dbg(i2c->adap.dev.parent,
++ "%s Got TX IRQ but no more to do...\n",
++ __func__);
++ }
++ } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
++ /* current frame is sent and is last,
++ * make sure to disable tx half
++ */
++ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
++ } else {
++ /* got IRQ which is not acked */
++ dev_err(i2c->adap.dev.parent, "%s Got unexpected IRQ\n",
++ __func__);
++ clr = pend;
++ }
++out:
++ dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
++
++ xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
++}
++
++static int xiic_bus_busy(struct xiic_i2c *i2c)
++{
++ u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
++
++ return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
++}
++
++static int xiic_busy(struct xiic_i2c *i2c)
++{
++ int tries = 3;
++ int err;
++
++ if (i2c->tx_msg)
++ return -EBUSY;
++
++ /* for instance if previous transfer was terminated due to TX error
++ * it might be that the bus is on its way to becoming available;
++ * give it at most 3 ms to wake
++ */
++ err = xiic_bus_busy(i2c);
++ while (err && tries--) {
++ mdelay(1);
++ err = xiic_bus_busy(i2c);
++ }
++
++ return err;
++}
++
++static void xiic_start_recv(struct xiic_i2c *i2c)
++{
++ u8 rx_watermark;
++ struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
++
++ /* Clear and enable Rx full interrupt. */
++ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
++
++ /* we want to get all but the last byte, because the TX_ERROR IRQ is used
++ * to indicate an error ACK on the address, and a negative ack on the last
++ * received byte; so, to not mix them up, receive all but the last.
++ * In the case where there is only one byte to receive
++ * we can check if ERROR and RX full is set at the same time
++ */
++ rx_watermark = msg->len;
++ if (rx_watermark > IIC_RX_FIFO_DEPTH)
++ rx_watermark = IIC_RX_FIFO_DEPTH;
++ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
++
++ if (!(msg->flags & I2C_M_NOSTART))
++ /* write the address */
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
++ (msg->addr << 1) | XIIC_READ_OPERATION |
++ XIIC_TX_DYN_START_MASK);
++
++ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
++
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
++ msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
++ if (i2c->nmsgs == 1)
++ /* very last, enable bus not busy as well */
++ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
++
++ /* the message is tx:ed */
++ i2c->tx_pos = msg->len;
++}
++
++static void xiic_start_send(struct xiic_i2c *i2c)
++{
++ struct i2c_msg *msg = i2c->tx_msg;
++
++ xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d, "
++ "ISR: 0x%x, CR: 0x%x\n",
++ __func__, msg, msg->len, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
++ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
++
++ if (!(msg->flags & I2C_M_NOSTART)) {
++ /* write the address */
++ u16 data = ((msg->addr << 1) & 0xfe) | XIIC_WRITE_OPERATION |
++ XIIC_TX_DYN_START_MASK;
++ if ((i2c->nmsgs == 1) && msg->len == 0)
++ /* no data and last message -> add STOP */
++ data |= XIIC_TX_DYN_STOP_MASK;
++
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++
++ xiic_fill_tx_fifo(i2c);
++
++ /* Clear any pending Tx empty, Tx Error and then enable them. */
++ xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
++ XIIC_INTR_BNB_MASK);
++}
++
++static irqreturn_t xiic_isr(int irq, void *dev_id)
++{
++ struct xiic_i2c *i2c = dev_id;
++
++ spin_lock(&i2c->lock);
++ /* disable interrupts globally */
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);
++
++ xiic_process(i2c);
++
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
++ spin_unlock(&i2c->lock);
++
++ return IRQ_HANDLED;
++}
++
++static void __xiic_start_xfer(struct xiic_i2c *i2c)
++{
++ int first = 1;
++ int fifo_space = xiic_tx_fifo_space(i2c);
++ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
++ __func__, i2c->tx_msg, fifo_space);
++
++ if (!i2c->tx_msg)
++ return;
++
++ i2c->rx_pos = 0;
++ i2c->tx_pos = 0;
++ i2c->state = STATE_START;
++ while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
++ if (!first) {
++ i2c->nmsgs--;
++ i2c->tx_msg++;
++ i2c->tx_pos = 0;
++ } else
++ first = 0;
++
++ if (i2c->tx_msg->flags & I2C_M_RD) {
++ /* we don't dare to put several reads in the FIFO */
++ xiic_start_recv(i2c);
++ return;
++ } else {
++ xiic_start_send(i2c);
++ if (xiic_tx_space(i2c) != 0) {
++ /* the message could not be completely sent */
++ break;
++ }
++ }
++
++ fifo_space = xiic_tx_fifo_space(i2c);
++ }
++
++ /* if there are more messages, or the current one could not be completely
++ * put into the FIFO, also enable the half empty interrupt
++ */
++ if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
++ xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);
++
++}
++
++static void xiic_start_xfer(struct xiic_i2c *i2c)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&i2c->lock, flags);
++ xiic_reinit(i2c);
++ /* disable interrupts globally */
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
++ spin_unlock_irqrestore(&i2c->lock, flags);
++
++ __xiic_start_xfer(i2c);
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
++}
++
++static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
++{
++ struct xiic_i2c *i2c = i2c_get_adapdata(adap);
++ int err;
++
++ dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
++ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
++
++ err = xiic_busy(i2c);
++ if (err)
++ return err;
++
++ i2c->tx_msg = msgs;
++ i2c->nmsgs = num;
++
++ xiic_start_xfer(i2c);
++
++ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
++ (i2c->state == STATE_DONE), HZ))
++ return (i2c->state == STATE_DONE) ? num : -EIO;
++ else {
++ i2c->tx_msg = NULL;
++ i2c->rx_msg = NULL;
++ i2c->nmsgs = 0;
++ return -ETIMEDOUT;
++ }
++}
++
++static u32 xiic_func(struct i2c_adapter *adap)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++}
++
++static const struct i2c_algorithm xiic_algorithm = {
++ .master_xfer = xiic_xfer,
++ .functionality = xiic_func,
++};
++
++static struct i2c_adapter xiic_adapter = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
++ .algo = &xiic_algorithm,
++};
++
++
++static int __devinit xiic_i2c_probe(struct platform_device *pdev)
++{
++ struct xiic_i2c *i2c;
++ struct xiic_i2c_platform_data *pdata;
++ struct resource *res;
++ int ret, irq;
++ u8 i;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ goto resource_missing;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ goto resource_missing;
++
++ pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
++ if (!pdata)
++ return -EINVAL;
++
++ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
++ if (!i2c)
++ return -ENOMEM;
++
++ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
++ dev_err(&pdev->dev, "Memory region busy\n");
++ ret = -EBUSY;
++ goto request_mem_failed;
++ }
++
++ i2c->base = ioremap(res->start, resource_size(res));
++ if (!i2c->base) {
++ dev_err(&pdev->dev, "Unable to map registers\n");
++ ret = -EIO;
++ goto map_failed;
++ }
++
++ /* hook up driver to tree */
++ platform_set_drvdata(pdev, i2c);
++ i2c->adap = xiic_adapter;
++ i2c_set_adapdata(&i2c->adap, i2c);
++ i2c->adap.dev.parent = &pdev->dev;
++
++ xiic_reinit(i2c);
++
++ spin_lock_init(&i2c->lock);
++ init_waitqueue_head(&i2c->wait);
++ ret = request_irq(irq, xiic_isr, 0, pdev->name, i2c);
++ if (ret) {
++ dev_err(&pdev->dev, "Cannot claim IRQ\n");
++ goto request_irq_failed;
++ }
++
++ /* add i2c adapter to i2c tree */
++ ret = i2c_add_adapter(&i2c->adap);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to add adapter\n");
++ goto add_adapter_failed;
++ }
++
++ /* add in known devices to the bus */
++ for (i = 0; i < pdata->num_devices; i++)
++ i2c_new_device(&i2c->adap, pdata->devices + i);
++
++ return 0;
++
++add_adapter_failed:
++ free_irq(irq, i2c);
++request_irq_failed:
++ xiic_deinit(i2c);
++ iounmap(i2c->base);
++map_failed:
++ release_mem_region(res->start, resource_size(res));
++request_mem_failed:
++ kfree(i2c);
++
++ return ret;
++resource_missing:
++ dev_err(&pdev->dev, "IRQ or Memory resource is missing\n");
++ return -ENOENT;
++}
++
++static int __devexit xiic_i2c_remove(struct platform_device* pdev)
++{
++ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
++ struct resource *res;
++
++ /* remove adapter & data */
++ i2c_del_adapter(&i2c->adap);
++
++ xiic_deinit(i2c);
++
++ platform_set_drvdata(pdev, NULL);
++
++ free_irq(platform_get_irq(pdev, 0), i2c);
++
++ iounmap(i2c->base);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res)
++ release_mem_region(res->start, resource_size(res));
++
++ kfree(i2c);
++
++ return 0;
++}
++
++
++/* work with hotplug and coldplug */
++MODULE_ALIAS("platform:"DRIVER_NAME);
++
++static struct platform_driver xiic_i2c_driver = {
++ .probe = xiic_i2c_probe,
++ .remove = __devexit_p(xiic_i2c_remove),
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ },
++};
++
++static int __init xiic_i2c_init(void)
++{
++ return platform_driver_register(&xiic_i2c_driver);
++}
++
++static void __exit xiic_i2c_exit(void)
++{
++ platform_driver_unregister(&xiic_i2c_driver);
++}
++
++module_init(xiic_i2c_init);
++module_exit(xiic_i2c_exit);
++
++MODULE_AUTHOR("info@mocean-labs.com");
++MODULE_DESCRIPTION("Xilinx I2C bus driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
+index 7ef0d14..62a1696 100644
+--- a/drivers/input/touchscreen/tsc2007.c
++++ b/drivers/input/touchscreen/tsc2007.c
+@@ -29,6 +29,7 @@
+
+ #define TS_POLL_DELAY 1 /* ms delay between samples */
+ #define TS_POLL_PERIOD 1 /* ms delay between samples */
++#define TS_SLOW_POLL_PERIOD 20 /* ms delay between pen down check */
+
+ #define TSC2007_MEASURE_TEMP0 (0x0 << 4)
+ #define TSC2007_MEASURE_AUX (0x2 << 4)
+@@ -77,6 +78,7 @@ struct tsc2007 {
+ u16 x_plate_ohms;
+
+ bool pendown;
++ bool ignore_next_irq;
+ int irq;
+
+ int (*get_pendown_state)(void);
+@@ -228,14 +230,39 @@ static void tsc2007_work(struct work_struct *work)
+ if (ts->pendown)
+ schedule_delayed_work(&ts->work,
+ msecs_to_jiffies(TS_POLL_PERIOD));
+- else
+- enable_irq(ts->irq);
++ else {
++ /* if we don't have the get pen down state callback we
++ * ignore the next IRQ because it is provoked when we checked
++ * the touch pressure.
++ * If the user really touches the screen we will get a new
++ * interrupt anyway so it is safe to ignore it
++ *
++ * This is basically implementing this part of the manual:
++ * "In both cases previously listed, it is recommended that
++ * whenever the host writes to the TSC2007, the master
++ * processor masks the interrupt associated to PENIRQ.
++ * This masking prevents false triggering of interrupts when
++ * the PENIRQ line is disabled in the cases previously listed."
++ */
++ if (!ts->get_pendown_state)
++ ts->ignore_next_irq = true;
++ if (ts->irq)
++ enable_irq(ts->irq);
++ else
++ schedule_delayed_work(&ts->work,
++ msecs_to_jiffies(TS_SLOW_POLL_PERIOD));
++ }
+ }
+
+ static irqreturn_t tsc2007_irq(int irq, void *handle)
+ {
+ struct tsc2007 *ts = handle;
+
++ if (ts->ignore_next_irq) {
++ ts->ignore_next_irq = false;
++ return IRQ_HANDLED;
++ }
++
+ if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
+ disable_irq_nosync(ts->irq);
+ schedule_delayed_work(&ts->work,
+@@ -250,15 +277,18 @@ static irqreturn_t tsc2007_irq(int irq, void *handle)
+
+ static void tsc2007_free_irq(struct tsc2007 *ts)
+ {
+- free_irq(ts->irq, ts);
+- if (cancel_delayed_work_sync(&ts->work)) {
+- /*
+- * Work was pending, therefore we need to enable
+- * IRQ here to balance the disable_irq() done in the
+- * interrupt handler.
+- */
+- enable_irq(ts->irq);
+- }
++ if (ts->irq) {
++ free_irq(ts->irq, ts);
++ if (cancel_delayed_work_sync(&ts->work)) {
++ /*
++ * Work was pending, therefore we need to enable
++ * IRQ here to balance the disable_irq() done in the
++ * interrupt handler.
++ */
++ enable_irq(ts->irq);
++ }
++ } else
++ cancel_delayed_work_sync(&ts->work);
+ }
+
+ static int __devinit tsc2007_probe(struct i2c_client *client,
+@@ -312,12 +342,16 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
+ if (pdata->init_platform_hw)
+ pdata->init_platform_hw();
+
+- err = request_irq(ts->irq, tsc2007_irq, 0,
+- client->dev.driver->name, ts);
+- if (err < 0) {
+- dev_err(&client->dev, "irq %d busy?\n", ts->irq);
+- goto err_free_mem;
+- }
++
++ if (ts->irq) {
++ err = request_irq(ts->irq, tsc2007_irq, 0,
++ client->dev.driver->name, ts);
++ if (err < 0) {
++ dev_err(&client->dev, "irq %d busy?\n", ts->irq);
++ goto err_free_mem;
++ }
++ } else
++		schedule_delayed_work(&ts->work,
++				msecs_to_jiffies(TS_SLOW_POLL_PERIOD));
+
+ /* Prepare for touch readings - power down ADC and enable PENIRQ */
+ err = tsc2007_xfer(ts, PWRDOWN);
+diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
+index 3f40f37..c542897 100644
+--- a/drivers/media/radio/Kconfig
++++ b/drivers/media/radio/Kconfig
+@@ -417,6 +417,18 @@ config RADIO_TEA5764_XTAL
+ Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
+ here if TEA5764 reference frequency is connected in FREQIN.
+
++config RADIO_SAA7706H
++ tristate "SAA7706H Car Radio DSP"
++ depends on I2C && VIDEO_V4L2
++ ---help---
++	  Say Y here if you want to use the SAA7706H Car Radio Digital
++	  Signal Processor, found for instance on the Russellville development
++	  board. On the Russellville board the device is connected to the
++	  internal Timberdale I2C bus.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called saa7706h.
++
+ config RADIO_TEF6862
+ tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
+ depends on I2C && VIDEO_V4L2
+@@ -429,4 +441,14 @@ config RADIO_TEF6862
+ To compile this driver as a module, choose M here: the
+ module will be called TEF6862.
+
++config RADIO_TIMBERDALE
++ tristate "Enable the Timberdale radio driver"
++ depends on MFD_TIMBERDALE && VIDEO_V4L2 && HAS_IOMEM && I2C
++ select RADIO_TEF6862
++ select RADIO_SAA7706H
++ ---help---
++	  This is an umbrella driver for the radio tuner and DSP
++ found behind the Timberdale FPGA on the Russellville board.
++ Enabling this driver will automatically select the DSP and tuner.
++
+ endif # RADIO_ADAPTERS
+diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
+index 01922ad..f615583 100644
+--- a/drivers/media/radio/Makefile
++++ b/drivers/media/radio/Makefile
+@@ -23,6 +23,8 @@ obj-$(CONFIG_USB_DSBR) += dsbr100.o
+ obj-$(CONFIG_RADIO_SI470X) += si470x/
+ obj-$(CONFIG_USB_MR800) += radio-mr800.o
+ obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
++obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
+ obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
++obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
+
+ EXTRA_CFLAGS += -Isound
+diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
+new file mode 100644
+index 0000000..ee8618a
+--- /dev/null
++++ b/drivers/media/radio/radio-timb.c
+@@ -0,0 +1,464 @@
++/*
++ * radio-timb.c Timberdale FPGA Radio driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/version.h>
++#include <linux/io.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-device.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/i2c.h>
++#include <media/timb_radio.h>
++
++#define DRIVER_NAME "timb-radio"
++
++#define RDS_BLOCK_SIZE 4
++#define RDS_BUFFER_SIZE (RDS_BLOCK_SIZE * 100)
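++/* the interrupt handler stores one complete 4-byte RDS block per IRQ,
++ * so the buffer holds up to 100 blocks before wrapping
++ */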
++
++struct timbradio {
++ struct mutex lock; /* for mutual exclusion */
++ void __iomem *membase;
++ struct timb_radio_platform_data pdata;
++ struct v4l2_subdev *sd_tuner;
++ struct v4l2_subdev *sd_dsp;
++ struct video_device *video_dev;
++ struct v4l2_device v4l2_dev;
++ /* RDS related */
++ int open_count;
++ int rds_irq;
++ wait_queue_head_t read_queue;
++ unsigned char buffer[RDS_BUFFER_SIZE];
++ unsigned int rd_index;
++ unsigned int wr_index;
++};
++
++
++static int timbradio_vidioc_querycap(struct file *file, void *priv,
++ struct v4l2_capability *v)
++{
++ strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
++ strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
++ snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
++ v->version = KERNEL_VERSION(0, 0, 1);
++ v->capabilities =
++ V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
++ return 0;
++}
++
++static int timbradio_vidioc_g_tuner(struct file *file, void *priv,
++ struct v4l2_tuner *v)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, g_tuner, v);
++}
++
++static int timbradio_vidioc_s_tuner(struct file *file, void *priv,
++ struct v4l2_tuner *v)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, s_tuner, v);
++}
++
++static int timbradio_vidioc_g_input(struct file *filp, void *priv,
++ unsigned int *i)
++{
++ *i = 0;
++ return 0;
++}
++
++static int timbradio_vidioc_s_input(struct file *filp, void *priv,
++ unsigned int i)
++{
++ return i ? -EINVAL : 0;
++}
++
++static int timbradio_vidioc_g_audio(struct file *file, void *priv,
++ struct v4l2_audio *a)
++{
++ a->index = 0;
++ strlcpy(a->name, "Radio", sizeof(a->name));
++ a->capability = V4L2_AUDCAP_STEREO;
++ return 0;
++}
++
++
++static int timbradio_vidioc_s_audio(struct file *file, void *priv,
++ struct v4l2_audio *a)
++{
++ return a->index ? -EINVAL : 0;
++}
++
++static int timbradio_vidioc_s_frequency(struct file *file, void *priv,
++ struct v4l2_frequency *f)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, s_frequency, f);
++}
++
++static int timbradio_vidioc_g_frequency(struct file *file, void *priv,
++ struct v4l2_frequency *f)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, g_frequency, f);
++}
++
++static int timbradio_vidioc_queryctrl(struct file *file, void *priv,
++ struct v4l2_queryctrl *qc)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_dsp, core, queryctrl, qc);
++}
++
++static int timbradio_vidioc_g_ctrl(struct file *file, void *priv,
++ struct v4l2_control *ctrl)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_dsp, core, g_ctrl, ctrl);
++}
++
++static int timbradio_vidioc_s_ctrl(struct file *file, void *priv,
++ struct v4l2_control *ctrl)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_dsp, core, s_ctrl, ctrl);
++}
++
++static const struct v4l2_ioctl_ops timbradio_ioctl_ops = {
++ .vidioc_querycap = timbradio_vidioc_querycap,
++ .vidioc_g_tuner = timbradio_vidioc_g_tuner,
++ .vidioc_s_tuner = timbradio_vidioc_s_tuner,
++ .vidioc_g_frequency = timbradio_vidioc_g_frequency,
++ .vidioc_s_frequency = timbradio_vidioc_s_frequency,
++ .vidioc_g_input = timbradio_vidioc_g_input,
++ .vidioc_s_input = timbradio_vidioc_s_input,
++ .vidioc_g_audio = timbradio_vidioc_g_audio,
++ .vidioc_s_audio = timbradio_vidioc_s_audio,
++ .vidioc_queryctrl = timbradio_vidioc_queryctrl,
++ .vidioc_g_ctrl = timbradio_vidioc_g_ctrl,
++ .vidioc_s_ctrl = timbradio_vidioc_s_ctrl
++};
++
++static irqreturn_t timbradio_irq(int irq, void *devid)
++{
++ struct timbradio *tr = devid;
++ u32 data = ioread32(tr->membase);
++
++ tr->buffer[tr->wr_index++] = data >> 24;
++ tr->buffer[tr->wr_index++] = data >> 16;
++ tr->buffer[tr->wr_index++] = data >> 8;
++ tr->buffer[tr->wr_index++] = data;
++ tr->wr_index %= RDS_BUFFER_SIZE;
++
++ wake_up(&tr->read_queue);
++
++ /* new RDS data received, read it */
++ return IRQ_HANDLED;
++}
++
++/**************************************************************************
++ * File Operations Interface
++ **************************************************************************/
++
++static ssize_t timbradio_rds_fops_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct timbradio *tr = video_drvdata(file);
++ unsigned int outblocks = 0;
++
++ if (count < sizeof(struct v4l2_rds_data))
++ return -EINVAL;
++
++ /* block if no new data available */
++ while (tr->wr_index == tr->rd_index) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EWOULDBLOCK;
++
++ if (wait_event_interruptible(tr->read_queue,
++ tr->wr_index != tr->rd_index))
++ return -EINTR;
++ }
++
++ mutex_lock(&tr->lock);
++ count /= sizeof(struct v4l2_rds_data);
++
++ while (outblocks < count) {
++ struct v4l2_rds_data rds_data;
++
++ rds_data.msb = tr->buffer[tr->rd_index++];
++ rds_data.lsb = tr->buffer[tr->rd_index++];
++ tr->rd_index %= RDS_BUFFER_SIZE;
++
++ rds_data.block = V4L2_RDS_BLOCK_A;
++
++ if (copy_to_user(buf + outblocks * sizeof(rds_data), &rds_data,
++ sizeof(rds_data))) {
++ mutex_unlock(&tr->lock);
++ return -EFAULT;
++ }
++
++ outblocks++;
++
++ if (tr->rd_index == tr->wr_index)
++ break;
++ }
++ mutex_unlock(&tr->lock);
++
++ return outblocks * sizeof(struct v4l2_rds_data);
++}
++
++static unsigned int timbradio_rds_fops_poll(struct file *file,
++ struct poll_table_struct *pts)
++{
++ struct timbradio *tr = video_drvdata(file);
++
++ poll_wait(file, &tr->read_queue, pts);
++
++ if (tr->rd_index != tr->wr_index)
++ return POLLIN | POLLRDNORM;
++
++ return 0;
++}
++
++static int timbradio_rds_fops_open(struct file *file)
++{
++ struct timbradio *tr = video_drvdata(file);
++ struct i2c_adapter *adapt;
++ int err = 0;
++
++ mutex_lock(&tr->lock);
++ if (tr->open_count)
++ goto out;
++
++	/* device currently not open; if the DSP and tuner have not been
++	 * found yet, find them now
++	 */
++
++ /* find the I2C bus */
++ adapt = i2c_get_adapter(tr->pdata.i2c_adapter);
++ if (!adapt) {
++ printk(KERN_ERR DRIVER_NAME": No I2C bus\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ /* now find the tuner and dsp */
++ if (!tr->sd_dsp)
++ tr->sd_dsp = v4l2_i2c_new_subdev_board(&tr->v4l2_dev, adapt,
++ tr->pdata.dsp.module_name, tr->pdata.dsp.info, NULL);
++
++ if (!tr->sd_tuner)
++ tr->sd_tuner = v4l2_i2c_new_subdev_board(&tr->v4l2_dev, adapt,
++ tr->pdata.tuner.module_name, tr->pdata.tuner.info,
++ NULL);
++
++ i2c_put_adapter(adapt);
++
++ if (!tr->sd_tuner || !tr->sd_dsp) {
++ printk(KERN_ERR DRIVER_NAME
++ ": Failed to get tuner or DSP\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ /* enable the IRQ for receiving RDS data */
++ err = request_irq(tr->rds_irq, timbradio_irq, 0, DRIVER_NAME, tr);
++out:
++ if (!err)
++ tr->open_count++;
++ mutex_unlock(&tr->lock);
++ return err;
++}
++
++static int timbradio_rds_fops_release(struct file *file)
++{
++ struct timbradio *tr = video_drvdata(file);
++
++ mutex_lock(&tr->lock);
++ tr->open_count--;
++ if (!tr->open_count) {
++ free_irq(tr->rds_irq, tr);
++
++ tr->wr_index = 0;
++ tr->rd_index = 0;
++
++ /* cancel read processes */
++ wake_up_interruptible(&tr->read_queue);
++ }
++ mutex_unlock(&tr->lock);
++
++ return 0;
++}
++
++
++static const struct v4l2_file_operations timbradio_fops = {
++ .owner = THIS_MODULE,
++ .ioctl = video_ioctl2,
++ .read = timbradio_rds_fops_read,
++ .poll = timbradio_rds_fops_poll,
++ .open = timbradio_rds_fops_open,
++ .release = timbradio_rds_fops_release,
++};
++
++static const struct video_device timbradio_template = {
++ .name = "Timberdale Radio",
++ .fops = &timbradio_fops,
++ .ioctl_ops = &timbradio_ioctl_ops,
++ .release = video_device_release_empty,
++ .minor = -1
++};
++
++
++static int __devinit timbradio_probe(struct platform_device *pdev)
++{
++ struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
++ struct timbradio *tr;
++ struct resource *iomem;
++ int irq;
++ int err;
++
++ if (!pdata) {
++ printk(KERN_ERR DRIVER_NAME": Platform data missing\n");
++ err = -EINVAL;
++ goto err;
++ }
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -ENODEV;
++ goto err;
++ }
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ err = -ENODEV;
++ goto err;
++ }
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ DRIVER_NAME)) {
++ err = -EBUSY;
++ goto err;
++ }
++
++ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
++ if (!tr) {
++ err = -ENOMEM;
++ goto err_alloc;
++ }
++ mutex_init(&tr->lock);
++
++ tr->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!tr->membase) {
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ tr->pdata = *pdata;
++
++ tr->video_dev = video_device_alloc();
++ if (!tr->video_dev) {
++ err = -ENOMEM;
++		goto err_video_alloc;
++ }
++ *tr->video_dev = timbradio_template;
++ tr->rds_irq = irq;
++ init_waitqueue_head(&tr->read_queue);
++
++ strlcpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name));
++ err = v4l2_device_register(NULL, &tr->v4l2_dev);
++ if (err)
++ goto err_v4l2_dev;
++
++ tr->video_dev->v4l2_dev = &tr->v4l2_dev;
++
++ err = video_register_device(tr->video_dev, VFL_TYPE_RADIO, -1);
++ if (err) {
++ printk(KERN_ALERT DRIVER_NAME": Error reg video\n");
++ goto err_video_req;
++ }
++
++ video_set_drvdata(tr->video_dev, tr);
++
++ platform_set_drvdata(pdev, tr);
++ return 0;
++
++err_video_req:
++	v4l2_device_unregister(&tr->v4l2_dev);
++err_v4l2_dev:
++	if (tr->video_dev->minor != -1)
++		video_unregister_device(tr->video_dev);
++	else
++		video_device_release(tr->video_dev);
++err_video_alloc:
++	iounmap(tr->membase);
++err_ioremap:
++ kfree(tr);
++err_alloc:
++ release_mem_region(iomem->start, resource_size(iomem));
++err:
++ printk(KERN_ERR DRIVER_NAME ": Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int __devexit timbradio_remove(struct platform_device *pdev)
++{
++ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ struct timbradio *tr = platform_get_drvdata(pdev);
++
++ if (tr->video_dev->minor != -1)
++ video_unregister_device(tr->video_dev);
++ else
++ video_device_release(tr->video_dev);
++
++ v4l2_device_unregister(&tr->v4l2_dev);
++
++ iounmap(tr->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ kfree(tr);
++
++ return 0;
++}
++
++static struct platform_driver timbradio_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbradio_probe,
++ .remove = timbradio_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbradio_init(void)
++{
++ return platform_driver_register(&timbradio_platform_driver);
++}
++
++static void __exit timbradio_exit(void)
++{
++ platform_driver_unregister(&timbradio_platform_driver);
++}
++
++module_init(timbradio_init);
++module_exit(timbradio_exit);
++
++MODULE_DESCRIPTION("Timberdale Radio driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:"DRIVER_NAME);
+diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
+new file mode 100644
+index 0000000..8bd6725
+--- /dev/null
++++ b/drivers/media/radio/saa7706h.c
+@@ -0,0 +1,451 @@
++/*
++ * saa7706h.c Philips SAA7706H Car Radio DSP driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/i2c.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++
++#define DRIVER_NAME "saa7706h"
++
++/* the I2C memory map looks like this
++
++ $1C00 - $FFFF Not Used
++ $2200 - $3FFF Reserved YRAM (DSP2) space
++ $2000 - $21FF YRAM (DSP2)
++ $1FF0 - $1FFF Hardware Registers
++ $1280 - $1FEF Reserved XRAM (DSP2) space
++ $1000 - $127F XRAM (DSP2)
++ $0FFF DSP CONTROL
++ $0A00 - $0FFE Reserved
++ $0980 - $09FF Reserved YRAM (DSP1) space
++ $0800 - $097F YRAM (DSP1)
++ $0200 - $07FF Not Used
++ $0180 - $01FF Reserved XRAM (DSP1) space
++ $0000 - $017F XRAM (DSP1)
++*/
++
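++/* the register defines below index directly into this map; for instance
++ * SAA7706H_REG_CTRL (0x0fff) is the DSP CONTROL word
++ */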
++#define SAA7706H_REG_CTRL 0x0fff
++#define SAA7706H_CTRL_BYP_PLL 0x0001
++#define SAA7706H_CTRL_PLL_DIV_MASK 0x003e
++#define SAA7706H_CTRL_PLL3_62975MHZ 0x003e
++#define SAA7706H_CTRL_DSP_TURBO 0x0040
++#define SAA7706H_CTRL_PC_RESET_DSP1 0x0080
++#define SAA7706H_CTRL_PC_RESET_DSP2 0x0100
++#define SAA7706H_CTRL_DSP1_ROM_EN_MASK 0x0600
++#define SAA7706H_CTRL_DSP1_FUNC_PROM 0x0000
++#define SAA7706H_CTRL_DSP2_ROM_EN_MASK 0x1800
++#define SAA7706H_CTRL_DSP2_FUNC_PROM 0x0000
++#define SAA7706H_CTRL_DIG_SIL_INTERPOL 0x8000
++
++#define SAA7706H_REG_EVALUATION 0x1ff0
++#define SAA7706H_EVAL_DISABLE_CHARGE_PUMP 0x000001
++#define SAA7706H_EVAL_DCS_CLOCK 0x000002
++#define SAA7706H_EVAL_GNDRC1_ENABLE 0x000004
++#define SAA7706H_EVAL_GNDRC2_ENABLE 0x000008
++
++#define SAA7706H_REG_CL_GEN1 0x1ff3
++#define SAA7706H_CL_GEN1_MIN_LOOPGAIN_MASK 0x00000f
++#define SAA7706H_CL_GEN1_LOOPGAIN_MASK 0x0000f0
++#define SAA7706H_CL_GEN1_COARSE_RATION 0xffff00
++
++#define SAA7706H_REG_CL_GEN2 0x1ff4
++#define SAA7706H_CL_GEN2_WSEDGE_FALLING 0x000001
++#define SAA7706H_CL_GEN2_STOP_VCO 0x000002
++#define SAA7706H_CL_GEN2_FRERUN 0x000004
++#define SAA7706H_CL_GEN2_ADAPTIVE 0x000008
++#define SAA7706H_CL_GEN2_FINE_RATIO_MASK 0x0ffff0
++
++#define SAA7706H_REG_CL_GEN4 0x1ff6
++#define SAA7706H_CL_GEN4_BYPASS_PLL1 0x001000
++#define SAA7706H_CL_GEN4_PLL1_DIV_MASK 0x03e000
++#define SAA7706H_CL_GEN4_DSP1_TURBO 0x040000
++
++#define SAA7706H_REG_SEL 0x1ff7
++#define SAA7706H_SEL_DSP2_SRCA_MASK 0x000007
++#define SAA7706H_SEL_DSP2_FMTA_MASK 0x000031
++#define SAA7706H_SEL_DSP2_SRCB_MASK 0x0001c0
++#define SAA7706H_SEL_DSP2_FMTB_MASK 0x000e00
++#define SAA7706H_SEL_DSP1_SRC_MASK 0x003000
++#define SAA7706H_SEL_DSP1_FMT_MASK 0x01c003
++#define SAA7706H_SEL_SPDIF2 0x020000
++#define SAA7706H_SEL_HOST_IO_FMT_MASK 0x1c0000
++#define SAA7706H_SEL_EN_HOST_IO 0x200000
++
++#define SAA7706H_REG_IAC 0x1ff8
++#define SAA7706H_REG_CLK_SET 0x1ff9
++#define SAA7706H_REG_CLK_COEFF 0x1ffa
++#define SAA7706H_REG_INPUT_SENS 0x1ffb
++#define SAA7706H_INPUT_SENS_RDS_VOL_MASK 0x0003f
++#define SAA7706H_INPUT_SENS_FM_VOL_MASK 0x00fc0
++#define SAA7706H_INPUT_SENS_FM_MPX 0x01000
++#define SAA7706H_INPUT_SENS_OFF_FILTER_A_EN 0x02000
++#define SAA7706H_INPUT_SENS_OFF_FILTER_B_EN 0x04000
++#define SAA7706H_REG_PHONE_NAV_AUDIO 0x1ffc
++#define SAA7706H_REG_IO_CONF_DSP2 0x1ffd
++#define SAA7706H_REG_STATUS_DSP2 0x1ffe
++#define SAA7706H_REG_PC_DSP2 0x1fff
++
++#define SAA7706H_DSP1_MOD0 0x0800
++#define SAA7706H_DSP1_ROM_VER 0x097f
++#define SAA7706H_DSP2_MPTR0 0x1000
++
++#define SAA7706H_DSP1_MODPNTR 0x0000
++
++#define SAA7706H_DSP2_XMEM_CONTLLCW 0x113e
++#define SAA7706H_DSP2_XMEM_BUSAMP 0x114a
++#define SAA7706H_DSP2_XMEM_FDACPNTR 0x11f9
++#define SAA7706H_DSP2_XMEM_IIS1PNTR 0x11fb
++
++#define SAA7706H_DSP2_YMEM_PVGA 0x212a
++#define SAA7706H_DSP2_YMEM_PVAT1 0x212b
++#define SAA7706H_DSP2_YMEM_PVAT 0x212c
++#define SAA7706H_DSP2_YMEM_ROM_VER 0x21ff
++
++#define SUPPORTED_DSP1_ROM_VER 0x667
++
++struct saa7706h_state {
++ struct v4l2_subdev sd;
++ unsigned muted;
++};
++
++static inline struct saa7706h_state *to_state(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct saa7706h_state, sd);
++}
++
++static int saa7706h_i2c_send(struct i2c_client *client, const u8 *data, int len)
++{
++ int err = i2c_master_send(client, data, len);
++ if (err == len)
++ return 0;
++ return err > 0 ? -EIO : err;
++}
++
++static int saa7706h_i2c_transfer(struct i2c_client *client,
++ struct i2c_msg *msgs, int num)
++{
++ int err = i2c_transfer(client->adapter, msgs, num);
++ if (err == num)
++ return 0;
++ return err > 0 ? -EIO : err;
++}
++
++static int saa7706h_set_reg24(struct v4l2_subdev *sd, u16 reg, u32 val)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 buf[5];
++ int pos = 0;
++
++ buf[pos++] = reg >> 8;
++ buf[pos++] = reg;
++ buf[pos++] = val >> 16;
++ buf[pos++] = val >> 8;
++ buf[pos++] = val;
++
++ return saa7706h_i2c_send(client, buf, pos);
++}
++
++static int saa7706h_set_reg24_err(struct v4l2_subdev *sd, u16 reg, u32 val,
++ int *err)
++{
++ return *err ? *err : saa7706h_set_reg24(sd, reg, val);
++}
++
++static int saa7706h_set_reg16(struct v4l2_subdev *sd, u16 reg, u16 val)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 buf[4];
++ int pos = 0;
++
++ buf[pos++] = reg >> 8;
++ buf[pos++] = reg;
++ buf[pos++] = val >> 8;
++ buf[pos++] = val;
++
++ return saa7706h_i2c_send(client, buf, pos);
++}
++
++static int saa7706h_set_reg16_err(struct v4l2_subdev *sd, u16 reg, u16 val,
++ int *err)
++{
++ return *err ? *err : saa7706h_set_reg16(sd, reg, val);
++}
++
++static int saa7706h_get_reg16(struct v4l2_subdev *sd, u16 reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 buf[2];
++ int err;
++ u8 regaddr[] = {reg >> 8, reg};
++ struct i2c_msg msg[] = { {client->addr, 0, sizeof(regaddr), regaddr},
++ {client->addr, I2C_M_RD, sizeof(buf), buf} };
++
++ err = saa7706h_i2c_transfer(client, msg, ARRAY_SIZE(msg));
++ if (err)
++ return err;
++
++ return buf[0] << 8 | buf[1];
++}
++
++static int saa7706h_unmute(struct v4l2_subdev *sd)
++{
++ struct saa7706h_state *state = to_state(sd);
++ int err = 0;
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_REG_CTRL,
++ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
++ SAA7706H_CTRL_PC_RESET_DSP2, &err);
++
++	/* newer versions of the chip require a short sleep after reset */
++ msleep(1);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_REG_CTRL,
++ SAA7706H_CTRL_PLL3_62975MHZ, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_EVALUATION, 0, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CL_GEN1, 0x040022, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CL_GEN2,
++ SAA7706H_CL_GEN2_WSEDGE_FALLING, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CL_GEN4, 0x024080, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_SEL, 0x200080, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_IAC, 0xf4caed, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CLK_SET, 0x124334, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CLK_COEFF, 0x004a1a,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_INPUT_SENS, 0x0071c7,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_PHONE_NAV_AUDIO,
++ 0x0e22ff, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_IO_CONF_DSP2, 0x001ff8,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_STATUS_DSP2, 0x080003,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_PC_DSP2, 0x000004, &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP1_MOD0, 0x0c6c, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_MPTR0, 0x000b4b, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP1_MODPNTR, 0x000600, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP1_MODPNTR, 0x0000c0, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000819,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_CONTLLCW, 0x00085a,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_BUSAMP, 0x7fffff,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_FDACPNTR, 0x2000cb,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_IIS1PNTR, 0x2000cb,
++ &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP2_YMEM_PVGA, 0x0f80, &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP2_YMEM_PVAT1, 0x0800,
++ &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP2_YMEM_PVAT, 0x0800, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000905,
++ &err);
++ if (!err)
++ state->muted = 0;
++ return err;
++}
++
++static int saa7706h_mute(struct v4l2_subdev *sd)
++{
++ struct saa7706h_state *state = to_state(sd);
++ int err;
++
++ err = saa7706h_set_reg16(sd, SAA7706H_REG_CTRL,
++ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
++ SAA7706H_CTRL_PC_RESET_DSP2);
++ if (!err)
++ state->muted = 1;
++ return err;
++}
++
++static int saa7706h_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
++{
++ switch (qc->id) {
++ case V4L2_CID_AUDIO_MUTE:
++ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
++ }
++ return -EINVAL;
++}
++
++static int saa7706h_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct saa7706h_state *state = to_state(sd);
++
++ switch (ctrl->id) {
++ case V4L2_CID_AUDIO_MUTE:
++ ctrl->value = state->muted;
++ return 0;
++ }
++ return -EINVAL;
++}
++
++static int saa7706h_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ switch (ctrl->id) {
++ case V4L2_CID_AUDIO_MUTE:
++ if (ctrl->value)
++ return saa7706h_mute(sd);
++ return saa7706h_unmute(sd);
++ }
++ return -EINVAL;
++}
++
++static int saa7706h_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA7706H, 0);
++}
++
++static const struct v4l2_subdev_core_ops saa7706h_core_ops = {
++ .g_chip_ident = saa7706h_g_chip_ident,
++ .queryctrl = saa7706h_queryctrl,
++ .g_ctrl = saa7706h_g_ctrl,
++ .s_ctrl = saa7706h_s_ctrl,
++};
++
++static const struct v4l2_subdev_ops saa7706h_ops = {
++ .core = &saa7706h_core_ops,
++};
++
++/*
++ * Generic I2C probe
++ * Concerning the addresses: I2C uses 7-bit addresses (without the R/W bit),
++ * hence the '>>1'
++ */
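++/* illustrative example (address values are hypothetical): a datasheet
++ * 8-bit write address of 0x38 corresponds to the 7-bit address
++ * 0x38 >> 1 = 0x1c that the I2C core expects
++ */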
++
++static int __devinit saa7706h_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct saa7706h_state *state;
++ struct v4l2_subdev *sd;
++ int err;
++
++ /* Check if the adapter supports the needed features */
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -EIO;
++
++ v4l_info(client, "chip found @ 0x%02x (%s)\n",
++ client->addr << 1, client->adapter->name);
++
++ state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
++ if (state == NULL)
++ return -ENOMEM;
++ sd = &state->sd;
++ v4l2_i2c_subdev_init(sd, client, &saa7706h_ops);
++
++ /* check the rom versions */
++ err = saa7706h_get_reg16(sd, SAA7706H_DSP1_ROM_VER);
++ if (err < 0)
++ goto err;
++ if (err != SUPPORTED_DSP1_ROM_VER)
++ v4l2_warn(sd, "Unknown DSP1 ROM code version: 0x%x\n", err);
++
++ state->muted = 1;
++
++ /* startup in a muted state */
++ err = saa7706h_mute(sd);
++ if (err)
++ goto err;
++
++ return 0;
++
++err:
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_state(sd));
++
++ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", err);
++
++ return err;
++}
++
++static int __devexit saa7706h_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ saa7706h_mute(sd);
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_state(sd));
++ return 0;
++}
++
++static const struct i2c_device_id saa7706h_id[] = {
++ {DRIVER_NAME, 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, saa7706h_id);
++
++static struct i2c_driver saa7706h_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ },
++ .probe = saa7706h_probe,
++ .remove = saa7706h_remove,
++ .id_table = saa7706h_id,
++};
++
++static __init int saa7706h_init(void)
++{
++ return i2c_add_driver(&saa7706h_driver);
++}
++
++static __exit void saa7706h_exit(void)
++{
++ i2c_del_driver(&saa7706h_driver);
++}
++
++module_init(saa7706h_init);
++module_exit(saa7706h_exit);
++
++MODULE_DESCRIPTION("SAA7706H Car Radio DSP driver");
++MODULE_AUTHOR("Mocean Laboratories");
++MODULE_LICENSE("GPL v2");
++
+diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
+index 2f83be7..51aa8f5 100644
+--- a/drivers/media/video/Kconfig
++++ b/drivers/media/video/Kconfig
+@@ -923,6 +923,14 @@ config VIDEO_OMAP2
+ ---help---
+ This is a v4l2 driver for the TI OMAP2 camera capture interface
+
++config VIDEO_TIMBERDALE
++ tristate "Support for timberdale Video In/LogiWIN"
++ depends on VIDEO_V4L2 && I2C
++ select TIMB_DMA
++ select VIDEO_ADV7180
++ ---help---
++	  Add support for the Video In peripheral of the Timberdale FPGA.
++
+ #
+ # USB Multimedia device configuration
+ #
+diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
+index 2af68ee..58ece5c 100644
+--- a/drivers/media/video/Makefile
++++ b/drivers/media/video/Makefile
+@@ -162,6 +162,8 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
+ obj-$(CONFIG_VIDEO_AU0828) += au0828/
+
++obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
++
+ obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
+ obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
+
+diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
+index 0826f0d..eb8e32c 100644
+--- a/drivers/media/video/adv7180.c
++++ b/drivers/media/video/adv7180.c
+@@ -90,6 +90,9 @@
+ #define ADV7180_IMR3_ADI 0x4C
+ #define ADV7180_IMR4_ADI 0x50
+
++#define ADV7180_NTSC_V_BIT_END_REG 0xE6
++#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
++
+ struct adv7180_state {
+ struct v4l2_subdev sd;
+ struct work_struct work;
+@@ -348,6 +351,14 @@ static __devinit int adv7180_probe(struct i2c_client *client,
+ if (ret < 0)
+ goto err_unreg_subdev;
+
++
++ /* Manually set V bit end position in NTSC mode */
++ ret = i2c_smbus_write_byte_data(client,
++ ADV7180_NTSC_V_BIT_END_REG,
++ ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
++ if (ret < 0)
++ goto err_unreg_subdev;
++
+ /* read current norm */
+ __adv7180_status(client, NULL, &state->curr_norm);
+
+diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
+new file mode 100644
+index 0000000..b232f61
+--- /dev/null
++++ b/drivers/media/video/timblogiw.c
+@@ -0,0 +1,1038 @@
++/*
++ * timblogiw.c timberdale FPGA LogiWin Video In driver
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA LogiWin Video In
++ */
++
++#include <linux/list.h>
++#include <linux/version.h>
++#include <linux/dma-mapping.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-device.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include "timblogiw.h"
++#include <linux/i2c.h>
++
++#define DRIVER_NAME "timb-video"
++
++#define TIMBLOGIW_CTRL 0x40
++
++#define TIMBLOGIW_H_SCALE 0x20
++#define TIMBLOGIW_V_SCALE 0x28
++
++#define TIMBLOGIW_X_CROP 0x58
++#define TIMBLOGIW_Y_CROP 0x60
++
++#define TIMBLOGIW_W_CROP 0x00
++#define TIMBLOGIW_H_CROP 0x08
++
++#define TIMBLOGIW_VERSION_CODE 0x03
++
++#define TIMBLOGIW_BUF 0x04
++#define TIMBLOGIW_TBI 0x2c
++#define TIMBLOGIW_BPL 0x30
++
++#define BYTES_PER_LINE (720 * 2)
++
++#define DMA_BUFFER_SIZE (BYTES_PER_LINE * 576)
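++/* one DMA buffer holds a full PAL frame: 720 pixels * 2 bytes * 576 lines
++ * = 829440 bytes; an NTSC frame (480 lines) fits in the same buffer
++ */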
++
++#define TIMBLOGIW_VIDEO_FORMAT V4L2_PIX_FMT_UYVY
++
++#define TIMBLOGIW_HAS_DECODER(lw)	((lw)->pdata.encoder.module_name)
++
++static void timblogiw_release_buffers(struct timblogiw *lw);
++
++static const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
++ {
++ .std = V4L2_STD_PAL,
++ .width = 720,
++ .height = 576
++ },
++ {
++ .std = V4L2_STD_NTSC,
++ .width = 720,
++ .height = 480
++ }
++};
++
++static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
++{
++ return norm->width * 2;
++}
++
++
++static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
++{
++ return norm->height * timblogiw_bytes_per_line(norm);
++}
++
++static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
++{
++ int i;
++ for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
++ if (timblogiw_tvnorms[i].std & std)
++ return timblogiw_tvnorms + i;
++
++ /* default to first element */
++ return timblogiw_tvnorms;
++}
++
++static void timblogiw_dma_cb(void *data)
++{
++ struct timblogiw *lw = (struct timblogiw *)data;
++
++ dev_dbg(&lw->video_dev.dev, "%s: frame RX\n", __func__);
++ tasklet_schedule(&lw->tasklet);
++}
++
++static int __timblogiw_start_dma(struct timblogiw *lw)
++{
++ struct timbdma_transfer *transfer = lw->dma.transfer + lw->dma.curr;
++ struct dma_async_tx_descriptor *desc;
++ int sg_elems;
++ int bytes_per_desc =
++ TIMBLOGIW_LINES_PER_DESC *
++ timblogiw_bytes_per_line(lw->cur_norm);
++
++ sg_elems = timblogiw_frame_size(lw->cur_norm) / bytes_per_desc;
++ sg_elems +=
++ (timblogiw_frame_size(lw->cur_norm) % bytes_per_desc) ? 1 : 0;
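++	/* e.g. for PAL (720x576, 2 bytes/pixel): 45 lines * 1440 bytes =
++	 * 64800 bytes per descriptor, so an 829440 byte frame needs 13
++	 * scatterlist elements
++	 */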
++
++ dev_dbg(&lw->video_dev.dev, "Preparing DMA descriptor, elems: %d\n",
++ sg_elems);
++
++ desc = lw->chan->device->device_prep_slave_sg(lw->chan,
++ transfer->sg, sg_elems, DMA_FROM_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++ if (!desc)
++ return -ENOMEM;
++
++ desc->callback_param = lw;
++ desc->callback = timblogiw_dma_cb;
++ dev_dbg(&lw->video_dev.dev, "Submitting DMA descriptor\n");
++ transfer->cookie = desc->tx_submit(desc);
++
++ return 0;
++}
++
++static void timblogiw_handleframe(unsigned long arg)
++{
++ struct timblogiw_frame *f;
++ struct timblogiw *lw = (struct timblogiw *)arg;
++
++ if (lw->stream == STREAM_OFF)
++ return;
++
++ spin_lock(&lw->queue_lock);
++
++ if (!lw->dma.filled) {
++ /* Got a frame, store it, and flip to next DMA buffer */
++ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
++ lw->dma.curr = !lw->dma.curr;
++	} else {
++		dev_dbg(&lw->video_dev.dev, "No free frame\n");
++	}
++
++ __timblogiw_start_dma(lw);
++
++
++ if (lw->dma.filled && !list_empty(&lw->inqueue)) {
++ /* put the entry in the outqueue */
++ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
++
++ /* sync memory and unmap */
++ dma_sync_single_for_cpu(lw->dev,
++ sg_dma_address(lw->dma.filled->sg),
++ timblogiw_frame_size(lw->cur_norm), DMA_FROM_DEVICE);
++
++ /* copy data from the DMA buffer */
++ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
++ /* buffer consumed */
++ lw->dma.filled = NULL;
++
++ do_gettimeofday(&f->buf.timestamp);
++ f->buf.sequence = ++lw->frame_count;
++ f->buf.field = V4L2_FIELD_NONE;
++ f->state = F_DONE;
++ f->buf.bytesused = f->buf.length;
++ list_move_tail(&f->frame, &lw->outqueue);
++ /* wake up any waiter */
++ wake_up(&lw->wait_frame);
++ } else {
++		/* No user buffer is available, so drop the data;
++		 * a stale video frame is of no use to anyone.
++		 */
++ lw->dma.filled = NULL;
++ }
++ spin_unlock(&lw->queue_lock);
++}
++
++
++static void timblogiw_empty_framequeues(struct timblogiw *lw)
++{
++ u32 i;
++
++ dev_dbg(&lw->video_dev.dev, "%s entry\n", __func__);
++
++ INIT_LIST_HEAD(&lw->inqueue);
++ INIT_LIST_HEAD(&lw->outqueue);
++
++ for (i = 0; i < lw->num_frames; i++) {
++ lw->frame[i].state = F_UNUSED;
++ lw->frame[i].buf.bytesused = 0;
++ }
++}
++
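++/* DMA channel filter: accept only the channel whose id matches the
++ * dma_channel number from the platform data (passed in as filter_param
++ * by timblogiw_request_buffers below)
++ */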
++static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
++{
++ return chan->chan_id == (int)filter_param;
++}
++
++static u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
++{
++	/* must be page aligned because the buffers can be mapped
++	 * individually
++	 */
++ const size_t imagesize = PAGE_ALIGN(timblogiw_frame_size(lw->cur_norm));
++ struct timbdma_transfer *t0 = lw->dma.transfer;
++ struct timbdma_transfer *t1 = lw->dma.transfer + 1;
++ int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC * BYTES_PER_LINE;
++ dma_cap_mask_t mask;
++ void *buff = NULL;
++ dma_addr_t addr;
++ u32 size;
++ int ret;
++ int i;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ dev_dbg(&lw->video_dev.dev, "%s - request of %i buffers of size %zi\n",
++ __func__, count, imagesize);
++
++ t0->buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
++ if (!t0->buf)
++ goto err;
++
++ t1->buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
++ if (!t1->buf)
++ goto err;
++
++ sg_init_table(t0->sg, ARRAY_SIZE(t0->sg));
++ sg_init_table(t1->sg, ARRAY_SIZE(t1->sg));
++
++ /* map up the DMA buffers */
++ addr = dma_map_single(lw->dev, t0->buf, DMA_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ ret = dma_mapping_error(lw->dev, addr);
++ if (ret)
++ goto err;
++
++ for (i = 0, size = 0; size < DMA_BUFFER_SIZE; i++) {
++ sg_dma_address(t0->sg + i) = addr + size;
++ size += bytes_per_desc;
++ sg_dma_len(t0->sg + i) = (size > DMA_BUFFER_SIZE) ?
++ (bytes_per_desc - (size - DMA_BUFFER_SIZE)) :
++ bytes_per_desc;
++ }
++
++ addr = dma_map_single(lw->dev, t1->buf, DMA_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ ret = dma_mapping_error(lw->dev, addr);
++ if (ret)
++ goto err;
++
++ for (i = 0, size = 0; size < DMA_BUFFER_SIZE; i++) {
++ sg_dma_address(t1->sg + i) = addr + size;
++ size += bytes_per_desc;
++ sg_dma_len(t1->sg + i) = (size > DMA_BUFFER_SIZE) ?
++ (bytes_per_desc - (size - DMA_BUFFER_SIZE)) :
++ bytes_per_desc;
++ }
++
++ if (count > TIMBLOGIW_NUM_FRAMES)
++ count = TIMBLOGIW_NUM_FRAMES;
++
++ lw->num_frames = count;
++ while (lw->num_frames > 0) {
++ buff = vmalloc_32(lw->num_frames * imagesize);
++ if (buff) {
++ memset(buff, 0, lw->num_frames * imagesize);
++ break;
++ }
++ lw->num_frames--;
++ }
++
++ for (i = 0; i < lw->num_frames; i++) {
++ lw->frame[i].bufmem = buff + i * imagesize;
++ lw->frame[i].buf.index = i;
++ lw->frame[i].buf.m.offset = i * imagesize;
++ lw->frame[i].buf.length = timblogiw_frame_size(lw->cur_norm);
++ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ lw->frame[i].buf.sequence = 0;
++ lw->frame[i].buf.field = V4L2_FIELD_NONE;
++ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
++ lw->frame[i].buf.flags = 0;
++ }
++
++ lw->dma.curr = 0;
++ lw->dma.filled = NULL;
++
++ /* find the DMA channel */
++ lw->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
++ (void *)lw->pdata.dma_channel);
++ if (!lw->chan) {
++ dev_err(&lw->video_dev.dev, "Failed to get DMA channel\n");
++ goto err;
++ }
++
++ return lw->num_frames;
++err:
++ timblogiw_release_buffers(lw);
++
++ return 0;
++}
++
++static void timblogiw_release_buffers(struct timblogiw *lw)
++{
++ struct timbdma_transfer *t0 = lw->dma.transfer;
++ struct timbdma_transfer *t1 = lw->dma.transfer + 1;
++
++ dev_dbg(&lw->video_dev.dev, "%s entry\n", __func__);
++
++ if (lw->chan)
++ dma_release_channel(lw->chan);
++ lw->chan = NULL;
++
++
++	vfree(lw->frame[0].bufmem);
++	lw->frame[0].bufmem = NULL;
++
++ if (sg_dma_address(t0->sg))
++ dma_unmap_single(lw->dev, sg_dma_address(t0->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(t0->sg) = 0;
++
++ if (sg_dma_address(t1->sg))
++ dma_unmap_single(lw->dev, sg_dma_address(t1->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(t1->sg) = 0;
++
++	kfree(t0->buf);
++	t0->buf = NULL;
++
++	kfree(t1->buf);
++	t1->buf = NULL;
++
++ t0->cookie = -1;
++ t1->cookie = -1;
++
++ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
++}
++
++/* IOCTL functions */
++
++static int timblogiw_g_fmt(struct file *file, void *priv,
++ struct v4l2_format *format)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s entry\n", __func__);
++
++ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ format->fmt.pix.width = lw->cur_norm->width;
++ format->fmt.pix.height = lw->cur_norm->height;
++ format->fmt.pix.pixelformat = TIMBLOGIW_VIDEO_FORMAT;
++ format->fmt.pix.bytesperline = timblogiw_bytes_per_line(lw->cur_norm);
++ format->fmt.pix.sizeimage = timblogiw_frame_size(lw->cur_norm);
++ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ format->fmt.pix.field = V4L2_FIELD_NONE;
++ return 0;
++}
++
++static int timblogiw_try_fmt(struct file *file, void *priv,
++ struct v4l2_format *format)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ struct v4l2_pix_format *pix = &format->fmt.pix;
++
++ dev_dbg(&vdev->dev,
++ "%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
++ "bytes per line %d, size image: %d, colorspace: %d\n",
++ __func__,
++ pix->width, pix->height, pix->pixelformat, pix->field,
++ pix->bytesperline, pix->sizeimage, pix->colorspace);
++
++ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ if (pix->field != V4L2_FIELD_NONE)
++ return -EINVAL;
++
++ if (pix->pixelformat != TIMBLOGIW_VIDEO_FORMAT)
++ return -EINVAL;
++
++ if ((lw->cur_norm->height != pix->height) ||
++ (lw->cur_norm->width != pix->width)) {
++ pix->width = lw->cur_norm->width;
++ pix->height = lw->cur_norm->height;
++ }
++
++ return 0;
++}
++
++static int timblogiw_querycap(struct file *file, void *priv,
++ struct v4l2_capability *cap)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++ memset(cap, 0, sizeof(*cap));
++ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
++	strncpy(cap->driver, "Timblogiw", sizeof(cap->driver)-1);
++ cap->version = TIMBLOGIW_VERSION_CODE;
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
++ V4L2_CAP_STREAMING;
++
++ return 0;
++}
++
++static int timblogiw_enum_fmt(struct file *file, void *priv,
++ struct v4l2_fmtdesc *fmt)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s, index: %d\n", __func__, fmt->index);
++
++ if (fmt->index != 0)
++ return -EINVAL;
++ memset(fmt, 0, sizeof(*fmt));
++ fmt->index = 0;
++ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++	strncpy(fmt->description, "4:2:2, packed, UYVY",
++ sizeof(fmt->description)-1);
++ fmt->pixelformat = TIMBLOGIW_VIDEO_FORMAT;
++ memset(fmt->reserved, 0, sizeof(fmt->reserved));
++
++ return 0;
++}
++
++static int timblogiw_reqbufs(struct file *file, void *priv,
++ struct v4l2_requestbuffers *rb)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ rb->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++
++ timblogiw_empty_framequeues(lw);
++
++ timblogiw_release_buffers(lw);
++ if (rb->count)
++ rb->count = timblogiw_request_buffers(lw, rb->count);
++
++ dev_dbg(&vdev->dev, "%s: io method is mmap. num bufs %i\n",
++ __func__, rb->count);
++
++ return 0;
++}
++
++static int timblogiw_querybuf(struct file *file, void *priv,
++ struct v4l2_buffer *b)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ b->index >= lw->num_frames)
++ return -EINVAL;
++
++ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
++
++ if (lw->frame[b->index].vma_use_count)
++ b->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ if (lw->frame[b->index].state == F_DONE)
++ b->flags |= V4L2_BUF_FLAG_DONE;
++ else if (lw->frame[b->index].state != F_UNUSED)
++ b->flags |= V4L2_BUF_FLAG_QUEUED;
++
++ return 0;
++}
++
++static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ b->index >= lw->num_frames)
++ return -EINVAL;
++
++ if (lw->frame[b->index].state != F_UNUSED)
++ return -EAGAIN;
++
++ if (!lw->frame[b->index].bufmem)
++ return -EINVAL;
++
++ if (b->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++
++ lw->frame[b->index].state = F_QUEUED;
++
++ spin_lock_bh(&lw->queue_lock);
++ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
++ spin_unlock_bh(&lw->queue_lock);
++
++ return 0;
++}
++
++static int timblogiw_dqbuf(struct file *file, void *priv,
++ struct v4l2_buffer *b)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ struct timblogiw_frame *f;
++ int ret = 0;
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ dev_dbg(&vdev->dev, "%s: illegal buf type!\n", __func__);
++ return -EINVAL;
++ }
++
++ if (list_empty(&lw->outqueue)) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ ret = wait_event_interruptible(lw->wait_frame,
++ !list_empty(&lw->outqueue));
++ if (ret)
++ return ret;
++ }
++
++ spin_lock_bh(&lw->queue_lock);
++ f = list_entry(lw->outqueue.next,
++ struct timblogiw_frame, frame);
++ list_del(lw->outqueue.next);
++ spin_unlock_bh(&lw->queue_lock);
++
++ f->state = F_UNUSED;
++ memcpy(b, &f->buf, sizeof(*b));
++
++ if (f->vma_use_count)
++ b->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ return 0;
++}
++
++static int timblogiw_g_std(struct file *file, void *priv, v4l2_std_id *std)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ *std = lw->cur_norm->std;
++ return 0;
++}
++
++static int timblogiw_s_std(struct file *file, void *priv, v4l2_std_id *std)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ int err = 0;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (TIMBLOGIW_HAS_DECODER(lw))
++ err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);
++
++ if (!err)
++ lw->cur_norm = timblogiw_get_norm(*std);
++
++ return err;
++}
++
++static int timblogiw_enuminput(struct file *file, void *priv,
++ struct v4l2_input *inp)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ if (inp->index != 0)
++ return -EINVAL;
++
++ memset(inp, 0, sizeof(*inp));
++ inp->index = 0;
++
++ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
++ inp->type = V4L2_INPUT_TYPE_CAMERA;
++ inp->std = V4L2_STD_ALL;
++
++ return 0;
++}
++
++static int timblogiw_g_input(struct file *file, void *priv,
++ unsigned int *input)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ *input = 0;
++
++ return 0;
++}
++
++static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ if (input != 0)
++ return -EINVAL;
++ return 0;
++}
++
++static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ struct timblogiw_frame *f;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ dev_dbg(&vdev->dev, "%s - No capture device\n", __func__);
++ return -EINVAL;
++ }
++
++ if (list_empty(&lw->inqueue)) {
++ dev_dbg(&vdev->dev, "%s - inqueue is empty\n", __func__);
++ return -EINVAL;
++ }
++
++ if (lw->stream == STREAM_ON)
++ return 0;
++
++ lw->stream = STREAM_ON;
++
++ f = list_entry(lw->inqueue.next,
++ struct timblogiw_frame, frame);
++
++ dev_dbg(&vdev->dev, "%s - f size: %d, bpr: %d, dma addr: %x\n",
++ __func__, timblogiw_frame_size(lw->cur_norm),
++ timblogiw_bytes_per_line(lw->cur_norm),
++ (int)sg_dma_address(lw->dma.transfer[lw->dma.curr].sg));
++
++ __timblogiw_start_dma(lw);
++
++ return 0;
++}
++
++static void timblogiw_stopstream(struct timblogiw *lw)
++{
++ if (lw->stream == STREAM_ON) {
++ /* The FPGA might be busy copying the current frame, we have
++ * to wait for the frame to finish
++ */
++ spin_lock_bh(&lw->queue_lock);
++ lw->stream = STREAM_OFF;
++ spin_unlock_bh(&lw->queue_lock);
++
++ dma_sync_wait(lw->chan,
++ (lw->dma.transfer + lw->dma.curr)->cookie);
++ }
++}
++
++static int timblogiw_streamoff(struct file *file, void *priv,
++ unsigned int type)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s entry\n", __func__);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ timblogiw_stopstream(lw);
++
++ timblogiw_empty_framequeues(lw);
++
++ return 0;
++}
++
++static int timblogiw_querystd(struct file *file, void *priv, v4l2_std_id *std)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s entry\n", __func__);
++
++ if (TIMBLOGIW_HAS_DECODER(lw))
++ return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
++ else {
++ *std = lw->cur_norm->std;
++ return 0;
++ }
++}
++
++static int timblogiw_enum_framesizes(struct file *file, void *priv,
++ struct v4l2_frmsizeenum *fsize)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n", __func__,
++ fsize->index, fsize->pixel_format);
++
++ if ((fsize->index != 0) ||
++ (fsize->pixel_format != TIMBLOGIW_VIDEO_FORMAT))
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = lw->cur_norm->width;
++ fsize->discrete.height = lw->cur_norm->height;
++
++ return 0;
++}
++
++
++/*******************************
++ * Device Operations functions *
++ *******************************/
++
++static int timblogiw_open(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ v4l2_std_id std = V4L2_STD_UNKNOWN;
++ int err = 0;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ spin_lock_init(&lw->queue_lock);
++ init_waitqueue_head(&lw->wait_frame);
++
++ mutex_lock(&lw->lock);
++
++ if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
++ struct i2c_adapter *adapt;
++
++ /* find the video decoder */
++ adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
++ if (!adapt) {
++ dev_err(&vdev->dev, "No I2C bus #%d\n",
++ lw->pdata.i2c_adapter);
++ err = -ENODEV;
++ goto out;
++ }
++
++ /* now find the encoder */
++ lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
++ lw->pdata.encoder.module_name, lw->pdata.encoder.info,
++ NULL);
++
++ i2c_put_adapter(adapt);
++
++ if (!lw->sd_enc) {
++ dev_err(&vdev->dev, "Failed to get encoder: %s\n",
++ lw->pdata.encoder.module_name);
++ err = -ENODEV;
++ goto out;
++ }
++ }
++
++ timblogiw_querystd(file, NULL, &std);
++ lw->cur_norm = timblogiw_get_norm(std);
++
++ lw->stream = STREAM_OFF;
++ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
++
++ timblogiw_empty_framequeues(lw);
++
++out:
++ mutex_unlock(&lw->lock);
++
++ return err;
++}
++
++static int timblogiw_close(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ mutex_lock(&lw->lock);
++
++ timblogiw_stopstream(lw);
++
++ timblogiw_release_buffers(lw);
++
++ mutex_unlock(&lw->lock);
++ return 0;
++}
++
++static ssize_t timblogiw_read(struct file *file, char __user *data,
++ size_t count, loff_t *ppos)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s - read request\n", __func__);
++
++ return -EINVAL;
++}
++
++static void timblogiw_vm_open(struct vm_area_struct *vma)
++{
++ struct timblogiw_frame *f = vma->vm_private_data;
++ f->vma_use_count++;
++}
++
++static void timblogiw_vm_close(struct vm_area_struct *vma)
++{
++ struct timblogiw_frame *f = vma->vm_private_data;
++ f->vma_use_count--;
++}
++
++static struct vm_operations_struct timblogiw_vm_ops = {
++ .open = timblogiw_vm_open,
++ .close = timblogiw_vm_close,
++};
++
++static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
++ void *pos;
++ u32 i;
++ int ret = -EINVAL;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (mutex_lock_interruptible(&lw->lock))
++ return -ERESTARTSYS;
++
++ if (!(vma->vm_flags & VM_WRITE) ||
++ size != PAGE_ALIGN(lw->frame[0].buf.length))
++ goto error_unlock;
++
++ for (i = 0; i < lw->num_frames; i++)
++ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
++ break;
++
++ if (i == lw->num_frames) {
++ dev_dbg(&vdev->dev,
++ "%s - user supplied mapping address is out of range\n",
++ __func__);
++ goto error_unlock;
++ }
++
++ vma->vm_flags |= VM_IO;
++ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
++
++ pos = lw->frame[i].bufmem;
++ while (size > 0) { /* size is page-aligned */
++ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
++ dev_dbg(&vdev->dev, "%s - vm_insert_page failed\n",
++ __func__);
++ ret = -EAGAIN;
++ goto error_unlock;
++ }
++ start += PAGE_SIZE;
++ pos += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++ vma->vm_ops = &timblogiw_vm_ops;
++ vma->vm_private_data = &lw->frame[i];
++ timblogiw_vm_open(vma);
++ ret = 0;
++
++error_unlock:
++ mutex_unlock(&lw->lock);
++ return ret;
++}
++
++static const __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
++ .vidioc_querycap = timblogiw_querycap,
++ .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
++ .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
++ .vidioc_try_fmt_vid_cap = timblogiw_try_fmt,
++ .vidioc_s_fmt_vid_cap = timblogiw_try_fmt,
++ .vidioc_reqbufs = timblogiw_reqbufs,
++ .vidioc_querybuf = timblogiw_querybuf,
++ .vidioc_qbuf = timblogiw_qbuf,
++ .vidioc_dqbuf = timblogiw_dqbuf,
++ .vidioc_g_std = timblogiw_g_std,
++ .vidioc_s_std = timblogiw_s_std,
++ .vidioc_enum_input = timblogiw_enuminput,
++ .vidioc_g_input = timblogiw_g_input,
++ .vidioc_s_input = timblogiw_s_input,
++ .vidioc_streamon = timblogiw_streamon,
++ .vidioc_streamoff = timblogiw_streamoff,
++ .vidioc_querystd = timblogiw_querystd,
++ .vidioc_enum_framesizes = timblogiw_enum_framesizes,
++};
++
++static const __devinitdata struct v4l2_file_operations timblogiw_fops = {
++ .owner = THIS_MODULE,
++ .open = timblogiw_open,
++ .release = timblogiw_close,
++ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
++ .mmap = timblogiw_mmap,
++ .read = timblogiw_read,
++};
++
++static const __devinitdata struct video_device timblogiw_template = {
++ .name = TIMBLOGIWIN_NAME,
++ .fops = &timblogiw_fops,
++ .ioctl_ops = &timblogiw_ioctl_ops,
++ .release = video_device_release_empty,
++ .minor = -1,
++ .tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
++};
++
++static int __devinit timblogiw_probe(struct platform_device *pdev)
++{
++ int err;
++ struct timblogiw *lw = NULL;
++ struct timb_video_platform_data *pdata = pdev->dev.platform_data;
++
++ if (!pdata) {
++ dev_err(&pdev->dev, "No platform data\n");
++ err = -EINVAL;
++ goto err;
++ }
++
++ if (!pdata->encoder.module_name)
++ dev_info(&pdev->dev, "Running without decoder\n");
++
++ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
++ if (!lw) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ lw->cur_norm = timblogiw_tvnorms;
++
++ if (pdev->dev.parent)
++ lw->dev = pdev->dev.parent;
++ else
++ lw->dev = &pdev->dev;
++
++ memcpy(&lw->pdata, pdata, sizeof(lw->pdata));
++
++ mutex_init(&lw->lock);
++
++ lw->video_dev = timblogiw_template;
++
++ strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
++ err = v4l2_device_register(NULL, &lw->v4l2_dev);
++ if (err)
++ goto err_register;
++
++ lw->video_dev.v4l2_dev = &lw->v4l2_dev;
++
++ err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
++ if (err) {
++ dev_err(&pdev->dev, "Error reg video: %d\n", err);
++ goto err_request;
++ }
++
++ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
++
++ platform_set_drvdata(pdev, lw);
++ video_set_drvdata(&lw->video_dev, lw);
++
++ return 0;
++
++err_request:
++ v4l2_device_unregister(&lw->v4l2_dev);
++err_register:
++ kfree(lw);
++err:
++ dev_err(&pdev->dev, "Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int timblogiw_remove(struct platform_device *pdev)
++{
++ struct timblogiw *lw = platform_get_drvdata(pdev);
++
++ video_unregister_device(&lw->video_dev);
++
++ v4l2_device_unregister(&lw->v4l2_dev);
++
++ tasklet_kill(&lw->tasklet);
++ kfree(lw);
++
++ platform_set_drvdata(pdev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver timblogiw_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timblogiw_probe,
++ .remove = __devexit_p(timblogiw_remove),
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timblogiw_init(void)
++{
++ return platform_driver_register(&timblogiw_platform_driver);
++}
++
++static void __exit timblogiw_exit(void)
++{
++ platform_driver_unregister(&timblogiw_platform_driver);
++}
++
++module_init(timblogiw_init);
++module_exit(timblogiw_exit);
++
++MODULE_DESCRIPTION("Timberdale Video In driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:"DRIVER_NAME);
++
+diff --git a/drivers/media/video/timblogiw.h b/drivers/media/video/timblogiw.h
+new file mode 100644
+index 0000000..56931e3
+--- /dev/null
++++ b/drivers/media/video/timblogiw.h
+@@ -0,0 +1,94 @@
++/*
++ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA LogiWin Video In
++ */
++
++#ifndef _TIMBLOGIW_H
++#define _TIMBLOGIW_H
++
++#include <linux/interrupt.h>
++#include <media/timb_video.h>
++#include <linux/dmaengine.h>
++#include <linux/scatterlist.h>
++
++#define TIMBLOGIWIN_NAME "Timberdale Video-In"
++
++#define TIMBLOGIW_NUM_FRAMES 10
++
++#define TIMBLOGIW_LINES_PER_DESC 45
++
++enum timblogiw_stream_state {
++ STREAM_OFF,
++ STREAM_ON,
++};
++
++enum timblogiw_frame_state {
++ F_UNUSED = 0,
++ F_QUEUED,
++ F_DONE,
++};
++
++struct timblogiw_frame {
++ void *bufmem;
++ struct v4l2_buffer buf;
++ enum timblogiw_frame_state state;
++ struct list_head frame;
++ unsigned long vma_use_count;
++};
++
++struct timblogiw_tvnorm {
++ v4l2_std_id std;
++ u16 width;
++ u16 height;
++};
++
++struct timbdma_transfer {
++ void *buf;
++ struct scatterlist sg[16];
++ dma_cookie_t cookie;
++};
++
++struct timblogiw_dma_control {
++ struct timbdma_transfer transfer[2];
++ struct timbdma_transfer *filled;
++ int curr;
++};
++
++struct timblogiw {
++ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
++ int num_frames;
++ unsigned int frame_count;
++ struct list_head inqueue, outqueue;
++ spinlock_t queue_lock; /* mutual exclusion */
++ enum timblogiw_stream_state stream;
++ struct video_device video_dev;
++ struct v4l2_device v4l2_dev;
++ struct mutex lock; /* mutual exclusion */
++ wait_queue_head_t wait_frame;
++ struct timblogiw_tvnorm const *cur_norm;
++ struct device *dev;
++ struct timblogiw_dma_control dma;
++ struct tasklet_struct tasklet;
++ struct timb_video_platform_data pdata;
++ struct v4l2_subdev *sd_enc; /* encoder */
++ struct dma_chan *chan;
++};
++
++#endif /* _TIMBLOGIW_H */
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 8782978..b0e7fbb 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -348,6 +348,17 @@ config AB4500_CORE
+ read/write functions for the devices to get access to this chip.
+ This chip embeds various other multimedia funtionalities as well.
+
++config MFD_TIMBERDALE
++ tristate "Support for the Timberdale FPGA"
++ select MFD_CORE
++ depends on PCI
++ ---help---
++ This is the core driver for the timberdale FPGA. It is a
++ multifunctional device which may provide numerous interfaces.
++
++ The timberdale FPGA can be found on the Intel Atom development board
++ for automotive in-vehicle infotainment, called Russellville.
++
+ endmenu
+
+ menu "Multimedia Capabilities Port drivers"
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index e09eb48..53375ac 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -55,4 +55,6 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
+ obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
+ obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+ obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o
+-obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
+\ No newline at end of file
++obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
++
++obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
+diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
+new file mode 100644
+index 0000000..06d2523
+--- /dev/null
++++ b/drivers/mfd/timberdale.c
+@@ -0,0 +1,1008 @@
++/*
++ * timberdale.c timberdale FPGA mfd shim driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/mfd/core.h>
++#include <linux/irq.h>
++
++#include <linux/timb_gpio.h>
++
++#include <linux/i2c.h>
++#include <linux/i2c-ocores.h>
++#include <linux/i2c-xiic.h>
++#include <linux/i2c/tsc2007.h>
++#include <linux/can/platform/ascb.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/xilinx_spi.h>
++#include <linux/spi/max7301.h>
++#include <linux/spi/mc33880.h>
++
++#include <media/timb_video.h>
++#include <media/timb_radio.h>
++#include <linux/most/timbmlb.h>
++
++#include <linux/timb_dma.h>
++
++#include <sound/timbi2s.h>
++
++#include <linux/ks8842.h>
++
++#include "timberdale.h"
++
++#define DRIVER_NAME "timberdale"
++
++struct timberdale_device {
++ resource_size_t ctl_mapbase;
++ unsigned char __iomem *ctl_membase;
++ struct {
++ u32 major;
++ u32 minor;
++ u32 config;
++ } fw;
++};
++
++/*--------------------------------------------------------------------------*/
++
++static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
++ .model = 2003,
++ .x_plate_ohms = 100
++};
++
++static struct ascb_platform_data timberdale_ascb_platform_data = {
++ .gpio_pin = GPIO_PIN_ASCB
++};
++
++static struct i2c_board_info timberdale_i2c_board_info[] = {
++ {
++ I2C_BOARD_INFO("tsc2007", 0x48),
++ .platform_data = &timberdale_tsc2007_platform_data,
++ .irq = IRQ_TIMBERDALE_TSC_INT
++ },
++ {
++ I2C_BOARD_INFO("ascb-can", 0x18),
++ .platform_data = &timberdale_ascb_platform_data,
++ }
++};
++
++static __devinitdata struct xiic_i2c_platform_data
++timberdale_xiic_platform_data = {
++ .devices = timberdale_i2c_board_info,
++ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
++};
++
++static __devinitdata struct ocores_i2c_platform_data
++timberdale_ocores_platform_data = {
++ .regstep = 4,
++ .clock_khz = 62500,
++ .devices = timberdale_i2c_board_info,
++ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
++};
++
++static const __devinitconst struct resource timberdale_xiic_resources[] = {
++ {
++ .start = XIICOFFSET,
++ .end = XIICEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2C,
++ .end = IRQ_TIMBERDALE_I2C,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static const __devinitconst struct resource timberdale_ocores_resources[] = {
++ {
++ .start = OCORESOFFSET,
++ .end = OCORESEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2C,
++ .end = IRQ_TIMBERDALE_I2C,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static const struct max7301_platform_data timberdale_max7301_platform_data = {
++ .base = 200
++};
++
++static const struct mc33880_platform_data timberdale_mc33880_platform_data = {
++ .base = 100
++};
++
++static struct spi_board_info timberdale_spi_16bit_board_info[] = {
++ {
++ .modalias = "max7301",
++ .max_speed_hz = 26000,
++ .chip_select = 2,
++ .mode = SPI_MODE_0,
++ .platform_data = &timberdale_max7301_platform_data
++ },
++};
++
++static struct spi_board_info timberdale_spi_8bit_board_info[] = {
++ {
++ .modalias = "mc33880",
++ .max_speed_hz = 4000,
++ .chip_select = 1,
++ .mode = SPI_MODE_1,
++ .platform_data = &timberdale_mc33880_platform_data
++ },
++};
++
++static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
++ /* The current (2009-03-06) revision of
++ * Timberdale supports 3 chip selects
++ */
++ .num_chipselect = 3,
++ .little_endian = true,
++ /* bits per word and devices are filled in at runtime depending
++ * on the HW config
++ */
++};
++
++static const __devinitconst struct resource timberdale_spi_resources[] = {
++ {
++ .start = SPIOFFSET,
++ .end = SPIEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_SPI,
++ .end = IRQ_TIMBERDALE_SPI,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct ks8842_platform_data
++ timberdale_ks8842_platform_data = {
++ .rx_dma_channel = DMA_ETH_RX,
++ .tx_dma_channel = DMA_ETH_TX
++};
++
++static const __devinitconst struct resource timberdale_eth_resources[] = {
++ {
++ .start = ETHOFFSET,
++ .end = ETHEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_ETHSW_IF,
++ .end = IRQ_TIMBERDALE_ETHSW_IF,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct timbgpio_platform_data
++ timberdale_gpio_platform_data = {
++ .gpio_base = 0,
++ .nr_pins = GPIO_NR_PINS,
++ .irq_base = 200,
++};
++
++static const __devinitconst struct resource timberdale_gpio_resources[] = {
++ {
++ .start = GPIOOFFSET,
++ .end = GPIOEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_GPIO,
++ .end = IRQ_TIMBERDALE_GPIO,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct timbmlb_platform_data
++ timberdale_mlb_platform_data = {
++ .reset_pin = GPIO_PIN_INIC_RST,
++ .rx_dma_channel = DMA_MLB_RX,
++ .tx_dma_channel = DMA_MLB_TX
++};
++
++static const __devinitconst struct resource timberdale_most_resources[] = {
++ {
++ .start = MOSTOFFSET,
++ .end = MOSTEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLB,
++ .end = IRQ_TIMBERDALE_MLB,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static const __devinitconst struct resource timberdale_mlogicore_resources[] = {
++ {
++ .start = MLCOREOFFSET,
++ .end = MLCOREEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLCORE,
++ .end = IRQ_TIMBERDALE_MLCORE,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLCORE_BUF,
++ .end = IRQ_TIMBERDALE_MLCORE_BUF,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static const __devinitconst struct resource timberdale_uart_resources[] = {
++ {
++ .start = UARTOFFSET,
++ .end = UARTEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_UART,
++ .end = IRQ_TIMBERDALE_UART,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static const __devinitconst struct resource timberdale_uartlite_resources[] = {
++ {
++ .start = UARTLITEOFFSET,
++ .end = UARTLITEEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_UARTLITE,
++ .end = IRQ_TIMBERDALE_UARTLITE,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct timbi2s_bus_data timbi2s_bus_data[] = {
++ {
++ .rx = 0,
++ .sample_rate = 8000,
++ },
++ {
++ .rx = 1,
++ .sample_rate = 8000,
++ },
++ {
++ .rx = 1,
++ .sample_rate = 44100,
++ },
++};
++
++static __devinitdata struct timbi2s_platform_data timbi2s_platform_data = {
++ .busses = timbi2s_bus_data,
++ .num_busses = ARRAY_SIZE(timbi2s_bus_data),
++ .main_clk = 62500000,
++};
++
++static const __devinitconst struct resource timberdale_i2s_resources[] = {
++ {
++ .start = I2SOFFSET,
++ .end = I2SEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2S,
++ .end = IRQ_TIMBERDALE_I2S,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct i2c_board_info timberdale_adv7180_i2c_board_info = {
++ /* Requires jumper JP9 to be off */
++ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
++ .irq = IRQ_TIMBERDALE_ADV7180
++};
++
++static __devinitdata struct timb_video_platform_data
++ timberdale_video_platform_data = {
++ .dma_channel = DMA_VIDEO_RX,
++ .i2c_adapter = 0,
++ .encoder = {
++ .module_name = "adv7180",
++ .info = &timberdale_adv7180_i2c_board_info
++ }
++};
++
++static const __devinitconst struct resource timberdale_radio_resources[] = {
++ {
++ .start = RDSOFFSET,
++ .end = RDSEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_RDS,
++ .end = IRQ_TIMBERDALE_RDS,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct i2c_board_info timberdale_tef6868_i2c_board_info = {
++ I2C_BOARD_INFO("tef6862", 0x60)
++};
++
++static __devinitdata struct i2c_board_info timberdale_saa7706_i2c_board_info = {
++ I2C_BOARD_INFO("saa7706h", 0x1C)
++};
++
++static __devinitdata struct timb_radio_platform_data
++ timberdale_radio_platform_data = {
++ .i2c_adapter = 0,
++ .tuner = {
++ .module_name = "tef6862",
++ .info = &timberdale_tef6868_i2c_board_info
++ },
++ .dsp = {
++ .module_name = "saa7706h",
++ .info = &timberdale_saa7706_i2c_board_info
++ }
++};
++
++static const __devinitconst struct resource timberdale_video_resources[] = {
++ {
++ .start = LOGIWOFFSET,
++ .end = LOGIWEND,
++ .flags = IORESOURCE_MEM,
++ },
++ /*
++ * Note that the "frame buffer" is located in the DMA area
++ * starting at 0x1200000
++ */
++};
++
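++/* One entry per DMA channel, in the same order as the DMA_* channel
++ * numbers defined in timberdale.h (DMA_UART_RX .. DMA_ETH_TX).
++ */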
++static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
++ .nr_channels = 10,
++ .channels = {
++ {
++ /* UART RX */
++ .rx = true,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* UART TX */
++ .rx = false,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* MLB RX */
++ .rx = true,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* MLB TX */
++ .rx = false,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* Video RX */
++ .rx = true,
++ .bytes_per_line = 1440,
++ .descriptors = 2,
++ .descriptor_elements = 16
++ },
++ {
++ /* Video framedrop */
++ },
++ {
++ /* SDHCI RX */
++ .rx = true,
++ },
++ {
++ /* SDHCI TX */
++ },
++ {
++ /* ETH RX */
++ .rx = true,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* ETH TX */
++ .rx = false,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ }
++};
++
++static const __devinitconst struct resource timberdale_dma_resources[] = {
++ {
++ .start = DMAOFFSET,
++ .end = DMAEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_DMA,
++ .end = IRQ_TIMBERDALE_DMA,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
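++/* One mfd_cell table per FPGA configuration; timb_probe() registers one of
++ * the cfg0..cfg3 tables below based on the IP setup field read from the
++ * TIMB_HW_CONFIG register.
++ */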
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "xiic-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
++ .resources = timberdale_xiic_resources,
++ .platform_data = &timberdale_xiic_platform_data,
++ .data_size = sizeof(timberdale_xiic_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-i2s",
++ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
++ .resources = timberdale_i2s_resources,
++ .platform_data = &timbi2s_platform_data,
++ .data_size = sizeof(timbi2s_platform_data),
++ },
++ {
++ .name = "timb-most",
++ .num_resources = ARRAY_SIZE(timberdale_most_resources),
++ .resources = timberdale_most_resources,
++ .platform_data = &timberdale_mlb_platform_data,
++ .data_size = sizeof(timberdale_mlb_platform_data),
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++ {
++ .name = "ks8842",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ .platform_data = &timberdale_ks8842_platform_data,
++ .data_size = sizeof(timberdale_ks8842_platform_data)
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "uartlite",
++ .num_resources = ARRAY_SIZE(timberdale_uartlite_resources),
++ .resources = timberdale_uartlite_resources,
++ },
++ {
++ .name = "xiic-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
++ .resources = timberdale_xiic_resources,
++ .platform_data = &timberdale_xiic_platform_data,
++ .data_size = sizeof(timberdale_xiic_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-mlogicore",
++ .num_resources = ARRAY_SIZE(timberdale_mlogicore_resources),
++ .resources = timberdale_mlogicore_resources,
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++ {
++ .name = "ks8842",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ .platform_data = &timberdale_ks8842_platform_data,
++ .data_size = sizeof(timberdale_ks8842_platform_data)
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "xiic-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
++ .resources = timberdale_xiic_resources,
++ .platform_data = &timberdale_xiic_platform_data,
++ .data_size = sizeof(timberdale_xiic_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "ocores-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_ocores_resources),
++ .resources = timberdale_ocores_resources,
++ .platform_data = &timberdale_ocores_platform_data,
++ .data_size = sizeof(timberdale_ocores_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-i2s",
++ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
++ .resources = timberdale_i2s_resources,
++ .platform_data = &timbi2s_platform_data,
++ .data_size = sizeof(timbi2s_platform_data),
++ },
++ {
++ .name = "timb-most",
++ .num_resources = ARRAY_SIZE(timberdale_most_resources),
++ .resources = timberdale_most_resources,
++ .platform_data = &timberdale_mlb_platform_data,
++ .data_size = sizeof(timberdale_mlb_platform_data),
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++ {
++ .name = "ks8842",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ .platform_data = &timberdale_ks8842_platform_data,
++ .data_size = sizeof(timberdale_ks8842_platform_data)
++ },
++};
++
++static const __devinitconst struct resource timberdale_sdhc_resources[] = {
++ /* located in bar 1 and bar 2 */
++ {
++ .start = SDHC0OFFSET,
++ .end = SDHC0END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_SDHC,
++ .end = IRQ_TIMBERDALE_SDHC,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
++ {
++ .name = "sdhci",
++ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
++ .resources = timberdale_sdhc_resources,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar2[] = {
++ {
++ .name = "sdhci",
++ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
++ .resources = timberdale_sdhc_resources,
++ },
++};
++
++static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct timberdale_device *priv = pci_get_drvdata(pdev);
++
++ return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor,
++ priv->fw.config);
++}
++
++static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
++
++/*--------------------------------------------------------------------------*/
++
++static int __devinit timb_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ struct timberdale_device *priv;
++ int err, i;
++ resource_size_t mapbase;
++ struct msix_entry *msix_entries = NULL;
++ u8 ip_setup;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ pci_set_drvdata(dev, priv);
++
++ err = pci_enable_device(dev);
++ if (err)
++ goto err_enable;
++
++ mapbase = pci_resource_start(dev, 0);
++ if (!mapbase) {
++ printk(KERN_ERR DRIVER_NAME ": No resource\n");
++ goto err_start;
++ }
++
++ /* create a resource for the PCI master register */
++ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
++ if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
++ printk(KERN_ERR DRIVER_NAME ": Failed to request ctl mem\n");
++ goto err_request;
++ }
++
++ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
++ if (!priv->ctl_membase) {
++ printk(KERN_ALERT DRIVER_NAME": Map error, ctl\n");
++ goto err_ioremap;
++ }
++
++ /* read the HW config */
++ priv->fw.major = ioread32(priv->ctl_membase + TIMB_REV_MAJOR);
++ priv->fw.minor = ioread32(priv->ctl_membase + TIMB_REV_MINOR);
++ priv->fw.config = ioread32(priv->ctl_membase + TIMB_HW_CONFIG);
++
++ if (priv->fw.major > TIMB_SUPPORTED_MAJOR) {
++ printk(KERN_ERR DRIVER_NAME": The driver supports an older "
++ "version of the FPGA, please update the driver to "
++ "support %d.%d\n", priv->fw.major, priv->fw.minor);
++ goto err_ioremap;
++ }
++ if (priv->fw.major < TIMB_SUPPORTED_MAJOR ||
++ priv->fw.minor < TIMB_REQUIRED_MINOR) {
++ printk(KERN_ERR DRIVER_NAME
++ ": The FPGA image is too old (%d.%d), "
++ "please upgrade the FPGA to at least: %d.%d\n",
++ priv->fw.major, priv->fw.minor,
++ TIMB_SUPPORTED_MAJOR, TIMB_REQUIRED_MINOR);
++ goto err_ioremap;
++ }
++
++ msix_entries = kzalloc(TIMBERDALE_NR_IRQS * sizeof(*msix_entries),
++ GFP_KERNEL);
++ if (!msix_entries)
++ goto err_ioremap;
++
++ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
++ msix_entries[i].entry = i;
++
++ err = pci_enable_msix(dev, msix_entries, TIMBERDALE_NR_IRQS);
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": MSI-X init failed: %d, expected entries: %d\n",
++ err, TIMBERDALE_NR_IRQS);
++ goto err_msix;
++ }
++
++ err = device_create_file(&dev->dev, &dev_attr_fw_ver);
++ if (err)
++ goto err_create_file;
++
++ /* Reset all FPGA PLB peripherals */
++ iowrite32(0x1, priv->ctl_membase + TIMB_SW_RST);
++
++ /* update IRQ offsets in I2C board info */
++ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
++ timberdale_i2c_board_info[i].irq =
++ msix_entries[timberdale_i2c_board_info[i].irq].vector;
++
++ /* Update the SPI configuration depending on the HW (8 or 16 bit) */
++ if (priv->fw.config & TIMB_HW_CONFIG_SPI_8BIT) {
++ timberdale_xspi_platform_data.bits_per_word = 8;
++ timberdale_xspi_platform_data.devices =
++ timberdale_spi_8bit_board_info;
++ timberdale_xspi_platform_data.num_devices =
++ ARRAY_SIZE(timberdale_spi_8bit_board_info);
++ } else {
++ timberdale_xspi_platform_data.bits_per_word = 16;
++ timberdale_xspi_platform_data.devices =
++ timberdale_spi_16bit_board_info;
++ timberdale_xspi_platform_data.num_devices =
++ ARRAY_SIZE(timberdale_spi_16bit_board_info);
++ }
++
++ ip_setup = priv->fw.config & TIMB_HW_VER_MASK;
++ if (ip_setup == TIMB_HW_VER0)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg0,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg0),
++ &dev->resource[0], msix_entries[0].vector);
++ else if (ip_setup == TIMB_HW_VER1)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg1,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg1),
++ &dev->resource[0], msix_entries[0].vector);
++ else if (ip_setup == TIMB_HW_VER2)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg2,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg2),
++ &dev->resource[0], msix_entries[0].vector);
++ else if (ip_setup == TIMB_HW_VER3)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg3,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg3),
++ &dev->resource[0], msix_entries[0].vector);
++ else {
++ /* unknown version */
++ printk(KERN_ERR"Uknown IP setup: %d.%d.%d\n",
++ priv->fw.major, priv->fw.minor, ip_setup);
++ err = -ENODEV;
++ goto err_mfd;
++ }
++
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": mfd_add_devices failed: %d\n", err);
++ goto err_mfd;
++ }
++
++ err = mfd_add_devices(&dev->dev, 0,
++ timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1),
++ &dev->resource[1], msix_entries[0].vector);
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ "mfd_add_devices failed: %d\n", err);
++ goto err_mfd2;
++ }
++
++ /* only version 0 and 3 have the iNand routed to SDHCI */
++ if (((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER0) ||
++ ((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) {
++ err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2,
++ ARRAY_SIZE(timberdale_cells_bar2),
++ &dev->resource[2], msix_entries[0].vector);
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": mfd_add_devices failed: %d\n", err);
++ goto err_mfd2;
++ }
++ }
++
++ kfree(msix_entries);
++
++ printk(KERN_INFO
++ "Found Timberdale Card. Rev: %d.%d, HW config: 0x%02x\n",
++ priv->fw.major, priv->fw.minor, priv->fw.config);
++
++ return 0;
++
++err_mfd2:
++ mfd_remove_devices(&dev->dev);
++err_mfd:
++ device_remove_file(&dev->dev, &dev_attr_fw_ver);
++err_create_file:
++ pci_disable_msix(dev);
++err_msix:
++ iounmap(priv->ctl_membase);
++err_ioremap:
++ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
++err_request:
++ pci_set_drvdata(dev, NULL);
++err_start:
++ pci_disable_device(dev);
++err_enable:
++ kfree(msix_entries);
++ kfree(priv);
++ pci_set_drvdata(dev, NULL);
++ return -ENODEV;
++}
++
++static void __devexit timb_remove(struct pci_dev *dev)
++{
++ struct timberdale_device *priv = pci_get_drvdata(dev);
++
++ mfd_remove_devices(&dev->dev);
++
++ device_remove_file(&dev->dev, &dev_attr_fw_ver);
++
++ iounmap(priv->ctl_membase);
++ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
++
++ pci_disable_msix(dev);
++ pci_disable_device(dev);
++ pci_set_drvdata(dev, NULL);
++ kfree(priv);
++}
++
++static struct pci_device_id timberdale_pci_tbl[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
++ { 0 }
++};
++MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
++
++static struct pci_driver timberdale_pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = timberdale_pci_tbl,
++ .probe = timb_probe,
++ .remove = __devexit_p(timb_remove),
++};
++
++static int __init timberdale_init(void)
++{
++ int err;
++
++ err = pci_register_driver(&timberdale_pci_driver);
++ if (err < 0) {
++ printk(KERN_ERR
++ "Failed to register PCI driver for %s device.\n",
++ timberdale_pci_driver.name);
++ return -ENODEV;
++ }
++
++ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
++ timberdale_pci_driver.name);
++
++ return 0;
++}
++
++static void __exit timberdale_exit(void)
++{
++ pci_unregister_driver(&timberdale_pci_driver);
++
++ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
++ timberdale_pci_driver.name);
++}
++
++module_init(timberdale_init);
++module_exit(timberdale_exit);
++
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h
+new file mode 100644
+index 0000000..e18fcea
+--- /dev/null
++++ b/drivers/mfd/timberdale.h
+@@ -0,0 +1,164 @@
++/*
++ * timberdale.h timberdale FPGA mfd shim driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA
++ */
++
++#ifndef MFD_TIMBERDALE_H
++#define MFD_TIMBERDALE_H
++
++#define DRV_VERSION "1.0"
++
++/* This driver only supports versions >= 3.8 and < 4.0 */
++#define TIMB_SUPPORTED_MAJOR 3
++
++/* This driver only supports minor >= 8 */
++#define TIMB_REQUIRED_MINOR 8
++
++/* Registers of the interrupt controller */
++#define ISR 0x00
++#define IPR 0x04
++#define IER 0x08
++#define IAR 0x0c
++#define SIE 0x10
++#define CIE 0x14
++#define MER 0x1c
++
++/* Registers of the control area */
++#define TIMB_REV_MAJOR 0x00
++#define TIMB_REV_MINOR 0x04
++#define TIMB_HW_CONFIG 0x08
++#define TIMB_SW_RST 0x40
++
++/* bits in the TIMB_HW_CONFIG register */
++#define TIMB_HW_CONFIG_SPI_8BIT 0x80
++
++#define TIMB_HW_VER_MASK 0x0f
++#define TIMB_HW_VER0 0x00
++#define TIMB_HW_VER1 0x01
++#define TIMB_HW_VER2 0x02
++#define TIMB_HW_VER3 0x03
++
++#define OCORESOFFSET 0x0
++#define OCORESEND 0x1f
++
++#define SPIOFFSET 0x80
++#define SPIEND 0xff
++
++#define UARTLITEOFFSET 0x100
++#define UARTLITEEND 0x10f
++
++#define RDSOFFSET 0x180
++#define RDSEND 0x183
++
++#define ETHOFFSET 0x300
++#define ETHEND 0x3ff
++
++#define GPIOOFFSET 0x400
++#define GPIOEND 0x7ff
++
++#define CHIPCTLOFFSET 0x800
++#define CHIPCTLEND 0x8ff
++#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
++
++#define INTCOFFSET 0xc00
++#define INTCEND 0xfff
++#define INTCSIZE (INTCEND - INTCOFFSET)
++
++#define MOSTOFFSET 0x1000
++#define MOSTEND 0x13ff
++
++#define UARTOFFSET 0x1400
++#define UARTEND 0x17ff
++
++#define XIICOFFSET 0x1800
++#define XIICEND 0x19ff
++
++#define I2SOFFSET 0x1C00
++#define I2SEND 0x1fff
++
++#define LOGIWOFFSET 0x30000
++#define LOGIWEND 0x37fff
++
++#define MLCOREOFFSET 0x40000
++#define MLCOREEND 0x43fff
++
++#define DMAOFFSET 0x01000000
++#define DMAEND 0x013fffff
++
++/* SDHC0 is placed in PCI bar 1 */
++#define SDHC0OFFSET 0x00
++#define SDHC0END 0xff
++
++/* SDHC1 is placed in PCI bar 2 */
++#define SDHC1OFFSET 0x00
++#define SDHC1END 0xff
++
++#define PCI_VENDOR_ID_TIMB 0x10ee
++#define PCI_DEVICE_ID_TIMB 0xa123
++
++#define IRQ_TIMBERDALE_INIC 0
++#define IRQ_TIMBERDALE_MLB 1
++#define IRQ_TIMBERDALE_GPIO 2
++#define IRQ_TIMBERDALE_I2C 3
++#define IRQ_TIMBERDALE_UART 4
++#define IRQ_TIMBERDALE_DMA 5
++#define IRQ_TIMBERDALE_I2S 6
++#define IRQ_TIMBERDALE_TSC_INT 7
++#define IRQ_TIMBERDALE_SDHC 8
++#define IRQ_TIMBERDALE_ADV7180 9
++#define IRQ_TIMBERDALE_ETHSW_IF 10
++#define IRQ_TIMBERDALE_SPI 11
++#define IRQ_TIMBERDALE_UARTLITE 12
++#define IRQ_TIMBERDALE_MLCORE 13
++#define IRQ_TIMBERDALE_MLCORE_BUF 14
++#define IRQ_TIMBERDALE_RDS 15
++
++#define TIMBERDALE_NR_IRQS 16
++
++/* Some of the interrupts are level triggered, some are edge triggered */
++#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
++ (1 << IRQ_TIMBERDALE_TSC_INT) | \
++ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
++
++#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
++ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
++ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
++ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO) | \
++ (1 << IRQ_TIMBERDALE_DMA))
++
++#define GPIO_PIN_ASCB 8
++#define GPIO_PIN_INIC_RST 14
++#define GPIO_PIN_BT_RST 15
++#define GPIO_NR_PINS 16
++
++/* DMA Channels */
++#define DMA_UART_RX 0
++#define DMA_UART_TX 1
++#define DMA_MLB_RX 2
++#define DMA_MLB_TX 3
++#define DMA_VIDEO_RX 4
++#define DMA_VIDEO_DROP 5
++#define DMA_SDHCI_RX 6
++#define DMA_SDHCI_TX 7
++#define DMA_ETH_RX 8
++#define DMA_ETH_TX 9
++
++#endif
++
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index dd9a09c..06ec723 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -1730,6 +1730,16 @@ config KS8842
+ This platform driver is for Micrel KSZ8842 / KS8842
+ 2-port ethernet switch chip (managed, VLAN, QoS).
+
++config KS8842_TIMB_DMA
++ bool "Use Timberdale specific DMA engine"
++ depends on KS8842 && MFD_TIMBERDALE
++ select TIMB_DMA
++ help
++ This option enables usage of the timberdale specific DMA engine
++ for the KS8842 driver. Rather than using PIO which results in
++ single accesses over PCIe, the DMA block of the timberdale FPGA
++ will burst data to and from the KS8842.
++
+ config KS8851
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index ad1346d..a99b3b8 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
+ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+ obj-$(CONFIG_EHEA) += ehea/
+ obj-$(CONFIG_CAN) += can/
++obj-$(CONFIG_MOST) += most/
+ obj-$(CONFIG_BONDING) += bonding/
+ obj-$(CONFIG_ATL1) += atlx/
+ obj-$(CONFIG_ATL2) += atlx/
+diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
+index 5c45cb5..bc93d65 100644
+--- a/drivers/net/ks8842.c
++++ b/drivers/net/ks8842.c
+@@ -1,5 +1,5 @@
+ /*
+- * ks8842_main.c timberdale KS8842 ethernet driver
++ * ks8842.c timberdale KS8842 ethernet driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -26,11 +26,22 @@
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
++#ifdef CONFIG_KS8842_TIMB_DMA
++#include <linux/ks8842.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++#endif
+
+ #define DRV_NAME "ks8842"
+
+ /* Timberdale specific Registers */
+-#define REG_TIMB_RST 0x1c
++#define REG_TIMB_RST 0x1c
++#define REG_TIMB_FIFO 0x20
++#define REG_TIMB_ISR 0x24
++#define REG_TIMB_IER 0x28
++#define REG_TIMB_IAR 0x2C
++#define REQ_TIMB_DMA_RESUME 0x30
+
+ /* KS8842 registers */
+
+@@ -73,6 +84,11 @@
+ #define IRQ_RX_ERROR 0x0080
+ #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
++#ifdef CONFIG_KS8842_TIMB_DMA
++ #define ENABLED_IRQS_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
++ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
++ #define ENABLED_IRQS_DMA (ENABLED_IRQS_IP | IRQ_RX)
++#endif
+ #define REG_ISR 0x02
+ #define REG_RXSR 0x04
+ #define RXSR_VALID 0x8000
+@@ -111,14 +127,62 @@
+ #define REG_P1CR4 0x02
+ #define REG_P1SR 0x04
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++#define DMA_BUFFER_SIZE 2048
++
++#define DMA_DEV(a) ((a->dev->parent) ? a->dev->parent : a->dev)
++
++#define DMA_ONGOING(a) (a->dma_tx.ongoing | a->dma_rx.ongoing)
++
++struct ks8842_dma_ctl {
++ struct dma_chan *chan;
++ struct dma_async_tx_descriptor *adesc;
++ void *buf;
++ struct scatterlist sg;
++ int channel;
++};
++
++struct ks8842_rx_dma_ctl {
++ struct dma_chan *chan;
++ struct dma_async_tx_descriptor *adesc;
++ struct sk_buff *skb;
++ struct scatterlist sg;
++ struct tasklet_struct tasklet;
++ int channel;
++};
++
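++/* DMA is only used when both a TX and an RX channel have been assigned via
++ * the platform data; otherwise the driver falls back to PIO, see
++ * ks8842_open().
++ */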
++#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
++ ((adapter)->dma_rx.channel != -1))
++
++#endif
++
+ struct ks8842_adapter {
+ void __iomem *hw_addr;
+ int irq;
+ struct tasklet_struct tasklet;
+ spinlock_t lock; /* spinlock to be interrupt safe */
+- struct platform_device *pdev;
++ struct device *dev;
++ struct work_struct timeout_work;
++ struct net_device *netdev;
++#ifdef CONFIG_KS8842_TIMB_DMA
++ struct ks8842_dma_ctl dma_tx;
++ struct ks8842_rx_dma_ctl dma_rx;
++#endif
+ };
+
++static u8 macaddr[ETH_ALEN];
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++static void ks8842_dma_rx_cb(void *data);
++static void ks8842_dma_tx_cb(void *data);
++
++
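++/* Ask the timberdale IP to resume DMA transfers; called from the interrupt
++ * handler and the tasklet once the IRQ has been serviced.
++ */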
++static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
++{
++ iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
++}
++#endif
++
+ static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
+ {
+ iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
+@@ -195,7 +259,6 @@ static void ks8842_reset(struct ks8842_adapter *adapter)
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ */
+- iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
+ msleep(20);
+ }
+@@ -203,8 +266,10 @@ static void ks8842_reset(struct ks8842_adapter *adapter)
+ static void ks8842_update_link_status(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+ {
++ u16 p1mbsr = ks8842_read16(adapter, 45, REG_P1MBSR);
++
+ /* check the status of the link */
+- if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
++ if (p1mbsr & 0x4) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else {
+@@ -241,10 +306,8 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+ /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
+ ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
+
+- /* enable the receiver, uni + multi + broadcast + flow ctrl
+- + crc strip */
+- ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
+- REG_RXCR);
++ /* enable the receiver, uni + multi + broadcast + crc strip */
++ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80, REG_RXCR);
+
+ /* TX frame pointer autoincrement */
+ ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
+@@ -261,13 +324,11 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+ /* enable no excessive collison drop */
+ ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
+
+- /* Enable port 1 force flow control / back pressure / transmit / recv */
+- ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
++ /* Enable port 1 / back pressure / transmit / recv */
++ ks8842_write16(adapter, 48, 0xE07, REG_P1CR2);
+
+ /* restart port auto-negotiation */
+ ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
+- /* only advertise 10Mbps */
+- ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
+
+ /* Enable the transmitter */
+ ks8842_enable_tx(adapter);
+@@ -279,7 +340,17 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+ ks8842_write16(adapter, 18, 0xffff, REG_ISR);
+
+ /* enable interrupts */
+- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ iowrite16(ENABLED_IRQS_IP, adapter->hw_addr + REG_TIMB_IER);
++ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
++ } else {
++#endif
++ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++ iowrite16(ENABLED_IRQS, adapter->hw_addr + REG_TIMB_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ }
++#endif
+
+ /* enable the switch */
+ ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
+@@ -302,11 +373,74 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ }
+
++static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
++{
++ unsigned long flags;
++ unsigned i;
++
++ spin_lock_irqsave(&adapter->lock, flags);
++ for (i = 0; i < ETH_ALEN; i++) {
++ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
++ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
++ REG_MACAR1 + i);
++ }
++ spin_unlock_irqrestore(&adapter->lock, flags);
++}
++
+ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
+ {
+ return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
+ }
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
++ u8 *buf = ctl->buf;
++
++ if (ctl->adesc) {
++ dev_dbg(adapter->dev, "%s: TX ongoing\n", __func__);
++ /* transfer ongoing */
++ return NETDEV_TX_BUSY;
++ }
++
++ sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
++
++ /* copy data to the TX buffer */
++ /* the control word, enable IRQ, port 1 and the length */
++ *buf++ = 0x00;
++ *buf++ = 0x01; /* Port 1 */
++ *buf++ = skb->len & 0xff;
++ *buf++ = (skb->len >> 8) & 0xff;
++ skb_copy_from_linear_data(skb, buf, skb->len);
++
++ dma_sync_single_range_for_device(DMA_DEV(adapter),
++ sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
++ DMA_TO_DEVICE);
++
++ /* make sure the length is a multiple of 4 */
++ if (sg_dma_len(&ctl->sg) % 4)
++ sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
++
++ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
++ &ctl->sg, 1, DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++ if (!ctl->adesc)
++ return NETDEV_TX_BUSY;
++
++ ctl->adesc->callback_param = netdev;
++ ctl->adesc->callback = ks8842_dma_tx_cb;
++ ctl->adesc->tx_submit(ctl->adesc);
++
++ netdev->stats.tx_bytes += skb->len;
++
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++#endif
++
+ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+@@ -314,7 +448,7 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+ u32 *ptr = (u32 *)skb->data;
+ u32 ctrl;
+
+- dev_dbg(&adapter->pdev->dev,
++ dev_dbg(adapter->dev,
+ "%s: len %u head %p data %p tail %p end %p\n",
+ __func__, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+@@ -344,6 +478,116 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+ }
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++static int __ks8842_start_new_rx_dma(struct net_device *netdev,
++ struct ks8842_adapter *adapter)
++{
++ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
++ struct scatterlist *sg = &ctl->sg;
++ int err;
++
++ ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
++ if (ctl->skb) {
++ sg_init_table(sg, 1);
++ sg_dma_address(sg) = dma_map_single(DMA_DEV(adapter),
++ ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ err = dma_mapping_error(DMA_DEV(adapter), sg_dma_address(sg));
++ if (unlikely(err)) {
++ sg_dma_address(sg) = 0;
++ goto out;
++ }
++
++ sg_dma_len(sg) = DMA_BUFFER_SIZE;
++
++ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
++ sg, 1, DMA_FROM_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++
++ if (!ctl->adesc)
++ goto out;
++
++ ctl->adesc->callback_param = netdev;
++ ctl->adesc->callback = ks8842_dma_rx_cb;
++ ctl->adesc->tx_submit(ctl->adesc);
++ } else {
++ err = -ENOMEM;
++ sg_dma_address(sg) = 0;
++ goto out;
++ }
++
++ return err;
++out:
++ if (sg_dma_address(sg))
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(sg) = 0;
++ if (ctl->skb)
++ dev_kfree_skb(ctl->skb);
++
++ ctl->skb = NULL;
++
++ printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
++ return err;
++}
++
++static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
++{
++ struct net_device *netdev = (struct net_device *)arg;
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
++ struct sk_buff *skb = ctl->skb;
++ dma_addr_t addr = sg_dma_address(&ctl->sg);
++ u32 status;
++
++ ctl->adesc = NULL;
++
++ /* kick next transfer going */
++ __ks8842_start_new_rx_dma(netdev, adapter);
++
++ /* now handle the data we got */
++ dma_unmap_single(DMA_DEV(adapter), addr, DMA_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++
++ status = *((u32 *)skb->data);
++
++ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
++ __func__, status & 0xffff);
++
++ /* check the status */
++ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
++ int len = (status >> 16) & 0x7ff;
++
++ dev_dbg(adapter->dev, "%s, got package, len: %d, skb: %p\n",
++ __func__, len, skb);
++
++ netdev->stats.rx_packets++;
++ netdev->stats.rx_bytes += len;
++ if (status & RXSR_MULTICAST)
++ netdev->stats.multicast++;
++
++ /* we are not nice to the stack, we want to be nice
++ * to our DMA engine instead; reserve the 4 bytes
++ * which hold the status word
++ */
++ skb_reserve(skb, 4);
++ skb_put(skb, len);
++
++ skb->protocol = eth_type_trans(skb, netdev);
++ netif_rx(skb);
++ } else {
++ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
++ netdev->stats.rx_errors++;
++ if (status & RXSR_TOO_LONG)
++ netdev->stats.rx_length_errors++;
++ if (status & RXSR_CRC_ERROR)
++ netdev->stats.rx_crc_errors++;
++ if (status & RXSR_RUNT)
++ netdev->stats.rx_frame_errors++;
++ dev_kfree_skb(skb);
++ }
++}
++#endif
++
+ static void ks8842_rx_frame(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+ {
+@@ -352,14 +596,14 @@ static void ks8842_rx_frame(struct net_device *netdev,
+
+ status &= 0xffff;
+
+- dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
++ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
+ __func__, status);
+
+ /* check the status */
+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+
+- dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
++ dev_dbg(adapter->dev, "%s, got package, len: %d\n",
+ __func__, len);
+ if (skb) {
+ u32 *data;
+@@ -383,7 +627,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
+ } else
+ netdev->stats.rx_dropped++;
+ } else {
+- dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
++ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
+ netdev->stats.rx_errors++;
+ if (status & RXSR_TOO_LONG)
+ netdev->stats.rx_length_errors++;
+@@ -406,7 +650,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
+ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ {
+ u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
+- dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
++ dev_dbg(adapter->dev, "%s Entry - rx_data: %d\n",
+ __func__, rx_data);
+ while (rx_data) {
+ ks8842_rx_frame(netdev, adapter);
+@@ -417,7 +661,7 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ {
+ u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
+- dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
++ dev_dbg(adapter->dev, "%s - entry, sr: %x\n", __func__, sr);
+ netdev->stats.tx_packets++;
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+@@ -426,7 +670,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ void ks8842_handle_rx_overrun(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+ {
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
+ netdev->stats.rx_errors++;
+ netdev->stats.rx_fifo_errors++;
+ }
+@@ -445,20 +689,33 @@ void ks8842_tasklet(unsigned long arg)
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
++ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ isr &= ~IRQ_RX;
++#endif
+
+ /* Ack */
+ ks8842_write16(adapter, 18, isr, REG_ISR);
+
++ /* Ack in the timberdale IP as well */
++ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
++
+ if (!netif_running(netdev))
+ return;
+
+ if (isr & IRQ_LINK_CHANGE)
+ ks8842_update_link_status(netdev, adapter);
+
++ /* should not get IRQ_RX when in DMA mode */
+ if (isr & (IRQ_RX | IRQ_RX_ERROR))
+- ks8842_handle_rx(netdev, adapter);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (!KS8842_USE_DMA(adapter))
++#endif
++ ks8842_handle_rx(netdev, adapter);
+
++ /* should only happen when not doing DMA */
+ if (isr & IRQ_TX)
+ ks8842_handle_tx(netdev, adapter);
+
+@@ -477,8 +734,18 @@ void ks8842_tasklet(unsigned long arg)
+
+ /* re-enable interrupts, put back the bank selection register */
+ spin_lock_irqsave(&adapter->lock, flags);
+- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
++ else
++#endif
++ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ /* resume DMA operations */
++ ks8842_resume_dma(adapter);
++#endif
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ }
+
+@@ -490,11 +757,17 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
++ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ if (isr) {
+- /* disable IRQ */
+- ks8842_write16(adapter, 18, 0x00, REG_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ /* disable all but RX IRQ, since the FPGA relies on it */
++ ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
++ else
++#endif
++ /* disable IRQ */
++ ks8842_write16(adapter, 18, 0x00, REG_IER);
+
+ /* schedule tasklet */
+ tasklet_schedule(&adapter->tasklet);
+@@ -503,23 +776,159 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
+ }
+
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+-
++#ifdef CONFIG_KS8842_TIMB_DMA
++ ks8842_resume_dma(adapter);
++#endif
+ return ret;
+ }
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++static void ks8842_dma_rx_cb(void *data)
++{
++ struct net_device *netdev = data;
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++
++ dev_dbg(adapter->dev, "RX DMA finished\n");
++ /* schedule tasklet */
++ if (adapter->dma_rx.adesc)
++ tasklet_schedule(&adapter->dma_rx.tasklet);
++}
++
++static void ks8842_dma_tx_cb(void *data)
++{
++ struct net_device *netdev = data;
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
++
++ dev_dbg(adapter->dev, "TX DMA finished\n");
++
++ if (!ctl->adesc)
++ return;
++
++ netdev->stats.tx_packets++;
++ ctl->adesc = NULL;
++
++ if (netif_queue_stopped(netdev))
++ netif_wake_queue(netdev);
++}
++
++static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
++{
++ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
++ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
++
++ tx_ctl->adesc = NULL;
++ if (tx_ctl->chan) {
++ tx_ctl->chan->device->device_terminate_all(tx_ctl->chan);
++ dma_release_channel(tx_ctl->chan);
++ }
++ tx_ctl->chan = NULL;
++
++ rx_ctl->adesc = NULL;
++ if (rx_ctl->chan) {
++ rx_ctl->chan->device->device_terminate_all(rx_ctl->chan);
++ dma_release_channel(rx_ctl->chan);
++ }
++ rx_ctl->chan = NULL;
++
++ tasklet_kill(&rx_ctl->tasklet);
++
++ if (sg_dma_address(&rx_ctl->sg))
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(&rx_ctl->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(&rx_ctl->sg) = 0;
++
++ if (sg_dma_address(&tx_ctl->sg))
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(&tx_ctl->sg),
++ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
++ sg_dma_address(&tx_ctl->sg) = 0;
++
++ dev_kfree_skb(rx_ctl->skb);
++ rx_ctl->skb = NULL;
++ kfree(tx_ctl->buf);
++ tx_ctl->buf = NULL;
++}
++#endif
++
++
++#ifdef CONFIG_KS8842_TIMB_DMA
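++/* dma_request_channel() filter: match the channel whose id was passed as
++ * the filter parameter (the rx/tx channel numbers from the platform data).
++ */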
++static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
++{
++ return chan->chan_id == (int)filter_param;
++}
++#endif
+
+ /* Netdevice operations */
+
+ static int ks8842_open(struct net_device *netdev)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
++ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
++ bool use_dma = false;
++#endif
+ int err;
+
+- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
++ dev_dbg(adapter->dev, "%s - entry\n", __func__);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ dma_cap_mask_t mask;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ sg_init_table(&tx_ctl->sg, 1);
++
++ tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
++ (void *)tx_ctl->channel);
++ if (!tx_ctl->chan)
++ goto no_dma;
++
++ /* allocate DMA buffer */
++ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
++ if (!tx_ctl->buf)
++ goto no_dma;
++ sg_dma_address(&tx_ctl->sg) = dma_map_single(DMA_DEV(adapter),
++ tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
++ err = dma_mapping_error(DMA_DEV(adapter),
++ sg_dma_address(&tx_ctl->sg));
++ if (err) {
++ sg_dma_address(&tx_ctl->sg) = 0;
++ goto no_dma;
++ }
++
++ rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
++ (void *)rx_ctl->channel);
++ if (!rx_ctl->chan)
++ goto no_dma;
++
++ tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
++ (unsigned long)netdev);
++
++ /* start RX dma */
++ err = __ks8842_start_new_rx_dma(netdev, adapter);
++ if (err)
++ goto no_dma;
++
++ use_dma = true;
++ }
++no_dma:
++ if (!use_dma) {
++ printk(KERN_WARNING DRV_NAME
++ ": Failed to initiate DMA, falling back to PIO\n");
++ ks8842_dealloc_dma_bufs(adapter);
++ adapter->dma_rx.channel = -1;
++ adapter->dma_tx.channel = -1;
++ }
++#endif
+
+ /* reset the HW */
+ ks8842_reset_hw(adapter);
+
++ ks8842_write_mac_addr(adapter, netdev->dev_addr);
++
+ ks8842_update_link_status(netdev, adapter);
+
+ err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
+@@ -533,11 +942,19 @@ static int ks8842_open(struct net_device *netdev)
+ return 0;
+ }
+
++
+ static int ks8842_close(struct net_device *netdev)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
++ dev_dbg(adapter->dev, "%s - entry\n", __func__);
++
++ cancel_work_sync(&adapter->timeout_work);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ ks8842_dealloc_dma_bufs(adapter);
++#endif
+
+ /* free the irq */
+ free_irq(adapter->irq, adapter);
+@@ -554,8 +971,20 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
+ int ret;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+-
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ unsigned long flags;
++ ret = ks8842_tx_frame_dma(skb, netdev);
++		/* for now, only allow one transfer at a time */
++ spin_lock_irqsave(&adapter->lock, flags);
++ if (adapter->dma_tx.adesc)
++ netif_stop_queue(netdev);
++ spin_unlock_irqrestore(&adapter->lock, flags);
++ return ret;
++ }
++#endif
+ ret = ks8842_tx_frame(skb, netdev);
+
+ if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
+@@ -567,44 +996,77 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
+ static int ks8842_set_mac(struct net_device *netdev, void *p)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+- unsigned long flags;
+ struct sockaddr *addr = p;
+ char *mac = (u8 *)addr->sa_data;
+- int i;
+
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
+
+- spin_lock_irqsave(&adapter->lock, flags);
+- for (i = 0; i < ETH_ALEN; i++) {
+- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+- REG_MACAR1 + i);
+- }
+- spin_unlock_irqrestore(&adapter->lock, flags);
++ ks8842_write_mac_addr(adapter, mac);
+ return 0;
+ }
+
+-static void ks8842_tx_timeout(struct net_device *netdev)
++static void ks8842_tx_timeout_work(struct work_struct *work)
+ {
+- struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_adapter *adapter =
++ container_of(work, struct ks8842_adapter, timeout_work);
++ struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
+
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
+
+ spin_lock_irqsave(&adapter->lock, flags);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
++ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
++
++ tx_ctl->adesc = NULL;
++ tx_ctl->chan->device->device_terminate_all(tx_ctl->chan);
++
++ rx_ctl->adesc = NULL;
++ rx_ctl->chan->device->device_terminate_all(rx_ctl->chan);
++
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(&rx_ctl->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(&rx_ctl->sg) = 0;
++
++ dev_kfree_skb(rx_ctl->skb);
++ rx_ctl->skb = NULL;
++ }
++#endif
++
+ /* disable interrupts */
+ ks8842_write16(adapter, 18, 0, REG_IER);
+ ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
++
++ netif_stop_queue(netdev);
++
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ ks8842_reset_hw(adapter);
+
++ ks8842_write_mac_addr(adapter, netdev->dev_addr);
++
+ ks8842_update_link_status(netdev, adapter);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ __ks8842_start_new_rx_dma(netdev, adapter);
++#endif
++}
++
++static void ks8842_tx_timeout(struct net_device *netdev)
++{
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
++
++ schedule_work(&adapter->timeout_work);
+ }
+
+ static const struct net_device_ops ks8842_netdev_ops = {
+@@ -626,7 +1088,11 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
+ struct resource *iomem;
+ struct net_device *netdev;
+ struct ks8842_adapter *adapter;
++#ifdef CONFIG_KS8842_TIMB_DMA
++ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
++#endif
+ u16 id;
++ unsigned i;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
+@@ -639,6 +1105,8 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
++ adapter->netdev = netdev;
++ INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
+ adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
+ if (!adapter->hw_addr)
+ goto err_ioremap;
+@@ -649,15 +1117,37 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
+ goto err_get_irq;
+ }
+
+- adapter->pdev = pdev;
+-
++ adapter->dev = &pdev->dev;
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (pdata && (pdata->tx_dma_channel != -1) &&
++ (pdata->rx_dma_channel != -1)) {
++ adapter->dma_rx.channel = pdata->rx_dma_channel;
++ adapter->dma_tx.channel = pdata->tx_dma_channel;
++ } else {
++ adapter->dma_rx.channel = -1;
++ adapter->dma_tx.channel = -1;
++ }
++#endif
+ tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ spin_lock_init(&adapter->lock);
+
+ netdev->netdev_ops = &ks8842_netdev_ops;
+ netdev->ethtool_ops = &ks8842_ethtool_ops;
+
+- ks8842_read_mac_addr(adapter, netdev->dev_addr);
++ /* Check if a mac address was given */
++ for (i = 0; i < netdev->addr_len; i++)
++ if (macaddr[i] != 0)
++ break;
++
++ if (i < netdev->addr_len)
++ /* an address was passed, use it */
++ memcpy(netdev->dev_addr, macaddr, netdev->addr_len);
++ else {
++ ks8842_read_mac_addr(adapter, netdev->dev_addr);
++
++ if (!is_valid_ether_addr(netdev->dev_addr))
++ random_ether_addr(netdev->dev_addr);
++ }
+
+ id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
+
+@@ -723,6 +1213,10 @@ static void __exit ks8842_exit(void)
+ module_init(ks8842_init);
+ module_exit(ks8842_exit);
+
++/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
++module_param_array(macaddr, byte, NULL, 0);
++MODULE_PARM_DESC(macaddr, "KS8842 MAC address to set");
++
+ MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
+ MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/most/Kconfig b/drivers/net/most/Kconfig
+new file mode 100644
+index 0000000..fbf1214
+--- /dev/null
++++ b/drivers/net/most/Kconfig
+@@ -0,0 +1,14 @@
++menu "MOST Device Drivers"
++ depends on MOST
++
++config MOST_TIMB_MLB
++ tristate "The timberdale MOST block"
++ depends on MOST
++ depends on GENERIC_GPIO
++ depends on HAS_IOMEM
++ select TIMB_DMA
++	default n
++ ---help---
++ Adds support for MOST on the timberdale FPGA.
++
++endmenu
+diff --git a/drivers/net/most/Makefile b/drivers/net/most/Makefile
+new file mode 100644
+index 0000000..5879279
+--- /dev/null
++++ b/drivers/net/most/Makefile
+@@ -0,0 +1,6 @@
++#
++# Makefile for the Linux Media Oriented Systems Transport drivers.
++#
++
++obj-$(CONFIG_MOST_TIMB_MLB) += timbmlb.o
++
+diff --git a/drivers/net/most/timbmlb.c b/drivers/net/most/timbmlb.c
+new file mode 100644
+index 0000000..f23e52a
+--- /dev/null
++++ b/drivers/net/most/timbmlb.c
+@@ -0,0 +1,1171 @@
++/*
++ * timbmlb.c Driver for the timberdale MLB block
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/dmaengine.h>
++#include <linux/spinlock.h>
++#include <net/most/most_core.h>
++#include <linux/gpio.h>
++#include <linux/most/timbmlb.h>
++
++#define DRIVER_NAME "timb-most"
++
++#define MLB_REG_CFG 0x00
++#define MLB_REG_CH_CTRL 0x04
++#define MLB_REG_ISR 0x08
++#define MLB_REG_IMR 0x0C
++#define MLB_REG_CH_CFG_1 0x10
++#define MLB_REG_CH_CFG_2 0x14
++#define MLB_REG_CH_CFG_3 0x18
++#define MLB_REG_CH_CFG_4 0x1C
++#define MLB_REG_CH_CFG_5 0x20
++#define MLB_REG_CH_CFG_6 0x24
++#define MLB_REG_CH_CFG_7 0x28
++#define MLB_REG_CTRL_RX 0x2C /* 8 bits */
++#define MLB_REG_CTRL_TX MLB_REG_CTRL_RX
++#define MLB_REG_ASYNC_RX 0x30 /* 32 bits */
++#define MLB_REG_ASYNC_TX MLB_REG_ASYNC_RX
++#define MLB_REG_SYNC_RX 0x34 /* 32 bits */
++#define MLB_REG_SYNC_TX MLB_REG_SYNC_RX
++#define MLB_REG_FIFO_RST 0x38
++
++#define MLB_WR_CFG_CTRL_RX_EMPTY 0x20000
++#define MLB_WR_CFG_ASYNC_RX_EMPTY 0x10000
++#define MLB_CFG_SYNC_TX_EN 0x00200
++#define MLB_CFG_SYNC_RX_EN 0x00100
++#define MLB_CFG_ASYNC_RX_EN 0x00080
++#define MLB_CFG_CTRL_RX_EN 0x00040
++
++#define MLB_CH_CTRL_ASYNC_TX_START 0x8000
++#define MLB_CH_CTRL_ASYNC_RX_BREAK 0x4000
++#define MLB_CH_CTRL_CTRL_TX_START 0x0800
++#define MLB_CH_CTRL_CTRL_RX_BREAK 0x0400
++
++#define MLB_WR_I_SYNC_RX_EMPTY 0x80000
++#define MLB_WR_I_SYNC_RX_ALMOST_FULL 0x40000
++#define MLB_WR_I_SYNC_TX_FULL 0x20000
++#define MLB_WR_I_SYNC_TX_ALMOST_EMPTY 0x10000
++#define MLB_I_ASYNC_TX_READY 0x08000
++#define MLB_I_ASYNC_TX_PROT_ERR 0x04000
++#define MLB_I_ASYNC_TX_RX_BREAK 0x02000
++#define MLB_I_ASYNC_TX_BUSY_BREAK 0x01000
++#define MLB_I_ASYNC_RX_READY 0x00800
++#define MLB_I_ASYNC_RX_PROT_ERR 0x00400
++#define MLB_I_ASYNC_RX_CMD_BREAK 0x00200
++#define MLB_I_SYNC_LOCK 0x00100
++#define MLB_I_CTRL_TX_READY 0x00080
++#define MLB_I_CTRL_TX_PROT_ERR 0x00040
++#define MLB_I_CTRL_TX_RX_BREAK 0x00020
++#define MLB_I_CTRL_TX_BUSY_BREAK 0x00010
++#define MLB_I_CTRL_RX_READY 0x00008
++#define MLB_I_CTRL_RX_PROT_ERR 0x00004
++#define MLB_I_CTRL_RX_CMD_BREAK 0x00002
++#define MLB_I_SYNC_RX_PROT_ERR 0x00001
++
++#define MLB_CH_CFG_NOT_ALLOCATED 0x0000
++#define MLB_CH_CFG_SYNC_TX 0x0001
++#define MLB_CH_CFG_SYNC_RX 0x0002
++#define MLB_CH_CFG_ASYNC_TX 0x0003
++#define MLB_CH_CFG_ASYNC_RX 0x0004
++#define MLB_CH_CFG_CTRL_TX 0x0005
++#define MLB_CH_CFG_CTRL_RX 0x0006
++
++#define MLB_FIFO_RST_CTRL_TX 0x010000
++#define MLB_FIFO_RST_CTRL_RX 0x020000
++#define MLB_FIFO_RST_ASYNC_TX 0x040000
++#define MLB_FIFO_RST_ASYNC_RX 0x080000
++#define MLB_FIFO_RST_SYNC_TX 0x100000
++#define MLB_FIFO_RST_SYNC_RX 0x200000
++#define MLB_FIFO_RST_MLB 0x400000
++#define MLB_FIFO_RST_ALL (MLB_FIFO_RST_CTRL_TX | \
++ MLB_FIFO_RST_CTRL_RX | \
++ MLB_FIFO_RST_ASYNC_TX | \
++ MLB_FIFO_RST_ASYNC_RX | \
++ MLB_FIFO_RST_SYNC_TX | \
++ MLB_FIFO_RST_SYNC_RX | \
++ MLB_FIFO_RST_MLB)
++
++#define ASYNC_SKB_SIZE 1024
++#define SYNC_SKB_SIZE 32
++
++#define SYNC_MAX_DMA_SIZE 4096
++
++#define RX_CHAN 0
++#define TX_CHAN 1
++#define CHANNELS 2
++
++#define SYNC_STATE_DOWN 0
++#define SYNC_STATE_UP 1
++
++#define DMA_DEV(s) ((s->mdev->parent->parent) ? \
++ s->mdev->parent->parent : s->mdev->parent)
++
++struct timbmost {
++ void __iomem *membase;
++ struct most_dev *mdev;
++ int irq;
++ int reset_pin;
++ spinlock_t lock; /* mutual exclusion */
++
++ /* one queue per channel (type) */
++ struct sk_buff_head ctl_q;
++ struct sk_buff_head async_q;
++ struct sk_buff_head sync_q;
++
++ /* The SKB currently written/read into by the DMA engine
++ * only used for the synchronous channel
++ */
++ struct sk_buff *sync_read_skb;
++ dma_addr_t sync_read_handle;
++ struct scatterlist sync_rx_sg;
++ struct sk_buff *sync_write_skb;
++ int sync_write_next_map;
++
++ u8 sync_rx_state;
++ u8 sync_tx_state;
++ int sync_tx_chan_id;
++ int sync_rx_chan_id;
++ struct dma_chan *sync_tx_chan;
++ struct dma_chan *sync_rx_chan;
++ dma_cookie_t sync_tx_cookie;
++ dma_cookie_t sync_rx_cookie;
++ struct tasklet_struct sync_tx_tasklet;
++ struct tasklet_struct sync_rx_tasklet;
++
++ /* channel numbers */
++ u8 ctl_channels[CHANNELS];
++ u8 sync_channels[CHANNELS];
++ u8 async_channels[CHANNELS];
++};
++
++static void timbmost_ctl_write_wake(struct timbmost *self);
++static void timbmost_async_write_wake(struct timbmost *self);
++
++static int skb_dma_map(struct device *dev, struct sk_buff *skb,
++ enum dma_data_direction dir)
++{
++ struct skb_shared_info *sp = skb_shinfo(skb);
++ dma_addr_t map;
++ int i;
++
++ map = dma_map_single(dev, skb->data, skb_headlen(skb), dir);
++ if (dma_mapping_error(dev, map))
++ goto out_err;
++
++ sp->dma_head = map;
++ for (i = 0; i < sp->nr_frags; i++) {
++ skb_frag_t *fp = &sp->frags[i];
++
++ map = dma_map_page(dev, fp->page, fp->page_offset,
++ fp->size, dir);
++ if (dma_mapping_error(dev, map))
++ goto unwind;
++ sp->dma_maps[i] = map;
++ }
++
++ return 0;
++
++unwind:
++ while (--i >= 0) {
++ skb_frag_t *fp = &sp->frags[i];
++
++ dma_unmap_page(dev, sp->dma_maps[i], fp->size, dir);
++ }
++ dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir);
++out_err:
++ return -ENOMEM;
++}
++
++static void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
++ enum dma_data_direction dir)
++{
++ struct skb_shared_info *sp = skb_shinfo(skb);
++ int i;
++
++ dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir);
++ for (i = 0; i < sp->nr_frags; i++) {
++ skb_frag_t *fp = &sp->frags[i];
++
++ dma_unmap_page(dev, sp->dma_maps[i], fp->size, dir);
++ }
++}
++
++static void __timbmost_dump_regs(struct timbmost *self, const char *caption)
++{
++ dev_dbg(self->mdev->parent, "%s\nMLB_CFG:\t%x\tCH_CTRL:\t%x\n",
++ caption,
++ ioread32(self->membase + MLB_REG_CFG),
++ ioread32(self->membase + MLB_REG_CH_CTRL));
++
++ dev_dbg(self->mdev->parent, "ISTAT:\t%x\tIMASK:\t%x\n",
++ ioread32(self->membase + MLB_REG_ISR),
++ ioread32(self->membase + MLB_REG_IMR));
++
++ dev_dbg(self->mdev->parent, "CH_CFG1:\t%x\tCH_CFG2:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_1),
++ ioread32(self->membase + MLB_REG_CH_CFG_2));
++
++ dev_dbg(self->mdev->parent, "CH_CFG3:\t%x\tCH_CFG4:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_3),
++ ioread32(self->membase + MLB_REG_CH_CFG_4));
++
++ dev_dbg(self->mdev->parent, "CH_CFG5:\t%x\tCH_CFG6:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_5),
++ ioread32(self->membase + MLB_REG_CH_CFG_6));
++
++ dev_dbg(self->mdev->parent, "CH_CFG7:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_7));
++}
++
++static void __timbmost_hw_reset(struct timbmost *self)
++{
++ /* disable all interrupts */
++ iowrite32(0, self->membase + MLB_REG_IMR);
++ iowrite32(0, self->membase + MLB_REG_ISR);
++
++ /* disable RX and TX */
++ iowrite32(0, self->membase + MLB_REG_CFG);
++ iowrite32(0, self->membase + MLB_REG_CH_CTRL);
++
++ /* make sure the channels are not allocated */
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_1);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_2);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_3);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_4);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_5);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_6);
++
++ /* reset */
++ iowrite32(MLB_FIFO_RST_ALL, self->membase + MLB_REG_FIFO_RST);
++
++ /* reset the INIC */
++ gpio_direction_output(self->reset_pin, 0);
++ msleep(10);
++ gpio_set_value(self->reset_pin, 1);
++}
++
++/* function called in interrupt context by the DMA engine when transfer finishes
++ */
++static void timbmost_dma_tx_cb(void *data)
++{
++ struct timbmost *self = data;
++
++ tasklet_schedule(&self->sync_tx_tasklet);
++}
++
++static void timbmost_dma_rx_cb(void *data)
++{
++ struct timbmost *self = data;
++
++ tasklet_schedule(&self->sync_rx_tasklet);
++}
++
++static void __timbmost_ctl_rx(struct timbmost *self)
++{
++ u32 cfg;
++ do {
++ struct sk_buff *skb =
++ most_skb_alloc(CTL_FRAME_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ do {
++ u32 word = ioread32(self->membase + MLB_REG_CTRL_RX);
++ int i;
++
++ for (i = 0; i < 4; i++)
++ *skb_put(skb, 1) = (word >> (i * 8)) & 0xff;
++
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ } while ((skb->len < CTL_FRAME_SIZE) &&
++ !(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
++
++		/* deliver SKB upstream */
++ skb->dev = (void *)self->mdev;
++ most_cb(skb)->channel_type = CHAN_CTL;
++ /* only one channel is supported... */
++ most_cb(skb)->channel = self->ctl_channels[RX_CHAN];
++
++ most_recv_frame(skb);
++ } while (!(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
++}
++
++static void __timbmost_async_rx(struct timbmost *self)
++{
++ /* TODO: The FIFO is 32bit not 8bit */
++ u32 cfg;
++
++ __timbmost_dump_regs(self, "Before read");
++
++ do {
++ struct sk_buff *skb =
++ most_skb_alloc(ASYNC_SKB_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ do {
++ *skb_put(skb, 1) =
++ ioread32(self->membase + MLB_REG_ASYNC_RX);
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ } while ((skb->len < ASYNC_SKB_SIZE) &&
++ !(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
++
++		/* deliver SKB upstream */
++ skb->dev = (void *)self->mdev;
++ most_cb(skb)->channel_type = CHAN_ASYNC;
++ /* only one channel is supported... */
++ most_cb(skb)->channel = self->async_channels[RX_CHAN];
++
++ most_recv_frame(skb);
++ } while (!(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
++}
++
++static void __timbmost_sync_read_wake(struct timbmost *self)
++{
++ struct sk_buff *skb = self->sync_read_skb;
++ struct dma_async_tx_descriptor *desc;
++ struct scatterlist *sg = &self->sync_rx_sg;
++
++ if (skb)
++ return;
++
++ skb = most_skb_alloc(SYNC_SKB_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ sg_init_table(sg, 1);
++ sg_dma_len(sg) = SYNC_SKB_SIZE;
++
++ /* send next fragment */
++ sg_dma_address(sg) = dma_map_single(DMA_DEV(self), skb->data,
++ SYNC_SKB_SIZE, DMA_FROM_DEVICE);
++ if (dma_mapping_error(DMA_DEV(self), sg_dma_address(sg)))
++ goto map_failed;
++
++ desc = self->sync_rx_chan->device->device_prep_slave_sg(
++ self->sync_rx_chan, sg, 1, DMA_FROM_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++
++ if (desc) {
++ self->sync_read_skb = skb;
++ desc->callback_param = self;
++ desc->callback = timbmost_dma_rx_cb;
++ self->sync_rx_cookie = desc->tx_submit(desc);
++
++ return;
++ }
++
++ dma_unmap_single(DMA_DEV(self), sg_dma_address(sg), SYNC_SKB_SIZE,
++ DMA_FROM_DEVICE);
++map_failed:
++ dev_kfree_skb(skb);
++}
++
++static void timbmost_sync_rx_tasklet(unsigned long arg)
++{
++ struct timbmost *self = (struct timbmost *)arg;
++ struct sk_buff *skb = self->sync_read_skb;
++
++ BUG_ON(!skb);
++
++ /* unmap DMA */
++ dma_unmap_single(DMA_DEV(self), self->sync_read_handle, SYNC_SKB_SIZE,
++ DMA_FROM_DEVICE);
++
++ if (self->sync_rx_state == SYNC_STATE_DOWN) {
++ dev_kfree_skb(skb);
++ self->sync_read_skb = NULL;
++ return;
++ }
++
++ /* set the length */
++ skb_put(skb, SYNC_SKB_SIZE);
++ /* send the SKB upwards */
++ skb->dev = (void *)self->mdev;
++ most_cb(skb)->channel_type = CHAN_SYNC;
++ /* only one channel is supported... */
++ most_cb(skb)->channel = self->sync_channels[RX_CHAN];
++ most_recv_frame(skb);
++ self->sync_read_skb = NULL;
++
++ __timbmost_sync_read_wake(self);
++}
++
++static void __timbmost_sync_write_wake(struct timbmost *self)
++{
++ struct sk_buff *skb = self->sync_write_skb;
++ struct dma_async_tx_descriptor *desc;
++ struct scatterlist sg;
++
++ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
++
++ if (!skb) {
++ /* check for next SKB */
++ skb = skb_dequeue(&self->sync_q);
++ if (!skb)
++ return;
++
++ if (skb_dma_map(DMA_DEV(self), skb, DMA_TO_DEVICE)) {
++ /* failed to dma map? */
++ dev_kfree_skb(skb);
++ return;
++ }
++ /* next dma map to write is the first ... */
++ self->sync_write_next_map = -1;
++ self->sync_write_skb = skb;
++ dev_dbg(self->mdev->parent, "%s: New skb: fragments: %d\n",
++ __func__, skb_shinfo(skb)->nr_frags);
++ }
++
++ sg_init_table(&sg, 1);
++
++ /* send next fragment */
++ if (self->sync_write_next_map < 0) {
++ sg_dma_len(&sg) = skb_headlen(skb);
++ sg_dma_address(&sg) = skb_shinfo(skb)->dma_head;
++ } else {
++ sg_dma_len(&sg) =
++ skb_shinfo(skb)->frags[self->sync_write_next_map].size;
++ sg_dma_address(&sg) =
++ skb_shinfo(skb)->dma_maps[self->sync_write_next_map];
++ }
++ self->sync_write_next_map++;
++ dev_dbg(self->mdev->parent, "%s: Will send %x, len: %d\n",
++ __func__, (uint32_t)sg_dma_address(&sg), sg_dma_len(&sg));
++
++ desc = self->sync_tx_chan->device->device_prep_slave_sg(
++ self->sync_tx_chan, &sg, 1, DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++
++ desc->callback_param = self;
++ desc->callback = timbmost_dma_tx_cb;
++ self->sync_tx_cookie = desc->tx_submit(desc);
++}
++
++static void timbmost_sync_tx_tasklet(unsigned long arg)
++{
++ struct timbmost *self = (struct timbmost *)arg;
++ struct sk_buff *skb = self->sync_write_skb;
++
++ /* TX done, free current SKB, and check for next */
++ BUG_ON(!skb);
++
++ /* check if this was the last DMA map */
++ if (self->sync_tx_state == SYNC_STATE_DOWN ||
++ self->sync_write_next_map >= skb_shinfo(skb)->nr_frags) {
++
++ /* it was the last... */
++ skb_dma_unmap(DMA_DEV(self), skb, DMA_TO_DEVICE);
++ dev_kfree_skb(skb);
++ self->sync_write_skb = NULL;
++ }
++
++ if (self->sync_tx_state != SYNC_STATE_DOWN)
++ __timbmost_sync_write_wake(self);
++}
++
++static void timbmost_sync_start_write(struct timbmost *self)
++{
++ unsigned long flags;
++ struct sk_buff *skb;
++
++ spin_lock_irqsave(&self->lock, flags);
++ skb = self->sync_write_skb;
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ /* transfer is ongoing */
++ if (skb)
++ return;
++
++ __timbmost_sync_write_wake(self);
++}
++
++static irqreturn_t timbmost_irq(int irq, void *devid)
++{
++ struct timbmost *self = (struct timbmost *)devid;
++ u32 isr, imr;
++
++ isr = ioread32(self->membase + MLB_REG_ISR);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++
++ dev_dbg(self->mdev->parent, "%s: entry, isr: %x, imr: %x\n", __func__,
++ isr, imr);
++
++ /* mask out only enabled interrupts */
++ isr &= imr;
++
++ /* ack */
++ iowrite32(isr, self->membase + MLB_REG_ISR);
++
++ if (isr & MLB_I_ASYNC_TX_READY) {
++ /* disable TX interrupts */
++ imr &= ~(MLB_I_ASYNC_TX_READY | MLB_I_ASYNC_TX_PROT_ERR);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* schedule to send next package */
++ timbmost_async_write_wake(self);
++ }
++
++ if (isr & MLB_I_ASYNC_RX_READY)
++		/* pass data upstream */
++ __timbmost_async_rx(self);
++
++ if (isr & MLB_I_CTRL_TX_READY) {
++ /* disable TX interrupts */
++ imr &= ~(MLB_I_CTRL_TX_READY | MLB_I_CTRL_TX_PROT_ERR);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* schedule to send next package */
++ timbmost_ctl_write_wake(self);
++ }
++
++ if (isr & MLB_I_CTRL_RX_READY)
++		/* pass data upstream */
++ __timbmost_ctl_rx(self);
++
++ if (isr)
++ return IRQ_HANDLED;
++ else
++ return IRQ_NONE;
++}
++
++static bool timbmost_dma_filter_fn(struct dma_chan *chan, void *filter_param)
++{
++ return chan->chan_id == (int)filter_param;
++}
++
++static int timbmost_open(struct most_dev *mdev)
++{
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++ int err;
++ dma_cap_mask_t mask;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ dev_dbg(mdev->parent, "%s\n", __func__);
++
++ skb_queue_head_init(&self->ctl_q);
++ skb_queue_head_init(&self->sync_q);
++ skb_queue_head_init(&self->async_q);
++
++ spin_lock_init(&self->lock);
++
++ /* request the GPIO reset pin */
++ err = gpio_request(self->reset_pin, DRIVER_NAME);
++ if (err) {
++ printk(KERN_ERR DRIVER_NAME
++ " Failed to request reset pin: %d, err: %d\n",
++ self->reset_pin, err);
++ return err;
++ }
++
++ __timbmost_hw_reset(self);
++
++ self->sync_tx_cookie = -1;
++ self->sync_rx_cookie = -1;
++
++ self->sync_tx_chan = dma_request_channel(mask, timbmost_dma_filter_fn,
++ (void *)self->sync_tx_chan_id);
++ if (!self->sync_tx_chan) {
++ err = -ENODEV;
++ goto err_tx_chan;
++ }
++
++ self->sync_rx_chan = dma_request_channel(mask, timbmost_dma_filter_fn,
++ (void *)self->sync_rx_chan_id);
++ if (!self->sync_rx_chan) {
++ err = -ENODEV;
++ goto err_rx_chan;
++ }
++
++ /* request IRQ */
++ err = request_irq(self->irq, timbmost_irq, IRQF_SHARED, "timb-most",
++ self);
++ if (err)
++ goto err_req_irq;
++
++ return 0;
++
++err_req_irq:
++ dma_release_channel(self->sync_rx_chan);
++err_rx_chan:
++ dma_release_channel(self->sync_tx_chan);
++err_tx_chan:
++ gpio_free(self->reset_pin);
++ return err;
++}
++
++static void timbmost_stop_sync_dma(struct timbmost *self)
++{
++ if (self->sync_read_skb) {
++ most_dbg("Stopping RX\n");
++
++ self->sync_rx_chan->device->device_terminate_all(
++ self->sync_rx_chan);
++ }
++
++ if (self->sync_write_skb) {
++ /* just let it complete */
++		dma_sync_wait(self->sync_tx_chan, self->sync_tx_cookie);
++
++ most_dbg("Stopping TX\n");
++ }
++}
++
++static int timbmost_close(struct most_dev *mdev)
++{
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++
++ dev_dbg(mdev->parent, "%s\n", __func__);
++
++ /* free IRQ */
++ free_irq(self->irq, self);
++
++ __timbmost_hw_reset(self);
++
++ /* free GPIO */
++ gpio_free(self->reset_pin);
++
++ /* empty all queues */
++ skb_queue_purge(&self->ctl_q);
++ skb_queue_purge(&self->sync_q);
++ skb_queue_purge(&self->async_q);
++
++ dma_release_channel(self->sync_rx_chan);
++ dma_release_channel(self->sync_tx_chan);
++
++ return 0;
++}
++
++static int __timbmost_conf_channel(struct timbmost *self, u8 channel,
++ u8 channel_mask)
++{
++ int register_offset;
++ int shift;
++ u32 ch_cfg;
++
++ /* only even channel numbers are allowed */
++ if (channel % 2 || channel > 0x3e || channel == 0) {
++ printk(KERN_WARNING DRIVER_NAME": Invalid channel: %d\n",
++ channel);
++ return -EINVAL;
++ }
++
++ channel = (channel / 2) - 1;
++ /* the channel conf is spread out over the 7 channel config registers
++ * each register configures 5 channels, each reg is 32bit
++ */
++ register_offset = MLB_REG_CH_CFG_1 + (channel / 5) * 4;
++
++ /* each register configures 5 channels, 3 bit per channel
++ * lowest bits configures highest channel
++ */
++ shift = (4 - (channel % 5)) * 3;
++
++ ch_cfg = ioread32(self->membase + register_offset);
++ ch_cfg &= ~(0x7 << shift);
++ ch_cfg |= (channel_mask & 0x7) << shift;
++ iowrite32(ch_cfg, self->membase + register_offset);
++ return 0;
++}
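
A worked example of the register mapping implemented above, with channel numbers chosen purely for illustration (they are not values used elsewhere in this patch):

	logical channel  6 -> index (6 / 2) - 1 = 2 -> MLB_REG_CH_CFG_1 (index / 5 = 0), shift = (4 - 2 % 5) * 3 = 6
	logical channel 12 -> index (12 / 2) - 1 = 5 -> MLB_REG_CH_CFG_2 (index / 5 = 1), shift = (4 - 5 % 5) * 3 = 12

In both cases the three configuration bits for that channel are written at the computed shift within the selected 32-bit register.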
++
++static int timbmost_conf_channel(struct most_dev *mdev,
++ enum most_chan_type type, u8 channel, u8 flags)
++{
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++ unsigned long irq_flags;
++ u32 imr, cfg;
++ int err = -EINVAL;
++ int chan_idx = (flags & MOST_CONF_FLAG_TX) ? TX_CHAN : RX_CHAN;
++
++ dev_dbg(mdev->parent, "%s: channel: %d, flags: %x\n",
++ __func__, channel, flags);
++
++ if (flags & MOST_CONF_FLAG_UP) {
++ switch (type) {
++ case CHAN_CTL:
++ spin_lock_irqsave(&self->lock, irq_flags);
++			/* we only support one channel at a time */
++ if (self->ctl_channels[chan_idx])
++ goto error;
++
++ /* reset the FIFO */
++ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_CTRL_TX :
++ MLB_FIFO_RST_CTRL_RX,
++ self->membase + MLB_REG_FIFO_RST);
++
++ err = __timbmost_conf_channel(self, channel,
++ (chan_idx == TX_CHAN) ? MLB_CH_CFG_CTRL_TX :
++ MLB_CH_CFG_CTRL_RX);
++ if (err)
++ goto error;
++
++ if (chan_idx == RX_CHAN) {
++ /* enable the receiver */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_CTRL_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ /* enable RX interrupts */
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr |= (MLB_I_CTRL_RX_READY |
++ MLB_I_CTRL_RX_PROT_ERR |
++ MLB_I_CTRL_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ }
++ self->ctl_channels[chan_idx] = channel;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ break;
++ case CHAN_SYNC:
++ spin_lock_irqsave(&self->lock, irq_flags);
++			/* we only support one channel at a time */
++ if (self->sync_channels[chan_idx])
++ goto error;
++
++ /* reset the FIFO */
++ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_SYNC_TX :
++ MLB_FIFO_RST_SYNC_RX,
++ self->membase + MLB_REG_FIFO_RST);
++
++ err = __timbmost_conf_channel(self, channel,
++ (chan_idx == TX_CHAN) ? MLB_CH_CFG_SYNC_TX :
++ MLB_CH_CFG_SYNC_RX);
++ if (err)
++ goto error;
++
++ if (chan_idx == RX_CHAN) {
++ /* enable the receiver */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_SYNC_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ /* enable prot error interrupts */
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr |= MLB_I_SYNC_RX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* start RX DMA */
++ self->sync_rx_state = SYNC_STATE_UP;
++ __timbmost_sync_read_wake(self);
++ } else {
++ /* enable the transmitter */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_SYNC_TX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++ self->sync_tx_state = SYNC_STATE_UP;
++ }
++
++ self->sync_channels[chan_idx] = channel;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++
++ break;
++ case CHAN_ASYNC:
++ spin_lock_irqsave(&self->lock, irq_flags);
++			/* we only support one channel at a time */
++ if (self->async_channels[chan_idx])
++ goto error;
++
++ /* reset the FIFO */
++ iowrite32((chan_idx == TX_CHAN) ?
++ MLB_FIFO_RST_ASYNC_TX : MLB_FIFO_RST_ASYNC_RX,
++ self->membase + MLB_REG_FIFO_RST);
++
++ err = __timbmost_conf_channel(self, channel,
++ (chan_idx == TX_CHAN) ? MLB_CH_CFG_ASYNC_TX :
++ MLB_CH_CFG_ASYNC_RX);
++ if (err)
++ goto error;
++
++ if (chan_idx == RX_CHAN) {
++ /* enable the receiver */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_ASYNC_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ /* enable RX interrupts */
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr |= (MLB_I_ASYNC_RX_READY |
++ MLB_I_ASYNC_RX_PROT_ERR |
++ MLB_I_ASYNC_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ }
++ self->async_channels[chan_idx] = channel;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ break;
++ default:
++ printk(KERN_WARNING "timbmlb: Unknown channel type\n");
++ return -EINVAL;
++ }
++ } else {
++ switch (type) {
++ case CHAN_CTL:
++ /* stop any ongoing transfer */
++ spin_lock_irqsave(&self->lock, irq_flags);
++ if (self->ctl_channels[chan_idx] != channel)
++ goto error;
++
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr &= ~(MLB_I_CTRL_TX_READY |
++ MLB_I_CTRL_TX_PROT_ERR |
++ MLB_I_CTRL_TX_RX_BREAK |
++ MLB_I_CTRL_TX_BUSY_BREAK |
++ MLB_I_CTRL_RX_READY |
++ MLB_I_CTRL_RX_PROT_ERR |
++ MLB_I_CTRL_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++
++ /* disable CTL RX */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg &= ~MLB_CFG_CTRL_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ err = __timbmost_conf_channel(self, channel,
++ MLB_CH_CFG_NOT_ALLOCATED);
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ skb_queue_purge(&self->ctl_q);
++ self->ctl_channels[chan_idx] = 0;
++ return err;
++ case CHAN_SYNC:
++ spin_lock_irqsave(&self->lock, irq_flags);
++
++ /* TODO: Separate RX and TX */
++ self->sync_tx_state = SYNC_STATE_DOWN;
++ self->sync_rx_state = SYNC_STATE_DOWN;
++
++ /* stop any ongoing transfer */
++ if (self->sync_channels[chan_idx] != channel)
++ goto error;
++
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ /* stop DMA */
++ timbmost_stop_sync_dma(self);
++ spin_lock_irqsave(&self->lock, irq_flags);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr &= ~MLB_I_SYNC_RX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++
++ /* disable SYNC TX/RX */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg &= ~(MLB_CFG_SYNC_TX_EN |
++ MLB_CFG_SYNC_RX_EN);
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ err = __timbmost_conf_channel(self, channel,
++ MLB_CH_CFG_NOT_ALLOCATED);
++ self->sync_write_skb = NULL;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ skb_queue_purge(&self->sync_q);
++ self->sync_channels[chan_idx] = 0;
++ return err;
++ case CHAN_ASYNC:
++ /* stop any ongoing transfer */
++ spin_lock_irqsave(&self->lock, irq_flags);
++ if (self->async_channels[chan_idx] != channel)
++ goto error;
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr &= ~(MLB_I_ASYNC_TX_READY |
++ MLB_I_ASYNC_TX_PROT_ERR |
++ MLB_I_ASYNC_TX_RX_BREAK |
++ MLB_I_ASYNC_TX_BUSY_BREAK |
++ MLB_I_ASYNC_RX_READY |
++ MLB_I_ASYNC_RX_PROT_ERR |
++ MLB_I_ASYNC_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++
++ /* disable CTL RX */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg &= ~MLB_CFG_ASYNC_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ err = __timbmost_conf_channel(self, channel,
++ MLB_CH_CFG_NOT_ALLOCATED);
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ skb_queue_purge(&self->async_q);
++ self->async_channels[chan_idx] = 0;
++ return err;
++ default:
++ return -EINVAL;
++ }
++ }
++ return 0;
++
++error:
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ return err;
++}
++
++static void timbmost_ctl_write_wake(struct timbmost *self)
++{
++ unsigned long flags;
++ u32 imr;
++ u32 isr;
++ struct sk_buff *skb;
++ int i;
++
++ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
++ __timbmost_dump_regs(self, "Before write");
++
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ isr = ioread32(self->membase + MLB_REG_ISR);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++	/* check if the hardware is currently writing;
++	   check with isr to work around an FPGA interrupt bug */
++ if (imr & ~isr & MLB_I_CTRL_TX_READY)
++ return;
++
++ /* check if we have sync */
++ if (!(isr & MLB_I_SYNC_LOCK))
++ return;
++
++ skb = skb_dequeue(&self->ctl_q);
++ if (!skb)
++ return;
++
++ /* now write to the FIFO */
++ for (i = 0; i < skb->len;) {
++ u32 word = 0;
++ int j;
++
++ for (j = 0; j < 4 && i < skb->len; j++, i++)
++ word |= ((u8 *)skb->data)[i] << j * 8;
++
++ iowrite32(word, self->membase + MLB_REG_CTRL_TX);
++ }
++
++ /* data is in the FIFO, enable proper interrupts */
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_CTRL_TX_READY |
++ MLB_I_CTRL_TX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* start TX */
++ iowrite32(MLB_CH_CTRL_CTRL_TX_START, self->membase + MLB_REG_CH_CTRL);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ kfree_skb(skb);
++}
++
++static void timbmost_async_write_wake(struct timbmost *self)
++{
++ unsigned long flags;
++ u32 imr;
++ u32 isr;
++ struct sk_buff *skb;
++ int i;
++
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ isr = ioread32(self->membase + MLB_REG_ISR);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ /* check if the hardware is currently writing */
++ if (imr & MLB_I_ASYNC_TX_READY)
++ return;
++
++ /* check if we have sync */
++ if (!(isr & MLB_I_SYNC_LOCK))
++ return;
++
++ skb = skb_dequeue(&self->async_q);
++ if (!skb)
++ return;
++
++ /* TODO: The FIFO is 32bit not 8bit */
++ /* now write to the FIFO */
++ for (i = 0; i < skb->len; i++)
++ iowrite32(skb->data[i], self->membase + MLB_REG_ASYNC_TX);
++
++ /* data is in the FIFO, enable proper interrupts */
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_ASYNC_TX_READY |
++ MLB_I_ASYNC_TX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* start TX */
++ iowrite32(MLB_CH_CTRL_ASYNC_TX_START, self->membase + MLB_REG_CH_CTRL);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ kfree_skb(skb);
++}
++
++static int timbmost_send(struct sk_buff *skb)
++{
++ struct most_dev *mdev = (struct most_dev *)skb->dev;
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++
++ dev_dbg(mdev->parent, "%s, type: %d\n",
++ __func__, most_cb(skb)->channel_type);
++
++ switch (most_cb(skb)->channel_type) {
++ case CHAN_CTL:
++ skb_queue_tail(&self->ctl_q, skb);
++ timbmost_ctl_write_wake(self);
++ break;
++ case CHAN_ASYNC:
++ skb_queue_tail(&self->async_q, skb);
++ timbmost_async_write_wake(self);
++ break;
++ case CHAN_SYNC:
++ skb_queue_tail(&self->sync_q, skb);
++ timbmost_sync_start_write(self);
++ break;
++ default:
++ printk(KERN_WARNING "%s: Got unsupported channel type: %d\n",
++ __func__, most_cb(skb)->channel_type);
++ kfree_skb(skb);
++ break;
++ }
++
++ return 0;
++}
++
++static int timbmost_probe(struct platform_device *dev)
++{
++ int err;
++ struct timbmost *self = NULL;
++ struct resource *iomem;
++ struct timbmlb_platform_data *pdata = dev->dev.platform_data;
++
++ if (!pdata) {
++		printk(KERN_ERR DRIVER_NAME ": No platform data supplied\n");
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ self = kzalloc(sizeof(*self), GFP_KERNEL);
++ if (!self) {
++ err = -ENOMEM;
++ goto err_mem;
++ }
++
++ self->sync_rx_chan_id = pdata->rx_dma_channel;
++ self->sync_tx_chan_id = pdata->tx_dma_channel;
++ tasklet_init(&self->sync_rx_tasklet, timbmost_sync_rx_tasklet,
++ (unsigned long)self);
++ tasklet_init(&self->sync_tx_tasklet, timbmost_sync_tx_tasklet,
++ (unsigned long)self);
++
++ self->mdev = most_alloc_dev();
++ if (!self->mdev) {
++ err = -ENOMEM;
++ goto err_mem;
++ }
++
++ self->mdev->owner = THIS_MODULE;
++ self->mdev->driver_data = self;
++ self->mdev->parent = &dev->dev;
++ self->mdev->open = timbmost_open;
++ self->mdev->close = timbmost_close;
++ self->mdev->send = timbmost_send;
++ self->mdev->conf_channel = timbmost_conf_channel;
++
++ if (!request_mem_region(iomem->start,
++ resource_size(iomem), "timb-most")) {
++ err = -EBUSY;
++ goto err_mem;
++ }
++
++ self->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!self->membase) {
++ printk(KERN_ERR "timbmost: Failed to remap I/O memory\n");
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ self->reset_pin = pdata->reset_pin;
++
++ /* find interrupt */
++ self->irq = platform_get_irq(dev, 0);
++ if (self->irq < 0) {
++ err = self->irq;
++ goto err_get_irq;
++ }
++
++ /* register to the MOST layer */
++ err = most_register_dev(self->mdev);
++ if (err)
++ goto err_register;
++
++
++ platform_set_drvdata(dev, self);
++
++ return 0;
++
++err_get_irq:
++err_register:
++ iounmap(self->membase);
++err_ioremap:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_mem:
++ if (self && self->mdev)
++ most_free_dev(self->mdev);
++
++ kfree(self);
++
++ printk(KERN_ERR "timb-most: Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int timbmost_remove(struct platform_device *dev)
++{
++ struct timbmost *self = platform_get_drvdata(dev);
++ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++
++ most_unregister_dev(self->mdev);
++ iounmap(self->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ most_free_dev(self->mdev);
++ kfree(self);
++ return 0;
++}
++
++static struct platform_driver timbmost_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbmost_probe,
++ .remove = timbmost_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbmost_init(void)
++{
++ return platform_driver_register(&timbmost_platform_driver);
++}
++
++static void __exit timbmost_exit(void)
++{
++ platform_driver_unregister(&timbmost_platform_driver);
++}
++
++module_init(timbmost_init);
++module_exit(timbmost_exit);
++
++MODULE_DESCRIPTION("Timberdale MLB driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:timb-most");
+diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
+index 34b31da..cd19a35 100644
+--- a/drivers/serial/timbuart.c
++++ b/drivers/serial/timbuart.c
+@@ -31,6 +31,7 @@
+
+ struct timbuart_port {
+ struct uart_port port;
++ struct uart_driver uart_driver;
+ struct tasklet_struct tasklet;
+ int usedma;
+ u32 last_ier;
+@@ -410,7 +411,7 @@ static struct uart_ops timbuart_ops = {
+ .verify_port = timbuart_verify_port
+ };
+
+-static struct uart_driver timbuart_driver = {
++static const __devinitconst struct uart_driver timbuart_driver_template = {
+ .owner = THIS_MODULE,
+ .driver_name = "timberdale_uart",
+ .dev_name = "ttyTU",
+@@ -419,7 +420,7 @@ static struct uart_driver timbuart_driver = {
+ .nr = 1
+ };
+
+-static int timbuart_probe(struct platform_device *dev)
++static int __devinit timbuart_probe(struct platform_device *dev)
+ {
+ int err;
+ struct timbuart_port *uart;
+@@ -433,6 +434,8 @@ static int timbuart_probe(struct platform_device *dev)
+ goto err_mem;
+ }
+
++ uart->uart_driver = timbuart_driver_template;
++
+ uart->usedma = 0;
+
+ uart->port.uartclk = 3250000 * 16;
+@@ -461,11 +464,11 @@ static int timbuart_probe(struct platform_device *dev)
+
+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
+
+- err = uart_register_driver(&timbuart_driver);
++ err = uart_register_driver(&uart->uart_driver);
+ if (err)
+ goto err_register;
+
+- err = uart_add_one_port(&timbuart_driver, &uart->port);
++ err = uart_add_one_port(&uart->uart_driver, &uart->port);
+ if (err)
+ goto err_add_port;
+
+@@ -474,7 +477,7 @@ static int timbuart_probe(struct platform_device *dev)
+ return 0;
+
+ err_add_port:
+- uart_unregister_driver(&timbuart_driver);
++ uart_unregister_driver(&uart->uart_driver);
+ err_register:
+ kfree(uart);
+ err_mem:
+@@ -484,13 +487,13 @@ err_mem:
+ return err;
+ }
+
+-static int timbuart_remove(struct platform_device *dev)
++static int __devexit timbuart_remove(struct platform_device *dev)
+ {
+ struct timbuart_port *uart = platform_get_drvdata(dev);
+
+ tasklet_kill(&uart->tasklet);
+- uart_remove_one_port(&timbuart_driver, &uart->port);
+- uart_unregister_driver(&timbuart_driver);
++ uart_remove_one_port(&uart->uart_driver, &uart->port);
++ uart_unregister_driver(&uart->uart_driver);
+ kfree(uart);
+
+ return 0;
+diff --git a/include/linux/can/platform/ascb.h b/include/linux/can/platform/ascb.h
+new file mode 100644
+index 0000000..817162b
+--- /dev/null
++++ b/include/linux/can/platform/ascb.h
+@@ -0,0 +1,8 @@
++#ifndef _CAN_PLATFORM_ASCB_H_
++#define _CAN_PLATFORM_ASCB_H_
++
++struct ascb_platform_data {
++ int gpio_pin;
++};
++
++#endif
+diff --git a/include/linux/i2c-xiic.h b/include/linux/i2c-xiic.h
+new file mode 100644
+index 0000000..4f9f225
+--- /dev/null
++++ b/include/linux/i2c-xiic.h
+@@ -0,0 +1,43 @@
++/*
++ * i2c-xiic.h
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Xilinx IIC
++ */
++
++#ifndef _LINUX_I2C_XIIC_H
++#define _LINUX_I2C_XIIC_H
++
++/**
++ * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver
++ * @num_devices: Number of devices that shall be added when the driver
++ * is probed.
++ * @devices: The actual devices to add.
++ *
++ * The purpose of this platform data struct is to be able to provide a number
++ * of devices that should be added to the I2C bus. The reason is that sometimes
++ * the I2C board info is not enough; a new PCI board can, for instance, be
++ * plugged into a standard PC, and the bus number might be unknown at
++ * early init time.
++ */
++struct xiic_i2c_platform_data {
++ u8 num_devices;
++ struct i2c_board_info const *devices;
++};
++
++#endif /* _LINUX_I2C_XIIC_H */
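
As a usage illustration of xiic_i2c_platform_data, a board file or MFD host could pre-declare the I2C devices to be instantiated once the bus number is known. This is a minimal sketch under stated assumptions: the device names and addresses are hypothetical placeholders, not taken from this patch.

	#include <linux/kernel.h>
	#include <linux/i2c.h>
	#include <linux/i2c-xiic.h>

	/* Hypothetical client devices; names and addresses are placeholders. */
	static struct i2c_board_info const xiic_devices[] = {
		{ I2C_BOARD_INFO("example-tuner", 0x60) },
		{ I2C_BOARD_INFO("example-dsp", 0x1c) },
	};

	static struct xiic_i2c_platform_data xiic_pdata = {
		.num_devices = ARRAY_SIZE(xiic_devices),
		.devices = xiic_devices,
	};

The xiic_pdata pointer would then be handed to the xiic platform device as its platform_data.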
+diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
+new file mode 100644
+index 0000000..1d59c45
+--- /dev/null
++++ b/include/linux/ks8842.h
+@@ -0,0 +1,34 @@
++/*
++ * ks8842.h KS8842 platform data struct definition
++ * Copyright (c) 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _LINUX_KS8842_H
++#define _LINUX_KS8842_H
++
++/**
++ * struct ks8842_platform_data - Platform data of the KS8842 network driver
++ * @rx_dma_channel: The DMA channel to use for RX, -1 for none.
++ * @tx_dma_channel: The DMA channel to use for TX, -1 for none.
++ *
++ * If no platform data pointer is provided, no DMA will be used.
++ */
++struct ks8842_platform_data {
++ int rx_dma_channel;
++ int tx_dma_channel;
++};
++
++#endif
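
To show how ks8842_platform_data reaches the driver, a host (for example an MFD cell or a board file) could attach it to the platform device. This is a hedged sketch only: the DMA channel numbers are hypothetical, and the device name is assumed to match the driver's DRV_NAME.

	#include <linux/platform_device.h>
	#include <linux/ks8842.h>

	/* Hypothetical DMA channel numbers; set either field to -1 to disable DMA. */
	static struct ks8842_platform_data ks8842_pdata = {
		.rx_dma_channel = 4,
		.tx_dma_channel = 5,
	};

	static struct platform_device ks8842_device = {
		.name = "ks8842",	/* assumed to match the driver's DRV_NAME */
		.id = -1,
		.dev = {
			.platform_data = &ks8842_pdata,
		},
	};

Registering this with platform_device_register() lets ks8842_probe() pick the channels up from pdev->dev.platform_data, as done in the hunk above.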
+diff --git a/include/linux/most/timbmlb.h b/include/linux/most/timbmlb.h
+new file mode 100644
+index 0000000..a3b8c76
+--- /dev/null
++++ b/include/linux/most/timbmlb.h
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_MOST_TIMBMLB_H
++#define __LINUX_MOST_TIMBMLB_H
++
++/* Timberdale MLB IP */
++struct timbmlb_platform_data {
++ int reset_pin; /* pin used for reset of the INIC */
++ int rx_dma_channel;
++ int tx_dma_channel;
++};
++
++#endif
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 7b3aae2..3334368 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -189,7 +189,8 @@ struct ucred {
+ #define AF_ISDN 34 /* mISDN sockets */
+ #define AF_PHONET 35 /* Phonet sockets */
+ #define AF_IEEE802154 36 /* IEEE802154 sockets */
+-#define AF_MAX 37 /* For now.. */
++#define AF_MOST 37 /* Media Oriented Systems Transport */
++#define AF_MAX 38 /* For now.. */
+
+ /* Protocol families, same as address families. */
+ #define PF_UNSPEC AF_UNSPEC
+@@ -229,6 +230,7 @@ struct ucred {
+ #define PF_ISDN AF_ISDN
+ #define PF_PHONET AF_PHONET
+ #define PF_IEEE802154 AF_IEEE802154
++#define PF_MOST AF_MOST
+ #define PF_MAX AF_MAX
+
+ /* Maximum queue length specifiable by listen. */
+diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h
+new file mode 100644
+index 0000000..bb043e9
+--- /dev/null
++++ b/include/linux/timb_dma.h
+@@ -0,0 +1,55 @@
++/*
++ * timb_dma.h timberdale FPGA DMA driver defines
++ * Copyright (c) 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA DMA engine
++ */
++
++#ifndef _LINUX_TIMB_DMA_H
++#define _LINUX_TIMB_DMA_H
++
++/**
++ * struct timb_dma_platform_data_channel - Description of each individual
++ * DMA channel for the timberdale DMA driver
++ * @rx: true if this channel handles data in the direction to
++ * the CPU.
++ * @bytes_per_line: Number of bytes per line; this is specific to channels
++ * handling video data. For other channels this shall be left at 0.
++ * @descriptors: Number of descriptors to allocate for this channel.
++ * @descriptor_elements: Number of elements in each descriptor.
++ *
++ */
++struct timb_dma_platform_data_channel {
++ bool rx;
++ unsigned int bytes_per_line;
++ unsigned int descriptors;
++ unsigned int descriptor_elements;
++};
++
++/**
++ * struct timb_dma_platform_data - Platform data of the timberdale DMA driver
++ * @nr_channels: Number of defined channels in the channels array.
++ * @channels: Definition of each channel.
++ *
++ */
++struct timb_dma_platform_data {
++ unsigned nr_channels;
++ struct timb_dma_platform_data_channel channels[32];
++};
++
++#endif
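
For illustration, platform data for the DMA engine could describe one RX and one TX channel as below; this is a sketch only, and the descriptor counts are arbitrary placeholders rather than values taken from this patch.

	#include <linux/timb_dma.h>

	static struct timb_dma_platform_data tdma_pdata = {
		.nr_channels = 2,
		.channels = {
			{
				/* channel 0: data flowing towards the CPU */
				.rx = true,
				.descriptors = 2,
				.descriptor_elements = 4,
			},
			{
				/* channel 1: data flowing away from the CPU */
				.rx = false,
				.descriptors = 2,
				.descriptor_elements = 4,
			},
		},
	};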
+diff --git a/include/media/timb_radio.h b/include/media/timb_radio.h
+new file mode 100644
+index 0000000..fcd32a3
+--- /dev/null
++++ b/include/media/timb_radio.h
+@@ -0,0 +1,36 @@
++/*
++ * timb_radio.h Platform struct for the Timberdale radio driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _TIMB_RADIO_
++#define _TIMB_RADIO_ 1
++
++#include <linux/i2c.h>
++
++struct timb_radio_platform_data {
++ int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */
++ struct {
++ const char *module_name;
++ struct i2c_board_info *info;
++ } tuner;
++ struct {
++ const char *module_name;
++ struct i2c_board_info *info;
++ } dsp;
++};
++
++#endif
+diff --git a/include/media/timb_video.h b/include/media/timb_video.h
+new file mode 100644
+index 0000000..55334ad
+--- /dev/null
++++ b/include/media/timb_video.h
+@@ -0,0 +1,34 @@
++/*
++ * timb_video.h Platform struct for the Timberdale video driver
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _TIMB_VIDEO_
++#define _TIMB_VIDEO_ 1
++
++#include <linux/i2c.h>
++
++struct timb_video_platform_data {
++ int dma_channel;
++ int i2c_adapter; /* The I2C adapter where the encoder is attached */
++ struct {
++ const char *module_name;
++ struct i2c_board_info *info;
++ } encoder;
++};
++
++#endif
++
+diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
+index 6cc107d..5341e3d 100644
+--- a/include/media/v4l2-chip-ident.h
++++ b/include/media/v4l2-chip-ident.h
+@@ -155,6 +155,9 @@ enum {
+ /* module adv7343: just ident 7343 */
+ V4L2_IDENT_ADV7343 = 7343,
+
++ /* module saa7706h: just ident 7706 */
++ V4L2_IDENT_SAA7706H = 7706,
++
+ /* module wm8739: just ident 8739 */
+ V4L2_IDENT_WM8739 = 8739,
+
+diff --git a/include/net/most/async.h b/include/net/most/async.h
+new file mode 100644
+index 0000000..5a4d49d
+--- /dev/null
++++ b/include/net/most/async.h
+@@ -0,0 +1,12 @@
++#ifndef __ASYNC_H
++#define __ASYNC_H
++
++struct sockaddr_mostasync {
++ sa_family_t most_family;
++ unsigned short most_dev;
++ unsigned char rx_channel;
++ unsigned char tx_channel;
++};
++
++#endif
++
+diff --git a/include/net/most/ctl.h b/include/net/most/ctl.h
+new file mode 100644
+index 0000000..d34726c
+--- /dev/null
++++ b/include/net/most/ctl.h
+@@ -0,0 +1,12 @@
++#ifndef __CTL_H
++#define __CTL_H
++
++struct sockaddr_mostctl {
++ sa_family_t most_family;
++ unsigned short most_dev;
++ unsigned char rx_channel;
++ unsigned char tx_channel;
++};
++
++#endif
++
+diff --git a/include/net/most/dev.h b/include/net/most/dev.h
+new file mode 100644
+index 0000000..bd6dc48
+--- /dev/null
++++ b/include/net/most/dev.h
+@@ -0,0 +1,27 @@
++#ifndef __DEV_H
++#define __DEV_H
++
++struct sockaddr_mostdev {
++ sa_family_t most_family;
++ unsigned short most_dev;
++};
++
++
++/* MOST Dev ioctl defines */
++#define MOSTDEVUP _IOW('M', 201, int)
++#define MOSTDEVDOWN _IOW('M', 202, int)
++
++#define MOSTGETDEVLIST _IOR('M', 210, int)
++
++struct most_dev_req {
++ uint16_t dev_id;
++};
++
++struct most_dev_list_req {
++ uint16_t dev_num;
++ struct most_dev_req dev_req[0];
++};
++
++
++#endif
++
+diff --git a/include/net/most/most.h b/include/net/most/most.h
+new file mode 100644
+index 0000000..8ce75e2
+--- /dev/null
++++ b/include/net/most/most.h
+@@ -0,0 +1,110 @@
++#ifndef __MOST_H
++#define __MOST_H
++
++#include <net/sock.h>
++
++#ifndef AF_MOST
++#define AF_MOST 37
++#define PF_MOST AF_MOST
++#endif
++
++/* Reserve for core and drivers use */
++#define MOST_SKB_RESERVE 8
++
++#define CTL_FRAME_SIZE 32
++
++#define MOSTPROTO_DEV 0
++#define MOSTPROTO_CTL 1
++#define MOSTPROTO_SYNC 2
++#define MOSTPROTO_ASYNC 3
++
++#define MOST_NO_CHANNEL 0xFE
++
++enum {
++	MOST_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
++ MOST_OPEN,
++ MOST_BOUND,
++};
++
++
++struct most_skb_cb {
++ __u8 channel_type;
++ __u8 channel;
++};
++#define most_cb(skb) ((struct most_skb_cb *)(skb->cb))
++
++struct most_sock {
++ struct sock sk;
++ u8 channel_type;
++ u8 rx_channel;
++ u8 tx_channel;
++ int dev_id;
++ struct most_dev *mdev;
++};
++#define most_sk(sk) ((struct most_sock *)sk)
++
++static inline struct sock *most_sk_alloc(struct net *net,
++ struct proto *pops, u8 channel_type)
++{
++ struct sock *sk = sk_alloc(net, PF_MOST, GFP_ATOMIC, pops);
++ if (sk) {
++ most_sk(sk)->channel_type = channel_type;
++ most_sk(sk)->dev_id = -1;
++ }
++
++ return sk;
++}
++static inline struct sk_buff *most_skb_alloc(unsigned int len, gfp_t how)
++{
++ struct sk_buff *skb = alloc_skb(len + MOST_SKB_RESERVE, how);
++
++ if (skb)
++ skb_reserve(skb, MOST_SKB_RESERVE);
++
++ return skb;
++}
++
++static inline struct sk_buff *most_skb_send_alloc(struct sock *sk,
++ unsigned long len, int nb, int *err)
++{
++ struct sk_buff *skb =
++ sock_alloc_send_skb(sk, len + MOST_SKB_RESERVE, nb, err);
++
++ if (skb)
++ skb_reserve(skb, MOST_SKB_RESERVE);
++
++ return skb;
++}
++
++struct most_sock_list {
++ struct hlist_head head;
++ rwlock_t lock;
++};
++
++struct most_dev *most_dev_get(int index);
++
++int most_sock_register(int proto, struct net_proto_family *ops);
++int most_sock_unregister(int proto);
++void most_sock_link(struct sock *s);
++void most_sock_unlink(struct sock *sk);
++
++int most_send_to_sock(int dev_id, struct sk_buff *skb);
++
++/* default implementation of socket operations */
++int most_sock_release(struct socket *sock);
++int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan);
++int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len, int flags);
++int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len);
++int most_sock_setsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, unsigned int optlen);
++int most_sock_getsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, int __user *optlen);
++
++extern int dev_sock_init(void);
++extern void dev_sock_cleanup(void);
++
++#endif /* __MOST_H */
++
+diff --git a/include/net/most/most_core.h b/include/net/most/most_core.h
+new file mode 100644
+index 0000000..9373d89
+--- /dev/null
++++ b/include/net/most/most_core.h
+@@ -0,0 +1,137 @@
++#ifndef __MOST_CORE_H
++#define __MOST_CORE_H
++
++#include <net/most/most.h>
++
++enum most_chan_type {
++ CHAN_CTL = 0,
++ CHAN_SYNC,
++ CHAN_ASYNC,
++ CHAN_DEV
++};
++
++#define MOST_CONF_FLAG_UP 0x01
++#define MOST_CONF_FLAG_TX 0x02
++
++enum most_dev_state {
++ MOST_DEV_DOWN = 0,
++ MOST_DEV_UP
++};
++
++struct most_dev {
++
++ struct list_head list;
++ atomic_t refcnt;
++
++ char name[8];
++
++ __u16 id;
++ enum most_dev_state state;
++
++ struct module *owner;
++
++ struct tasklet_struct rx_task;
++ struct tasklet_struct tx_task;
++
++ struct sk_buff_head rx_q;
++ struct sk_buff_head ctl_q;
++ struct sk_buff_head async_q;
++ struct sk_buff_head sync_q;
++
++ /* set by the driver */
++
++ void *driver_data;
++ struct device *parent;
++
++ int (*open)(struct most_dev *mdev);
++ int (*close)(struct most_dev *mdev);
++ int (*conf_channel)(struct most_dev *mdev, enum most_chan_type type,
++ u8 channel, u8 flags);
++ int (*send)(struct sk_buff *skb);
++ int (*can_send)(struct sk_buff *skb);
++};
++
++#ifdef DEBUG
++#define most_dbg(...) printk(__VA_ARGS__)
++#else
++#define most_dbg(...)
++#endif
++
++static inline struct most_dev *most_dev_hold(struct most_dev *d)
++{
++ if (try_module_get(d->owner))
++ return d;
++ return NULL;
++}
++
++static inline void most_dev_put(struct most_dev *d)
++{
++ module_put(d->owner);
++}
++
++static inline void most_sched_tx(struct most_dev *mdev)
++{
++ tasklet_schedule(&mdev->tx_task);
++}
++
++static inline void most_sched_rx(struct most_dev *mdev)
++{
++ tasklet_schedule(&mdev->rx_task);
++}
++
++static inline int most_recv_frame(struct sk_buff *skb)
++{
++ struct most_dev *mdev = (struct most_dev *) skb->dev;
++
++ /* Time stamp */
++ __net_timestamp(skb);
++
++ /* Queue frame for rx task */
++ skb_queue_tail(&mdev->rx_q, skb);
++ most_sched_rx(mdev);
++ return 0;
++}
++
++static inline int __most_configure_channel(struct most_dev *mdev,
++ u8 channel_type, u8 channel, u8 up)
++{
++ if (mdev->state != MOST_DEV_UP)
++ return -ENETDOWN;
++
++ if (mdev->conf_channel)
++ if (channel != MOST_NO_CHANNEL)
++ return mdev->conf_channel(mdev, channel_type, channel,
++ up);
++ return 0;
++}
++
++static inline int most_configure_channels(struct most_dev *mdev,
++ struct most_sock *sk, u8 up)
++{
++ int err;
++ u8 flags = (up) ? MOST_CONF_FLAG_UP : 0;
++
++ err = __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
++ flags);
++ if (err)
++ return err;
++
++ err = __most_configure_channel(mdev, sk->channel_type, sk->tx_channel,
++ flags | MOST_CONF_FLAG_TX);
++ if (err)
++ __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
++ (up) ? 0 : MOST_CONF_FLAG_UP);
++ return err;
++}
++
++struct most_dev *most_alloc_dev(void);
++void most_free_dev(struct most_dev *mdev);
++int most_register_dev(struct most_dev *mdev);
++int most_unregister_dev(struct most_dev *mdev);
++
++int most_get_dev_list(void __user *arg);
++int most_open_dev(u16 dev_id);
++int most_close_dev(u16 dev_id);
++
++#endif
++
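As an editorial illustration (not part of the patch), here is a hedged sketch of how a hardware driver might sit on top of the most_core.h API above: allocate and register a struct most_dev, then hand received frames to the core with most_recv_frame(). Everything prefixed my_ is hypothetical; only the most_* and skb calls come from this patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <net/most/most_core.h>

static int my_most_open(struct most_dev *mdev)  { return 0; }
static int my_most_close(struct most_dev *mdev) { return 0; }

static int my_most_send(struct sk_buff *skb)
{
	/* hand the frame to the hypothetical hardware, then drop our ref */
	kfree_skb(skb);
	return 0;
}

static int my_most_probe(struct platform_device *pdev)
{
	struct most_dev *mdev = most_alloc_dev();
	if (!mdev)
		return -ENOMEM;

	/* most_register_dev() rejects devices without these callbacks */
	mdev->owner  = THIS_MODULE;
	mdev->parent = &pdev->dev;
	mdev->open   = my_most_open;
	mdev->close  = my_most_close;
	mdev->send   = my_most_send;

	return most_register_dev(mdev);	/* assigns the next free "mostN" id */
}

/* In the RX completion path a frame would be tagged and queued roughly
 * like this (channel numbers depend on the hardware):
 *
 *	most_cb(skb)->channel_type = CHAN_ASYNC;
 *	most_cb(skb)->channel = rx_channel;
 *	skb->dev = (void *)mdev;
 *	most_recv_frame(skb);
 */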
+diff --git a/include/net/most/sync.h b/include/net/most/sync.h
+new file mode 100644
+index 0000000..aa89d16
+--- /dev/null
++++ b/include/net/most/sync.h
+@@ -0,0 +1,12 @@
++#ifndef __SYNC_H
++#define __SYNC_H
++
++struct sockaddr_mostsync {
++ sa_family_t most_family;
++ unsigned short most_dev;
++ unsigned char rx_channel;
++ unsigned char tx_channel;
++};
++
++#endif
++
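As an editorial illustration (not part of the patch), a hedged user-space sketch of opening a synchronous streaming channel. The SOCK_STREAM type and MOSTPROTO_SYNC number follow sync_sock.c later in this patch, and the address layout mirrors sockaddr_mostsync above (redeclared here because the kernel header is not exported); device and channel numbers are placeholders.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_MOST
#define AF_MOST 37
#endif
#define MOSTPROTO_SYNC	2
#define MOST_NO_CHANNEL	0xFE		/* "no channel", as in most.h */

struct sockaddr_mostsync {
	sa_family_t most_family;
	unsigned short most_dev;
	unsigned char rx_channel;
	unsigned char tx_channel;
};

/* Open a receive-only synchronous socket on a placeholder channel. */
int most_sync_open_rx(void)
{
	struct sockaddr_mostsync addr;
	int fd = socket(AF_MOST, SOCK_STREAM, MOSTPROTO_SYNC);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.most_family = AF_MOST;
	addr.most_dev    = 0;			/* hypothetical device id */
	addr.rx_channel  = 1;			/* hypothetical channel */
	addr.tx_channel  = MOST_NO_CHANNEL;	/* no transmit side */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* frames are then read from fd */
}

Binding configures the channel through most_configure_channels(), so the device must have been brought up first (see the MOSTDEVUP sketch earlier); otherwise the bind fails with -ENETDOWN.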
+diff --git a/include/sound/timbi2s.h b/include/sound/timbi2s.h
+new file mode 100644
+index 0000000..ebfecfe
+--- /dev/null
++++ b/include/sound/timbi2s.h
+@@ -0,0 +1,33 @@
++/*
++ * timbi2s.h timberdale FPGA I2S platform data
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#ifndef __INCLUDE_SOUND_TIMBI2S_H
++#define __INCLUDE_SOUND_TIMBI2S_H
++
++struct timbi2s_bus_data {
++ u8 rx;
++ u16 sample_rate;
++ const char *name;
++};
++
++struct timbi2s_platform_data {
++ const struct timbi2s_bus_data *busses;
++ int num_busses;
++ u32 main_clk;
++};
++
++#endif
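As an editorial illustration (not part of the patch), a hedged sketch of the platform data a board or MFD driver might hand to this I2S driver, using the structures defined above. The bus names, sample rates and main clock value are made-up placeholders; the probe code further down only expects pdev->dev.platform_data to point at such a structure.

#include <linux/kernel.h>
#include <sound/timbi2s.h>

static const struct timbi2s_bus_data my_timbi2s_busses[] = {
	{
		.rx = 1,		/* receiver instance */
		.sample_rate = 44100,
		.name = "i2s-rx",	/* hypothetical name */
	},
	{
		.rx = 0,		/* transmitter instance */
		.sample_rate = 44100,
		.name = "i2s-tx",	/* hypothetical name */
	},
};

static const struct timbi2s_platform_data my_timbi2s_pdata = {
	.busses     = my_timbi2s_busses,
	.num_busses = ARRAY_SIZE(my_timbi2s_busses),
	.main_clk   = 62500000,	/* hypothetical main clock in Hz */
};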
+diff --git a/net/Kconfig b/net/Kconfig
+index 041c35e..063b84a 100644
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -255,6 +255,7 @@ source "net/can/Kconfig"
+ source "net/irda/Kconfig"
+ source "net/bluetooth/Kconfig"
+ source "net/rxrpc/Kconfig"
++source "net/most/Kconfig"
+
+ config FIB_RULES
+ bool
+diff --git a/net/Makefile b/net/Makefile
+index 1542e72..3e4fe8f 100644
+--- a/net/Makefile
++++ b/net/Makefile
+@@ -43,6 +43,7 @@ obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_DECNET) += decnet/
+ obj-$(CONFIG_ECONET) += econet/
+ obj-$(CONFIG_PHONET) += phonet/
++obj-$(CONFIG_MOST) += most/
+ ifneq ($(CONFIG_VLAN_8021Q),)
+ obj-y += 8021q/
+ endif
+diff --git a/net/most/Kconfig b/net/most/Kconfig
+new file mode 100644
+index 0000000..a9fd7f2
+--- /dev/null
++++ b/net/most/Kconfig
+@@ -0,0 +1,38 @@
++#
++# Media Oriented Systems Transport (MOST) network layer core configuration
++#
++
++menuconfig MOST
++ depends on NET
++ tristate "MOST bus subsystem support"
++ ---help---
++ Media Oriented Systems Transport (MOST) is a multimedia
++ communications protocol in the automotive industry.
++
++ If you want MOST support you should say Y here.
++
++config MOST_CTL
++ tristate "Support for Control data over MOST"
++ depends on MOST
++ default n
++ ---help---
++ Support for the control channel of the MOST bus.
++
++config MOST_ASYNC
++ tristate "Support for Asynchronous data over MOST"
++ depends on MOST
++ default n
++ ---help---
++ Support for the asynchronous channel of the MOST bus. Normally
++ used for software download or file transfers.
++
++config MOST_SYNC
++ tristate "Support for Synchronous data over MOST"
++ depends on MOST
++ default n
++ ---help---
++ Support for synchronous channels of the MOST bus. Normally used
++ for streaming media such as audio and video.
++
++
++source "drivers/net/most/Kconfig"
+diff --git a/net/most/Makefile b/net/most/Makefile
+new file mode 100644
+index 0000000..2d81b3d
+--- /dev/null
++++ b/net/most/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Linux Media Oriented Systems Transport core.
++#
++
++obj-$(CONFIG_MOST) += most.o
++most-objs := af_most.o most_core.o most_sock.o dev_sock.o
++
++obj-$(CONFIG_MOST_CTL) += ctl.o
++ctl-objs := ctl_sock.o
++
++obj-$(CONFIG_MOST_SYNC) += sync.o
++sync-objs := sync_sock.o
++
++obj-$(CONFIG_MOST_ASYNC) += async.o
++async-objs := async_sock.o
+diff --git a/net/most/af_most.c b/net/most/af_most.c
+new file mode 100644
+index 0000000..157df9f
+--- /dev/null
++++ b/net/most/af_most.c
+@@ -0,0 +1,170 @@
++/*
++ * af_most.c Support for the MOST address family
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++
++#define MOST_MAX_PROTO 4
++static struct net_proto_family *most_proto[MOST_MAX_PROTO];
++static DEFINE_RWLOCK(most_proto_lock);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static struct lock_class_key most_lock_key[MOST_MAX_PROTO];
++static const char *most_key_strings[MOST_MAX_PROTO] = {
++ "sk_lock-AF_MOST-MOSTPROTO_DEV",
++ "sk_lock-AF_MOST-MOSTPROTO_CTL",
++ "sk_lock-AF_MOST-MOSTPROTO_SYNC",
++ "sk_lock-AF_MOST-MOSTPROTO_ASYNC",
++};
++
++static struct lock_class_key most_slock_key[MOST_MAX_PROTO];
++static const char *most_slock_key_strings[MOST_MAX_PROTO] = {
++ "slock-AF_MOST-MOSTPROTO_DEV",
++ "slock-AF_MOST-MOSTPROTO_CTL",
++ "slock-AF_MOST-MOSTPROTO_SYNC",
++ "slock-AF_MOST-MOSTPROTO_ASYNC",
++};
++
++static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
++{
++ struct sock *sk = sock->sk;
++
++ if (!sk)
++ return;
++
++ BUG_ON(sock_owned_by_user(sk));
++
++ sock_lock_init_class_and_name(sk,
++ most_slock_key_strings[proto], &most_slock_key[proto],
++ most_key_strings[proto], &most_lock_key[proto]);
++}
++#else
++static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
++{
++}
++#endif
++
++
++int most_sock_register(int proto, struct net_proto_family *ops)
++{
++ int err = 0;
++
++ if (proto < 0 || proto >= MOST_MAX_PROTO)
++ return -EINVAL;
++
++ write_lock(&most_proto_lock);
++
++ if (most_proto[proto])
++ err = -EEXIST;
++ else
++ most_proto[proto] = ops;
++
++ write_unlock(&most_proto_lock);
++
++ return err;
++}
++EXPORT_SYMBOL(most_sock_register);
++
++int most_sock_unregister(int proto)
++{
++ int err = 0;
++
++ if (proto < 0 || proto >= MOST_MAX_PROTO)
++ return -EINVAL;
++
++ write_lock(&most_proto_lock);
++
++ if (!most_proto[proto])
++ err = -ENOENT;
++ else
++ most_proto[proto] = NULL;
++
++ write_unlock(&most_proto_lock);
++
++ return err;
++}
++EXPORT_SYMBOL(most_sock_unregister);
++
++static int most_sock_create(struct net *net, struct socket *sock, int proto,
++ int kern)
++{
++ int err;
++
++ if (net != &init_net)
++ return -EAFNOSUPPORT;
++
++ if (proto < 0 || proto >= MOST_MAX_PROTO)
++ return -EINVAL;
++
++ if (!most_proto[proto])
++ request_module("most-proto-%d", proto);
++
++ err = -EPROTONOSUPPORT;
++
++ read_lock(&most_proto_lock);
++
++ if (most_proto[proto] && try_module_get(most_proto[proto]->owner)) {
++ err = most_proto[proto]->create(net, sock, proto, kern);
++ most_sock_reclassify_lock(sock, proto);
++ module_put(most_proto[proto]->owner);
++ }
++
++ read_unlock(&most_proto_lock);
++
++ return err;
++}
++
++static struct net_proto_family most_sock_family_ops = {
++ .owner = THIS_MODULE,
++ .family = PF_MOST,
++ .create = most_sock_create,
++};
++
++static int __init most_init(void)
++{
++ int err;
++
++ err = sock_register(&most_sock_family_ops);
++ if (err < 0)
++ return err;
++
++ err = dev_sock_init();
++ if (err < 0) {
++ sock_unregister(PF_MOST);
++ return err;
++ }
++
++ printk(KERN_INFO "MOST is initialized\n");
++
++ return 0;
++}
++
++static void __exit most_exit(void)
++{
++ dev_sock_cleanup();
++
++ sock_unregister(PF_MOST);
++}
++
++subsys_initcall(most_init);
++module_exit(most_exit);
++
++MODULE_DESCRIPTION("MOST Core");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_NETPROTO(PF_MOST);
++
+diff --git a/net/most/async_sock.c b/net/most/async_sock.c
+new file mode 100644
+index 0000000..3a11b9b
+--- /dev/null
++++ b/net/most/async_sock.c
+@@ -0,0 +1,155 @@
++/*
++ * async_sock.c MOST asynchronous socket support
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Support for MOST asynchronous sockets
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/async.h>
++
++static int async_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
++
++ if (!aaddr || aaddr->most_family != AF_MOST)
++ return -EINVAL;
++
++ return most_sock_bind(sock, aaddr->most_dev, aaddr->rx_channel,
++ aaddr->tx_channel);
++}
++
++static int async_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*aaddr);
++ aaddr->most_family = AF_MOST;
++ aaddr->most_dev = mdev->id;
++ aaddr->rx_channel = most_sk(sk)->rx_channel;
++ aaddr->tx_channel = most_sk(sk)->tx_channel;
++
++ release_sock(sk);
++ return 0;
++}
++
++
++static const struct proto_ops async_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = async_sock_bind,
++ .getname = async_sock_getname,
++ .sendmsg = most_sock_sendmsg,
++ .recvmsg = most_sock_recvmsg,
++ .ioctl = most_sock_ioctl,
++ .poll = datagram_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = most_sock_setsockopt,
++ .getsockopt = most_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto async_sk_proto = {
++ .name = "ASYNC",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int async_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_DGRAM)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &async_sock_ops;
++
++ sk = most_sk_alloc(net, &async_sk_proto, CHAN_ASYNC);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family async_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = async_sock_create,
++};
++
++
++static int __init async_init(void)
++{
++ int err;
++
++ err = proto_register(&async_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_ASYNC, &async_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST asynchronous socket layer initialized\n");
++
++ return 0;
++}
++
++static void __exit async_exit(void)
++{
++ if (most_sock_unregister(MOSTPROTO_ASYNC) < 0)
++ printk(KERN_ERR "ASYNC socket unregistration failed\n");
++
++ proto_unregister(&async_sk_proto);
++}
++
++module_init(async_init);
++module_exit(async_exit);
++
++MODULE_DESCRIPTION("Most Asyncronous");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("most-proto-3");
++
+diff --git a/net/most/ctl_sock.c b/net/most/ctl_sock.c
+new file mode 100644
+index 0000000..c10cb32
+--- /dev/null
++++ b/net/most/ctl_sock.c
+@@ -0,0 +1,160 @@
++/*
++ * ctl_sock.c Support for MOST control sockets
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/ctl.h>
++
++
++static int ctl_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
++
++ if (!caddr || caddr->most_family != AF_MOST)
++ return -EINVAL;
++
++ return most_sock_bind(sock, caddr->most_dev, caddr->rx_channel,
++ caddr->tx_channel);
++}
++
++static int ctl_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*caddr);
++ caddr->most_family = AF_MOST;
++ caddr->most_dev = mdev->id;
++ caddr->rx_channel = most_sk(sk)->rx_channel;
++ caddr->tx_channel = most_sk(sk)->tx_channel;
++
++ release_sock(sk);
++ return 0;
++}
++
++int ctl_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len)
++{
++ if (len != CTL_FRAME_SIZE)
++ return -EINVAL;
++
++ return most_sock_sendmsg(iocb, sock, msg, len);
++}
++
++static const struct proto_ops ctl_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = ctl_sock_bind,
++ .getname = ctl_sock_getname,
++ .sendmsg = most_sock_sendmsg,
++ .recvmsg = most_sock_recvmsg,
++ .ioctl = most_sock_ioctl,
++ .poll = datagram_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = most_sock_setsockopt,
++ .getsockopt = most_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto ctl_sk_proto = {
++ .name = "CTL",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int ctl_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_RAW)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &ctl_sock_ops;
++
++ sk = most_sk_alloc(net, &ctl_sk_proto, CHAN_CTL);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family ctl_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = ctl_sock_create,
++};
++
++
++static int __init ctl_init(void)
++{
++ int err;
++
++ err = proto_register(&ctl_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_CTL, &ctl_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST control socket layer initialized\n");
++
++ return 0;
++}
++
++static void __exit ctl_exit(void)
++{
++ if (most_sock_unregister(MOSTPROTO_CTL) < 0)
++ printk(KERN_ERR "Control socket unregistration failed\n");
++
++ proto_unregister(&ctl_sk_proto);
++}
++
++module_init(ctl_init);
++module_exit(ctl_exit);
++
++MODULE_DESCRIPTION("Most Control");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("most-proto-1");
++
+diff --git a/net/most/dev_sock.c b/net/most/dev_sock.c
+new file mode 100644
+index 0000000..43b980b
+--- /dev/null
++++ b/net/most/dev_sock.c
+@@ -0,0 +1,171 @@
++/*
++ * dev_sock.c Device MOST sockets, to control the underlying devices
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/dev.h>
++
++/* Ioctls that require bound socket */
++static inline int dev_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
++ unsigned long arg)
++{
++ return -ENOSYS;
++}
++
++static int dev_sock_ioctl(struct socket *sock, unsigned int cmd,
++ unsigned long arg)
++{
++ void __user *argp = (void __user *) arg;
++
++ switch (cmd) {
++ case MOSTDEVUP:
++ return most_open_dev(arg & 0xffff);
++ case MOSTDEVDOWN:
++ return most_close_dev(arg & 0xffff);
++ case MOSTGETDEVLIST:
++ return most_get_dev_list(argp);
++ default:
++ return -EINVAL;
++ }
++}
++
++static int dev_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ return -ENOSYS;
++}
++
++static int dev_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostdev *daddr = (struct sockaddr_mostdev *) addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*daddr);
++ daddr->most_family = AF_MOST;
++ daddr->most_dev = mdev->id;
++
++ release_sock(sk);
++ return 0;
++}
++
++static int dev_sock_setsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, unsigned int optlen)
++{
++ return -ENOSYS;
++}
++
++static int dev_sock_getsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, int __user *optlen)
++{
++ return -ENOSYS;
++}
++
++static const struct proto_ops dev_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = dev_sock_bind,
++ .getname = dev_sock_getname,
++ .sendmsg = sock_no_sendmsg,
++ .recvmsg = sock_no_recvmsg,
++ .ioctl = dev_sock_ioctl,
++ .poll = sock_no_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = dev_sock_setsockopt,
++ .getsockopt = dev_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto dev_sk_proto = {
++ .name = "DEV",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int dev_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_RAW)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &dev_sock_ops;
++
++ sk = most_sk_alloc(net, &dev_sk_proto, CHAN_DEV);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family dev_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = dev_sock_create,
++};
++
++
++int __init dev_sock_init(void)
++{
++ int err;
++
++ err = proto_register(&dev_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_DEV, &dev_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST device socket layer initialized\n");
++
++ return 0;
++}
++
++void __exit dev_sock_cleanup(void)
++{
++ if (most_sock_unregister(MOSTPROTO_DEV) < 0)
++ printk(KERN_ERR "Device socket unregistration failed\n");
++
++ proto_unregister(&dev_sk_proto);
++}
++
+diff --git a/net/most/most_core.c b/net/most/most_core.c
+new file mode 100644
+index 0000000..e01aa68
+--- /dev/null
++++ b/net/most/most_core.c
+@@ -0,0 +1,287 @@
++/*
++ * most_core.c The MOST core functions
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++
++#include <net/most/most_core.h>
++#include <net/most/dev.h>
++
++/* MOST device list */
++LIST_HEAD(most_dev_list);
++DEFINE_RWLOCK(most_dev_list_lock);
++
++
++int most_open_dev(u16 dev_id)
++{
++ struct most_dev *mdev = most_dev_get(dev_id);
++ int err = 0;
++
++ if (!mdev)
++ return -ENODEV;
++
++ most_dbg("%s: %s, state: %d\n", __func__, mdev->name, mdev->state);
++
++ if (mdev->state == MOST_DEV_UP)
++ err = -EALREADY;
++
++ if (!err)
++ err = mdev->open(mdev);
++ if (!err)
++ mdev->state = MOST_DEV_UP;
++
++ most_dev_put(mdev);
++ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
++ mdev->name, mdev->state, err);
++ return err;
++}
++
++static int __most_close_dev(struct most_dev *mdev)
++{
++ int err = 0;
++
++ most_dbg("%s: %s, state: %d\n", __func__, mdev ? mdev->name : "nil",
++ mdev ? mdev->state : -1);
++
++ if (!mdev)
++ return -ENODEV;
++
++ if (mdev->state == MOST_DEV_DOWN)
++ err = -EALREADY;
++
++ if (!err)
++ err = mdev->close(mdev);
++ if (!err)
++ mdev->state = MOST_DEV_DOWN;
++
++ most_dev_put(mdev);
++ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
++ mdev->name, mdev->state, err);
++ return err;
++}
++
++int most_close_dev(u16 dev_id)
++{
++ return __most_close_dev(most_dev_get(dev_id));
++}
++
++int most_get_dev_list(void __user *arg)
++{
++ struct most_dev_list_req *dl;
++ struct most_dev_req *dr;
++ struct list_head *p;
++ int n = 0, size, err;
++ u16 dev_num;
++
++ if (get_user(dev_num, (u16 __user *) arg))
++ return -EFAULT;
++
++ if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
++ return -EINVAL;
++
++ size = sizeof(*dl) + dev_num * sizeof(*dr);
++
++ dl = kzalloc(size, GFP_KERNEL);
++ if (!dl)
++ return -ENOMEM;
++
++ dr = dl->dev_req;
++
++ read_lock_bh(&most_dev_list_lock);
++ list_for_each(p, &most_dev_list) {
++ struct most_dev *mdev;
++ mdev = list_entry(p, struct most_dev, list);
++ (dr + n)->dev_id = mdev->id;
++ if (++n >= dev_num)
++ break;
++ }
++ read_unlock_bh(&most_dev_list_lock);
++
++ dl->dev_num = n;
++ size = sizeof(*dl) + n * sizeof(*dr);
++
++ err = copy_to_user(arg, dl, size);
++ kfree(dl);
++
++ return err ? -EFAULT : 0;
++}
++
++static int most_send_frame(struct sk_buff *skb)
++{
++ struct most_dev *mdev = (struct most_dev *) skb->dev;
++
++ if (!mdev) {
++ kfree_skb(skb);
++ return -ENODEV;
++ }
++
++ most_dbg("%s: %s type %d len %d\n", __func__, mdev->name,
++ most_cb(skb)->channel_type, skb->len);
++
++ /* Get rid of skb owner, prior to sending to the driver. */
++ skb_orphan(skb);
++
++ return mdev->send(skb);
++}
++
++static void most_send_queue(struct sk_buff_head *q)
++{
++ struct sk_buff *skb;
++
++ while ((skb = skb_dequeue(q))) {
++ struct most_dev *mdev = (struct most_dev *)skb->dev;
++
++ most_dbg("%s: skb %p len %d\n", __func__, skb, skb->len);
++
++ if (!mdev->can_send || mdev->can_send(skb))
++ most_send_frame(skb);
++ else {
++ most_dbg("%s, could not send frame, requeueing\n",
++ __func__);
++ skb_queue_tail(q, skb);
++ break;
++ }
++ }
++}
++
++static void most_tx_task(unsigned long arg)
++{
++ struct most_dev *mdev = (struct most_dev *) arg;
++
++ most_dbg("%s: %s\n", __func__, mdev->name);
++
++ most_send_queue(&mdev->ctl_q);
++ most_send_queue(&mdev->sync_q);
++ most_send_queue(&mdev->async_q);
++}
++
++static void most_rx_task(unsigned long arg)
++{
++ struct most_dev *mdev = (struct most_dev *) arg;
++ struct sk_buff *skb = skb_dequeue(&mdev->rx_q);
++
++ most_dbg("%s: %s\n", __func__, mdev->name);
++
++ while (skb) {
++ /* Send to the sockets */
++ most_send_to_sock(mdev->id, skb);
++ kfree_skb(skb);
++ skb = skb_dequeue(&mdev->rx_q);
++ }
++}
++
++
++/* Get MOST device by index.
++ * Device is held on return. */
++struct most_dev *most_dev_get(int index)
++{
++ struct most_dev *mdev = NULL;
++ struct list_head *p;
++
++ if (index < 0)
++ return NULL;
++
++ read_lock(&most_dev_list_lock);
++ list_for_each(p, &most_dev_list) {
++ struct most_dev *d = list_entry(p, struct most_dev, list);
++ if (d->id == index) {
++ mdev = most_dev_hold(d);
++ break;
++ }
++ }
++ read_unlock(&most_dev_list_lock);
++ return mdev;
++}
++EXPORT_SYMBOL(most_dev_get);
++
++
++/* Alloc MOST device */
++struct most_dev *most_alloc_dev(void)
++{
++ struct most_dev *mdev;
++
++ mdev = kzalloc(sizeof(struct most_dev), GFP_KERNEL);
++ if (!mdev)
++ return NULL;
++
++ mdev->state = MOST_DEV_DOWN;
++
++ return mdev;
++}
++EXPORT_SYMBOL(most_alloc_dev);
++
++
++void most_free_dev(struct most_dev *mdev)
++{
++ kfree(mdev);
++}
++EXPORT_SYMBOL(most_free_dev);
++
++
++/* Register MOST device */
++int most_register_dev(struct most_dev *mdev)
++{
++ struct list_head *head = &most_dev_list, *p;
++ int id = 0;
++
++ if (!mdev->open || !mdev->close || !mdev->send || !mdev->owner)
++ return -EINVAL;
++
++ write_lock_bh(&most_dev_list_lock);
++
++ /* Find first available device id */
++ list_for_each(p, &most_dev_list) {
++ if (list_entry(p, struct most_dev, list)->id != id)
++ break;
++ head = p; id++;
++ }
++
++ sprintf(mdev->name, "most%d", id);
++ mdev->id = id;
++ list_add(&mdev->list, head);
++
++ tasklet_init(&mdev->rx_task, most_rx_task, (unsigned long) mdev);
++ tasklet_init(&mdev->tx_task, most_tx_task, (unsigned long) mdev);
++
++ skb_queue_head_init(&mdev->rx_q);
++ skb_queue_head_init(&mdev->ctl_q);
++ skb_queue_head_init(&mdev->sync_q);
++ skb_queue_head_init(&mdev->async_q);
++
++ write_unlock_bh(&most_dev_list_lock);
++ return 0;
++}
++EXPORT_SYMBOL(most_register_dev);
++
++int most_unregister_dev(struct most_dev *mdev)
++{
++ int ret = 0;
++ most_dbg("%s: %s: state: %d\n", __func__, mdev->name, mdev->state);
++
++ if (mdev->state != MOST_DEV_DOWN)
++ ret = __most_close_dev(mdev);
++
++ write_lock_bh(&most_dev_list_lock);
++ list_del(&mdev->list);
++ write_unlock_bh(&most_dev_list_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(most_unregister_dev);
++
+diff --git a/net/most/most_sock.c b/net/most/most_sock.c
+new file mode 100644
+index 0000000..ff07383
+--- /dev/null
++++ b/net/most/most_sock.c
+@@ -0,0 +1,315 @@
++/*
++ * most_sock.c Generic functions for MOST sockets
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most_core.h>
++
++static struct most_sock_list most_sk_list = {
++ .lock = __RW_LOCK_UNLOCKED(most_sk_list.lock)
++};
++
++void most_sock_link(struct sock *sk)
++{
++ write_lock_bh(&most_sk_list.lock);
++ sk_add_node(sk, &most_sk_list.head);
++ write_unlock_bh(&most_sk_list.lock);
++}
++EXPORT_SYMBOL(most_sock_link);
++
++void most_sock_unlink(struct sock *sk)
++{
++ write_lock_bh(&most_sk_list.lock);
++ sk_del_node_init(sk);
++ write_unlock_bh(&most_sk_list.lock);
++}
++EXPORT_SYMBOL(most_sock_unlink);
++
++static int channel_in_use(int dev_id, u8 channel)
++{
++ struct sock *sk;
++ struct hlist_node *node;
++
++ read_lock_bh(&most_sk_list.lock);
++
++ sk_for_each(sk, node, &most_sk_list.head)
++ if (most_sk(sk)->dev_id == dev_id &&
++ sk->sk_state == MOST_BOUND &&
++ (most_sk(sk)->rx_channel == channel ||
++ most_sk(sk)->tx_channel == channel))
++ goto found;
++
++ sk = NULL;
++found:
++ read_unlock_bh(&most_sk_list.lock);
++
++ return sk != NULL;
++}
++
++int most_send_to_sock(int dev_id, struct sk_buff *skb)
++{
++ struct sock *sk;
++ struct hlist_node *node;
++
++ read_lock(&most_sk_list.lock);
++ sk_for_each(sk, node, &most_sk_list.head) {
++ if (most_sk(sk)->dev_id == dev_id &&
++ most_sk(sk)->channel_type == most_cb(skb)->channel_type
++ && most_sk(sk)->rx_channel == most_cb(skb)->channel &&
++ sk->sk_state == MOST_BOUND) {
++
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++ if (nskb)
++ if (sock_queue_rcv_skb(sk, nskb))
++ kfree_skb(nskb);
++ }
++
++ }
++ read_unlock(&most_sk_list.lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(most_send_to_sock);
++
++int most_sock_release(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev;
++
++ most_dbg("%s: sock %p sk %p\n", __func__, sock, sk);
++
++ if (!sk)
++ return 0;
++
++ mdev = most_sk(sk)->mdev;
++
++ most_sock_unlink(sk);
++
++ if (mdev) {
++ if (sk->sk_state == MOST_BOUND)
++ most_configure_channels(mdev, most_sk(sk), 0);
++
++ most_dev_put(mdev);
++ }
++
++ sock_orphan(sk);
++ sock_put(sk);
++ return 0;
++}
++EXPORT_SYMBOL(most_sock_release);
++
++int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan)
++{
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = NULL;
++ int err = 0;
++
++ most_dbg("%s: sock %p sk %p, rx: %d, tx: %d\n",
++ __func__, sock, sk, rx_chan, tx_chan);
++
++ lock_sock(sk);
++
++ if (sk->sk_state != MOST_OPEN) {
++ err = -EBADFD;
++ goto done;
++ }
++
++ if (most_sk(sk)->mdev) {
++ err = -EALREADY;
++ goto done;
++ }
++
++ if (channel_in_use(dev_id, rx_chan) ||
++ channel_in_use(dev_id, tx_chan)) {
++ err = -EADDRINUSE;
++ goto done;
++ } else {
++ most_sk(sk)->rx_channel = rx_chan;
++ most_sk(sk)->tx_channel = tx_chan;
++ }
++
++ mdev = most_dev_get(dev_id);
++ if (!mdev) {
++ err = -ENODEV;
++ goto done;
++ }
++
++ err = most_configure_channels(mdev, most_sk(sk), 1);
++ if (err) {
++ most_dev_put(mdev);
++ goto done;
++ }
++
++ most_sk(sk)->mdev = mdev;
++ most_sk(sk)->dev_id = mdev->id;
++
++ sk->sk_state = MOST_BOUND;
++
++done:
++ release_sock(sk);
++ return err;
++}
++EXPORT_SYMBOL(most_sock_bind);
++
++
++int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
++{
++ most_dbg("%s\n", __func__);
++ return -EINVAL;
++}
++EXPORT_SYMBOL(most_sock_ioctl);
++
++int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len, int flags)
++{
++ int noblock = flags & MSG_DONTWAIT;
++ struct sock *sk = sock->sk;
++ struct sk_buff *skb;
++ int copied, err;
++
++ most_dbg("%s\n", __func__);
++
++ if (most_sk(sk)->rx_channel == MOST_NO_CHANNEL)
++ return -EOPNOTSUPP;
++
++ if (flags & (MSG_OOB))
++ return -EOPNOTSUPP;
++
++ if (sk->sk_state != MOST_BOUND)
++ return 0;
++
++ skb = skb_recv_datagram(sk, flags, noblock, &err);
++ if (!skb)
++ return err;
++
++ msg->msg_namelen = 0;
++
++ copied = skb->len;
++ if (len < copied) {
++ msg->msg_flags |= MSG_TRUNC;
++ copied = len;
++ }
++
++ skb_reset_transport_header(skb);
++ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++
++ skb_free_datagram(sk, skb);
++
++ return err ? : copied;
++}
++EXPORT_SYMBOL(most_sock_recvmsg);
++
++int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len)
++{
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev;
++ struct sk_buff *skb;
++ int err;
++
++ most_dbg("%s: sock %p sk %p, channeltype: %d\n",
++ __func__, sock, sk, most_sk(sk)->channel_type);
++
++ if (most_sk(sk)->tx_channel == MOST_NO_CHANNEL)
++ return -EOPNOTSUPP;
++
++ if (msg->msg_flags & MSG_OOB)
++ return -EOPNOTSUPP;
++
++ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
++ return -EINVAL;
++
++ lock_sock(sk);
++
++ mdev = most_sk(sk)->mdev;
++ if (!mdev) {
++ err = -EBADFD;
++ goto done;
++ }
++
++ skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
++ if (!skb)
++ goto done;
++
++ most_cb(skb)->channel = most_sk(sk)->tx_channel;
++ most_cb(skb)->channel_type = most_sk(sk)->channel_type;
++
++ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
++ err = -EFAULT;
++ goto drop;
++ }
++
++ skb->dev = (void *) mdev;
++
++ skb_queue_tail(&mdev->ctl_q, skb);
++ most_sched_tx(mdev);
++
++ err = len;
++
++done:
++ release_sock(sk);
++ return err;
++
++drop:
++ kfree_skb(skb);
++ goto done;
++}
++EXPORT_SYMBOL(most_sock_sendmsg);
++
++int most_sock_setsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, unsigned int optlen)
++{
++ struct sock *sk = sock->sk;
++ int err = 0;
++
++ most_dbg("%s: sk %p", __func__, sk);
++
++ lock_sock(sk);
++
++ switch (optname) {
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++EXPORT_SYMBOL(most_sock_setsockopt);
++
++
++int most_sock_getsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, int __user *optlen)
++{
++ struct sock *sk = sock->sk;
++ int err = 0;
++
++ most_dbg("%s: sk %p", __func__, sk);
++
++ lock_sock(sk);
++
++ switch (optname) {
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++EXPORT_SYMBOL(most_sock_getsockopt);
++
+diff --git a/net/most/sync_sock.c b/net/most/sync_sock.c
+new file mode 100644
+index 0000000..77342f2
+--- /dev/null
++++ b/net/most/sync_sock.c
+@@ -0,0 +1,151 @@
++/*
++ * sync_sock.c Support for MOST synchronous sockets
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/sync.h>
++
++static int sync_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
++
++ if (!saddr || saddr->most_family != AF_MOST)
++ return -EINVAL;
++
++ return most_sock_bind(sock, saddr->most_dev, saddr->rx_channel,
++ saddr->tx_channel);
++}
++
++static int sync_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*saddr);
++ saddr->most_family = AF_MOST;
++ saddr->most_dev = mdev->id;
++ saddr->rx_channel = most_sk(sk)->rx_channel;
++ saddr->tx_channel = most_sk(sk)->tx_channel;
++
++ release_sock(sk);
++ return 0;
++}
++
++
++static const struct proto_ops sync_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = sync_sock_bind,
++ .getname = sync_sock_getname,
++ .sendmsg = most_sock_sendmsg,
++ .recvmsg = most_sock_recvmsg,
++ .ioctl = most_sock_ioctl,
++ .poll = datagram_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = most_sock_setsockopt,
++ .getsockopt = most_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto sync_sk_proto = {
++ .name = "SYNC",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int sync_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_STREAM)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &sync_sock_ops;
++
++ sk = most_sk_alloc(net, &sync_sk_proto, CHAN_SYNC);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family sync_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = sync_sock_create,
++};
++
++
++static int __init sync_init(void)
++{
++ int err;
++
++ err = proto_register(&sync_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_SYNC, &sync_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST synchronous socket layer initialized\n");
++
++ return 0;
++}
++
++static void __exit sync_exit(void)
++{
++ if (most_sock_unregister(MOSTPROTO_SYNC) < 0)
++ printk(KERN_ERR "SYNC socket unregistration failed\n");
++
++ proto_unregister(&sync_sk_proto);
++}
++
++module_init(sync_init);
++module_exit(sync_exit);
++
++MODULE_DESCRIPTION("Most Syncronous");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("most-proto-2");
++
+diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
+index 84714a6..54ad4e7 100644
+--- a/sound/drivers/Kconfig
++++ b/sound/drivers/Kconfig
+@@ -182,4 +182,17 @@ config SND_AC97_POWER_SAVE_DEFAULT
+ The default time-out value in seconds for AC97 automatic
+ power-save mode. 0 means to disable the power-save mode.
+
++config SND_TIMBERDALE_I2S
++ tristate "The timberdale FPGA I2S driver"
++ depends on MFD_TIMBERDALE && HAS_IOMEM
++ default y
++ help
++ Say Y here to enable the driver for the I2S block found within the
++ Timberdale FPGA.
++ There is support for up to 8 I2S channels, in either transmitter
++ or receiver mode.
++
++ To compile this driver as a module, choose M here: the module
++ will be called snd-timbi2s.
++
+ endif # SND_DRIVERS
+diff --git a/sound/drivers/Makefile b/sound/drivers/Makefile
+index d4a07f9..dea2eed 100644
+--- a/sound/drivers/Makefile
++++ b/sound/drivers/Makefile
+@@ -10,6 +10,7 @@ snd-portman2x4-objs := portman2x4.o
+ snd-serial-u16550-objs := serial-u16550.o
+ snd-virmidi-objs := virmidi.o
+ snd-ml403-ac97cr-objs := ml403-ac97cr.o pcm-indirect2.o
++snd-timbi2s-objs := timbi2s.o
+
+ # Toplevel Module Dependency
+ obj-$(CONFIG_SND_DUMMY) += snd-dummy.o
+@@ -19,5 +20,6 @@ obj-$(CONFIG_SND_MTPAV) += snd-mtpav.o
+ obj-$(CONFIG_SND_MTS64) += snd-mts64.o
+ obj-$(CONFIG_SND_PORTMAN2X4) += snd-portman2x4.o
+ obj-$(CONFIG_SND_ML403_AC97CR) += snd-ml403-ac97cr.o
++obj-$(CONFIG_SND_TIMBERDALE_I2S) += snd-timbi2s.o
+
+ obj-$(CONFIG_SND) += opl3/ opl4/ mpu401/ vx/ pcsp/
+diff --git a/sound/drivers/timbi2s.c b/sound/drivers/timbi2s.c
+new file mode 100644
+index 0000000..d1269fa
+--- /dev/null
++++ b/sound/drivers/timbi2s.c
+@@ -0,0 +1,759 @@
++/*
++ * timbi2s.c timberdale FPGA I2S driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA I2S
++ *
++ */
++
++#include <linux/io.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/initval.h>
++#include <sound/timbi2s.h>
++
++#define DRIVER_NAME "timb-i2s"
++
++#define MAX_BUSSES 10
++
++#define TIMBI2S_REG_VER 0x00
++#define TIMBI2S_REG_UIR 0x04
++
++#define TIMBI2S_BUS_PRESCALE 0x00
++#define TIMBI2S_BUS_ICLR 0x04
++#define TIMBI2S_BUS_IPR 0x08
++#define TIMBI2S_BUS_ISR 0x0c
++#define TIMBI2S_BUS_IER 0x10
++
++
++#define TIMBI2S_IRQ_TX_FULL 0x01
++#define TIMBI2S_IRQ_TX_ALMOST_FULL 0x02
++#define TIMBI2S_IRQ_TX_ALMOST_EMPTY 0x04
++#define TIMBI2S_IRQ_TX_EMPTY 0x08
++
++#define TIMBI2S_IRQ_RX_FULL 0x01
++#define TIMBI2S_IRQ_RX_ALMOST_FULL 0x02
++#define TIMBI2S_IRQ_RX_ALMOST_EMPTY 0x04
++#define TIMBI2S_IRQ_RX_NOT_EMPTY 0x08
++
++#define TIMBI2S_BUS_ICOR 0x14
++#define TIMBI2S_ICOR_TX_ENABLE 0x00000001
++#define TIMBI2S_ICOR_RX_ENABLE 0x00000002
++#define TIMBI2S_ICOR_LFIFO_RST 0x00000004
++#define TIMBI2S_ICOR_RFIFO_RST 0x00000008
++#define TIMBI2S_ICOR_FIFO_RST (TIMBI2S_ICOR_LFIFO_RST | TIMBI2S_ICOR_RFIFO_RST)
++#define TIMBI2S_ICOR_SOFT_RST 0x00000010
++#define TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT 8
++#define TIMBI2S_ICOR_WORD_SEL_LEFT_MASK (0xff << 8)
++#define TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT 16
++#define TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK (0xff << 16)
++#define TIMBI2S_ICOR_CLK_MASTER 0x10000000
++#define TIMBI2S_ICOR_RX_ID 0x20000000
++#define TIMBI2S_ICOR_TX_ID 0x40000000
++#define TIMBI2S_ICOR_WORD_SEL 0x80000000
++#define TIMBI2S_BUS_FIFO 0x18
++
++#define TIMBI2S_BUS_REG_AREA_SIZE (TIMBI2S_BUS_FIFO - \
++ TIMBI2S_BUS_PRESCALE + 4)
++#define TIMBI2S_FIRST_BUS_AREA_OFS 0x08
++
++struct timbi2s_bus {
++ u32 flags;
++ u32 prescale;
++ struct snd_pcm *pcm;
++ struct snd_card *card;
++ struct snd_pcm_substream *substream;
++ unsigned buf_pos;
++ spinlock_t lock; /* mutual exclusion */
++ u16 sample_rate;
++};
++
++#define BUS_RX 0x200
++#define BUS_MASTER 0x100
++#define BUS_INDEX_MASK 0xff
++#define BUS_INDEX(b) ((b)->flags & BUS_INDEX_MASK)
++#define BUS_IS_MASTER(b) ((b)->flags & BUS_MASTER)
++#define BUS_IS_RX(b) ((b)->flags & BUS_RX)
++
++#define SET_BUS_INDEX(b, id) ((b)->flags = ((b)->flags & ~BUS_INDEX_MASK) | id)
++#define SET_BUS_MASTER(b) ((b)->flags |= BUS_MASTER)
++#define SET_BUS_RX(b) ((b)->flags |= BUS_RX)
++
++#define TIMBI2S_BUS_OFFSET(bus) (TIMBI2S_FIRST_BUS_AREA_OFS + \
++ TIMBI2S_BUS_REG_AREA_SIZE * BUS_INDEX(bus))
++
++struct timbi2s {
++ void __iomem *membase;
++ int irq;
++ struct tasklet_struct tasklet;
++ u32 main_clk;
++ unsigned num_busses;
++ struct timbi2s_bus busses[0];
++};
++
++#define BITS_PER_CHANNEL 16
++#define NUM_CHANNELS 2
++
++#define SAMPLE_SIZE ((NUM_CHANNELS * BITS_PER_CHANNEL) / 8)
++#define NUM_PERIODS 32
++#define NUM_SAMPLES 256
++
++static struct snd_pcm_hardware timbi2s_rx_hw = {
++ .info = (SNDRV_PCM_INFO_MMAP
++ | SNDRV_PCM_INFO_MMAP_VALID
++ | SNDRV_PCM_INFO_INTERLEAVED),
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .rates = SNDRV_PCM_RATE_44100,
++ .rate_min = 44100,
++ .rate_max = 44100,
++ .channels_min = 2, /* only stereo */
++ .channels_max = 2,
++ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
++ .periods_min = NUM_PERIODS,
++ .periods_max = NUM_PERIODS,
++};
++
++static struct snd_pcm_hardware timbi2s_tx_hw = {
++ .info = (SNDRV_PCM_INFO_MMAP
++ | SNDRV_PCM_INFO_MMAP_VALID
++ | SNDRV_PCM_INFO_INTERLEAVED),
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .rates = SNDRV_PCM_RATE_44100,
++ .rate_min = 44100,
++ .rate_max = 44100,
++ .channels_min = 2, /* only stereo */
++ .channels_max = 2,
++ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
++ .periods_min = NUM_PERIODS,
++ .periods_max = NUM_PERIODS,
++};
++
++static inline void timbi2s_bus_write(struct timbi2s_bus *bus, u32 val, u32 reg)
++{
++ struct timbi2s *i2s = snd_pcm_chip(bus->card);
++
++ iowrite32(val, i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
++}
++
++static inline u32 timbi2s_bus_read(struct timbi2s_bus *bus, u32 reg)
++{
++ struct timbi2s *i2s = snd_pcm_chip(bus->card);
++
++ return ioread32(i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
++}
++
++static u32 timbi2s_calc_prescale(u32 main_clk, u32 sample_rate)
++{
++ u32 halfbit_rate = sample_rate * BITS_PER_CHANNEL * NUM_CHANNELS * 2;
++ return main_clk / halfbit_rate;
++}
++
++static int timbi2s_open(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
++ BUS_INDEX(bus));
++
++ if (BUS_IS_RX(bus))
++ runtime->hw = timbi2s_rx_hw;
++ else
++ runtime->hw = timbi2s_tx_hw;
++
++ if (bus->sample_rate == 8000) {
++ runtime->hw.rates = SNDRV_PCM_RATE_8000;
++ runtime->hw.rate_min = 8000;
++ runtime->hw.rate_max = 8000;
++ }
++
++ bus->substream = substream;
++
++ return 0;
++}
++
++static int timbi2s_close(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
++ BUS_INDEX(bus));
++
++ bus->substream = NULL;
++
++ return 0;
++}
++
++static int timbi2s_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *hw_params)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ int err;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d\n", __func__,
++ substream, BUS_INDEX(bus));
++
++ bus->prescale = timbi2s_calc_prescale(i2s->main_clk,
++ params_rate(hw_params));
++
++ err = snd_pcm_lib_malloc_pages(substream,
++ params_buffer_bytes(hw_params));
++ if (err < 0)
++ return err;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Rate: %d, format: %d\n", __func__, params_rate(hw_params),
++ params_format(hw_params));
++
++ return 0;
++}
++
++static int timbi2s_hw_free(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ unsigned long flags;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p\n", __func__, substream);
++
++ spin_lock_irqsave(&bus->lock, flags);
++ /* disable interrupts */
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
++ spin_unlock_irqrestore(&bus->lock, flags);
++
++ /* disable TX and RX */
++ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
++ TIMBI2S_BUS_ICOR);
++
++ return snd_pcm_lib_free_pages(substream);
++}
++
++static int timbi2s_prepare(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ u32 data;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d, buffer: %d, period: %d\n",
++ __func__, substream,
++ BUS_INDEX(bus), (int)snd_pcm_lib_buffer_bytes(substream),
++ (int)snd_pcm_lib_period_bytes(substream));
++
++ if (runtime->dma_addr & 3 || runtime->buffer_size & 3) {
++ dev_err(snd_card_get_device_link(card),
++ "%s: Only word aligned data allowed\n", __func__);
++ return -EINVAL;
++ }
++
++ if (runtime->channels != NUM_CHANNELS) {
++ dev_err(snd_card_get_device_link(card),
++ "%s: Number of channels unsupported %d\n", __func__,
++ runtime->channels);
++ return -EINVAL;
++ }
++
++ /* reset */
++ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
++ TIMBI2S_BUS_ICOR);
++
++ /* only masters have prescaling, don't write if not needed */
++ if (BUS_IS_MASTER(bus))
++ timbi2s_bus_write(bus, bus->prescale, TIMBI2S_BUS_PRESCALE);
++
++ /* write word select */
++ data = ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT) &
++ TIMBI2S_ICOR_WORD_SEL_LEFT_MASK) |
++ ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT) &
++ TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK);
++ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
++
++ bus->buf_pos = 0;
++
++ return 0;
++}
++
++static int
++timbi2s_playback_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ unsigned long flags;
++ u32 data;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
++ substream, BUS_INDEX(bus), cmd);
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_START command\n", __func__);
++
++ /* start */
++ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
++ data |= TIMBI2S_ICOR_TX_ENABLE;
++ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
++
++ /* enable interrupts */
++ timbi2s_bus_write(bus, TIMBI2S_IRQ_TX_ALMOST_EMPTY,
++ TIMBI2S_BUS_IER);
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: ISR: %x, ICOR: %x\n", __func__,
++ timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
++ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_STOP command\n", __func__);
++
++ spin_lock_irqsave(&bus->lock, flags);
++ /* disable interrupts */
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
++ spin_unlock_irqrestore(&bus->lock, flags);
++
++ /* reset */
++ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
++ data &= ~TIMBI2S_ICOR_TX_ENABLE;
++
++ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
++ break;
++ default:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got unsupported command\n", __func__);
++
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++timbi2s_capture_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ unsigned long flags;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
++ substream, BUS_INDEX(bus), cmd);
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_START command\n", __func__);
++
++ timbi2s_bus_write(bus, TIMBI2S_ICOR_RX_ENABLE |
++ TIMBI2S_ICOR_FIFO_RST, TIMBI2S_BUS_ICOR);
++
++ timbi2s_bus_write(bus, TIMBI2S_IRQ_RX_ALMOST_FULL,
++ TIMBI2S_BUS_IER);
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_STOP command\n", __func__);
++ /* disable interrupts */
++ spin_lock_irqsave(&bus->lock, flags);
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
++ spin_unlock_irqrestore(&bus->lock, flags);
++ /* Stop RX */
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_ICOR);
++ break;
++ default:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got unsupported command\n", __func__);
++
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static snd_pcm_uframes_t
++timbi2s_pointer(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ snd_pcm_uframes_t ret;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p\n", __func__, substream);
++
++ ret = bytes_to_frames(substream->runtime, bus->buf_pos);
++ if (ret >= substream->runtime->buffer_size)
++ ret -= substream->runtime->buffer_size;
++
++ return ret;
++}
++
++static struct snd_pcm_ops timbi2s_playback_ops = {
++ .open = timbi2s_open,
++ .close = timbi2s_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = timbi2s_hw_params,
++ .hw_free = timbi2s_hw_free,
++ .prepare = timbi2s_prepare,
++ .trigger = timbi2s_playback_trigger,
++ .pointer = timbi2s_pointer,
++};
++
++static struct snd_pcm_ops timbi2s_capture_ops = {
++ .open = timbi2s_open,
++ .close = timbi2s_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = timbi2s_hw_params,
++ .hw_free = timbi2s_hw_free,
++ .prepare = timbi2s_prepare,
++ .trigger = timbi2s_capture_trigger,
++ .pointer = timbi2s_pointer,
++};
++
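++/* Drain NUM_SAMPLES words from the RX FIFO into the substream buffer,
++ * acknowledge the pending interrupts and report the elapsed period.
++ */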
++static void timbi2s_irq_process_rx(struct timbi2s_bus *bus)
++{
++ struct snd_pcm_runtime *runtime = bus->substream->runtime;
++ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
++ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
++ int i;
++
++ dev_dbg(snd_card_get_device_link(bus->card),
++ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
++
++ for (i = 0; i < NUM_SAMPLES; i++) {
++ *(u32 *)(runtime->dma_area + bus->buf_pos) =
++ timbi2s_bus_read(bus, TIMBI2S_BUS_FIFO);
++ bus->buf_pos += SAMPLE_SIZE;
++ bus->buf_pos %= buffer_size;
++ }
++
++ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
++
++ /* inform ALSA that a period was received */
++ snd_pcm_period_elapsed(bus->substream);
++}
++
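++/* Feed NUM_SAMPLES words from the substream buffer into the TX FIFO,
++ * acknowledge the pending interrupts and report the elapsed period.
++ */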
++static void timbi2s_irq_process_tx(struct timbi2s_bus *bus)
++{
++ struct snd_pcm_runtime *runtime = bus->substream->runtime;
++ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
++ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
++ int i;
++
++ dev_dbg(snd_card_get_device_link(bus->card),
++ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
++
++ for (i = 0; i < NUM_SAMPLES; i++) {
++ timbi2s_bus_write(bus,
++ *(u32 *)(runtime->dma_area + bus->buf_pos),
++ TIMBI2S_BUS_FIFO);
++ bus->buf_pos += SAMPLE_SIZE;
++ bus->buf_pos %= buffer_size;
++ }
++
++ dev_dbg(snd_card_get_device_link(bus->card), "%s: ISR: %x, ICOR: %x\n",
++ __func__, timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
++ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
++
++ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
++
++	/* inform ALSA that a period was transmitted */
++ snd_pcm_period_elapsed(bus->substream);
++}
++
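++/* Interrupt bottom half: service every bus flagged in the UIR register
++ * (one pending bit per bus), then re-enable the interrupt line that the
++ * top half disabled.
++ */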
++static void timbi2s_tasklet(unsigned long arg)
++{
++ struct snd_card *card = (struct snd_card *)arg;
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ u32 uir;
++ unsigned i;
++
++	dev_dbg(snd_card_get_device_link(card), "%s: Entry\n", __func__);
++
++ while ((uir = ioread32(i2s->membase + TIMBI2S_REG_UIR)) != 0) {
++ for (i = 0; i < i2s->num_busses; i++)
++ if (uir & (1 << i)) {
++ struct timbi2s_bus *bus = i2s->busses + i;
++ if (BUS_IS_RX(bus))
++ timbi2s_irq_process_rx(bus);
++ else
++ timbi2s_irq_process_tx(bus);
++ }
++ }
++
++ enable_irq(i2s->irq);
++}
++
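++/* Interrupt top half: mask the line and defer all work to the tasklet. */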
++static irqreturn_t timbi2s_irq(int irq, void *devid)
++{
++ struct timbi2s *i2s = devid;
++
++ tasklet_schedule(&i2s->tasklet);
++ disable_irq_nosync(i2s->irq);
++
++ return IRQ_HANDLED;
++}
++
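++/* Create one PCM device per bus described in the platform data, after
++ * checking the sample rate and that the hardware agrees on the direction,
++ * then preallocate the substream buffers.
++ */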
++static int timbi2s_setup_busses(struct snd_card *card,
++ struct platform_device *pdev)
++{
++ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
++ unsigned i;
++
++ dev_dbg(&pdev->dev, "%s: Entry, no busses: %d, busses: %p\n", __func__,
++ pdata->num_busses, pdata->busses);
++
++ for (i = 0; i < pdata->num_busses; i++) {
++ const struct timbi2s_bus_data *bus_data = pdata->busses + i;
++ int capture = bus_data->rx;
++ int err;
++ u32 ctl;
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ struct timbi2s_bus *bus = i2s->busses + i;
++
++ dev_dbg(&pdev->dev, "%s: Setting up bus: %d\n", __func__, i);
++
++ SET_BUS_INDEX(bus, i);
++ bus->sample_rate = bus_data->sample_rate;
++ bus->card = card;
++		/* prescaling only applies to master busses; we use that
++		 * knowledge later to identify master busses,
++		 * e.g. bus->prescale != 0 -> master bus
++		 */
++ if (capture)
++ SET_BUS_RX(bus);
++
++ spin_lock_init(&bus->lock);
++
++ if (bus->sample_rate != 44100 && bus->sample_rate != 8000) {
++ dev_err(&pdev->dev,
++				"Unsupported sample rate: %d\n", bus->sample_rate);
++ return -EINVAL;
++ }
++
++ dev_dbg(&pdev->dev, "%s: Will check HW direction on bus: %d\n",
++ __func__, BUS_INDEX(bus));
++
++ /* check that the HW agrees with the direction */
++ ctl = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
++ if ((capture && !(ctl & TIMBI2S_ICOR_RX_ID)) ||
++ (!capture && !(ctl & TIMBI2S_ICOR_TX_ID))) {
++ dev_dbg(&pdev->dev,
++ "HW and platform data disagree on direction\n");
++ return -EINVAL;
++ }
++
++ dev_dbg(&pdev->dev, "%s: Will create PCM channel for bus: %d\n",
++ __func__, BUS_INDEX(bus));
++ err = snd_pcm_new(card, bus_data->name ? bus_data->name :
++ card->shortname, i, !capture, capture, &bus->pcm);
++ if (err) {
++ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
++ __func__, err);
++ return err;
++ }
++
++		if (capture)
++			snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_CAPTURE,
++					&timbi2s_capture_ops);
++		else
++			snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_PLAYBACK,
++					&timbi2s_playback_ops);
++
++ dev_dbg(&pdev->dev, "%s: Will preallocate buffers to bus: %d\n",
++ __func__, BUS_INDEX(bus));
++
++ err = snd_pcm_lib_preallocate_pages_for_all(bus->pcm,
++ SNDRV_DMA_TYPE_CONTINUOUS,
++ snd_dma_continuous_data(GFP_KERNEL),
++ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2,
++ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2);
++ if (err) {
++			dev_dbg(&pdev->dev, "%s, Failed to preallocate buffers: %d\n",
++ __func__, err);
++
++ return err;
++ }
++
++ bus->pcm->private_data = bus;
++ bus->pcm->info_flags = 0;
++ strcpy(bus->pcm->name, card->shortname);
++ i2s->num_busses++;
++ }
++
++ return 0;
++}
++
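++/* Probe: map the register window, set up the busses, hook up the
++ * interrupt and register the card; the error labels unwind in reverse
++ * order of allocation.
++ */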
++static int __devinit timbi2s_probe(struct platform_device *pdev)
++{
++ int err;
++ int irq;
++ struct timbi2s *i2s;
++ struct resource *iomem;
++ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
++ struct snd_card *card;
++ u32 ver;
++
++ if (!pdata) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ if (pdata->num_busses > MAX_BUSSES) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
++ THIS_MODULE, sizeof(struct timbi2s) +
++ sizeof(struct timbi2s_bus) * pdata->num_busses, &card);
++ if (err)
++ goto out;
++
++ strcpy(card->driver, "Timberdale I2S");
++ strcpy(card->shortname, "Timberdale I2S");
++ sprintf(card->longname, "Timberdale I2S Driver");
++
++ snd_card_set_dev(card, &pdev->dev);
++
++ i2s = snd_pcm_chip(card);
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ DRIVER_NAME)) {
++ err = -EBUSY;
++ goto err_region;
++ }
++
++ i2s->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!i2s->membase) {
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ err = timbi2s_setup_busses(card, pdev);
++ if (err)
++ goto err_setup;
++
++ tasklet_init(&i2s->tasklet, timbi2s_tasklet, (unsigned long)card);
++ i2s->irq = irq;
++ i2s->main_clk = pdata->main_clk;
++
++ err = request_irq(irq, timbi2s_irq, 0, DRIVER_NAME, i2s);
++ if (err)
++ goto err_request_irq;
++
++ err = snd_card_register(card);
++ if (err)
++ goto err_register;
++
++ platform_set_drvdata(pdev, card);
++
++ ver = ioread32(i2s->membase + TIMBI2S_REG_VER);
++
++ printk(KERN_INFO
++ "Driver for Timberdale I2S (ver: %d.%d) successfully probed.\n",
++		ver >> 16, ver & 0xffff);
++
++ return 0;
++
++err_register:
++	free_irq(irq, i2s);
++err_request_irq:
++err_setup:
++ iounmap(i2s->membase);
++err_ioremap:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_region:
++ snd_card_free(card);
++out:
++ printk(KERN_ERR DRIVER_NAME": Failed to register: %d\n", err);
++
++ return err;
++}
++
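++/* Remove: tear down in reverse order of the probe. */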
++static int __devexit timbi2s_remove(struct platform_device *pdev)
++{
++ struct snd_card *card = platform_get_drvdata(pdev);
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ tasklet_kill(&i2s->tasklet);
++ free_irq(i2s->irq, i2s);
++
++ iounmap(i2s->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ snd_card_free(card);
++
++	platform_set_drvdata(pdev, NULL);
++ return 0;
++}
++
++static struct platform_driver timbi2s_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbi2s_probe,
++ .remove = __devexit_p(timbi2s_remove),
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbi2s_init(void)
++{
++ return platform_driver_register(&timbi2s_platform_driver);
++}
++
++static void __exit timbi2s_exit(void)
++{
++ platform_driver_unregister(&timbi2s_platform_driver);
++}
++
++module_init(timbi2s_init);
++module_exit(timbi2s_exit);
++
++MODULE_ALIAS("platform:"DRIVER_NAME);
++MODULE_DESCRIPTION("Timberdale I2S bus driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
+
+