Merge remote-tracking branches 'spi/topic/pxa2xx', 'spi/topic/qspi', 'spi/topic/s3c24xx', 'spi/topic/s3c64xx', 'spi/topic/sh', 'spi/topic/tegra114', 'spi/topic/tegra20-sflash', 'spi/topic/tegra20-slink', 'spi/topic/txx9' and 'spi/topic/xcomm' into spi-linus

Mark Brown 2014-01-23 13:07:14 +00:00
24 changed files with 500 additions and 1155 deletions

View file

@ -3,6 +3,11 @@ TI QSPI controller.
Required properties:
- compatible : should be "ti,dra7xxx-qspi" or "ti,am4372-qspi".
- reg: Should contain QSPI registers location and length.
- reg-names: Should contain the resource reg names.
  - qspi_base: QSPI configuration register address space
  - qspi_mmap: memory mapped address space
  - (optional) qspi_ctrlmod: control module address space
- interrupts: should contain the qspi interrupt number.
- #address-cells, #size-cells : Must be present if the device has sub-nodes
- ti,hwmods: Name of the hwmod associated to the QSPI
@ -14,7 +19,8 @@ Example:
qspi: qspi@4b300000 {
compatible = "ti,dra7xxx-qspi";
reg = <0x4b300000 0x100>;
reg = <0x47900000 0x100>, <0x30000000 0x3ffffff>;
reg-names = "qspi_base", "qspi_mmap";
#address-cells = <1>;
#size-cells = <0>;
spi-max-frequency = <25000000>;
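For illustration, with the new reg-names a node that also maps the optional qspi_ctrlmod region might look like the following sketch; the third reg entry, the interrupt specifier and the clock rate here are assumed values, not taken from this patch.
example_qspi: qspi@47900000 {
	compatible = "ti,am4372-qspi";
	reg = <0x47900000 0x100>,	/* qspi_base */
	      <0x30000000 0x3ffffff>,	/* qspi_mmap */
	      <0x44e10000 0x4>;		/* qspi_ctrlmod, illustrative address */
	reg-names = "qspi_base", "qspi_mmap", "qspi_ctrlmod";
	interrupts = <0 138 0x4>;	/* illustrative */
	#address-cells = <1>;
	#size-cells = <0>;
	spi-max-frequency = <48000000>;	/* illustrative */
};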

View file

@ -723,6 +723,7 @@ config ARCH_S3C64XX
bool "Samsung S3C64XX"
select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_VIC
select CLKDEV_LOOKUP
select CLKSRC_SAMSUNG_PWM

View file

@ -17,9 +17,10 @@ config CPU_S3C6410
help
Enable S3C6410 CPU support
config S3C64XX_DMA
bool "S3C64XX DMA"
select S3C_DMA
config S3C64XX_PL080
bool "S3C64XX DMA using generic PL08x driver"
select AMBA_PL08X
select SAMSUNG_DMADEV
config S3C64XX_SETUP_SDHCI
bool

View file

@ -26,7 +26,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o
# DMA support
obj-$(CONFIG_S3C64XX_DMA) += dma.o
obj-$(CONFIG_S3C64XX_PL080) += pl080.o
# Device support

View file

@ -58,4 +58,9 @@ int __init s3c64xx_pm_late_initcall(void);
static inline int s3c64xx_pm_late_initcall(void) { return 0; }
#endif
#ifdef CONFIG_S3C64XX_PL080
extern struct pl08x_platform_data s3c64xx_dma0_plat_data;
extern struct pl08x_platform_data s3c64xx_dma1_plat_data;
#endif
#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */

View file

@ -1,762 +0,0 @@
/* linux/arch/arm/plat-s3c64xx/dma.c
*
* Copyright 2009 Openmoko, Inc.
* Copyright 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* S3C64XX DMA core
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* NOTE: Code in this file is not used when booting with Device Tree support.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/amba/pl080.h>
#include <linux/of.h>
#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>
#include "regs-sys.h"
/* dma channel state information */
struct s3c64xx_dmac {
struct device dev;
struct clk *clk;
void __iomem *regs;
struct s3c2410_dma_chan *channels;
enum dma_ch chanbase;
};
/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;
/* Debug configuration and code */
static unsigned char debug_show_buffs = 0;
static void dbg_showchan(struct s3c2410_dma_chan *chan)
{
pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
chan->number,
readl(chan->regs + PL080_CH_SRC_ADDR),
readl(chan->regs + PL080_CH_DST_ADDR),
readl(chan->regs + PL080_CH_LLI),
readl(chan->regs + PL080_CH_CONTROL),
readl(chan->regs + PL080S_CH_CONTROL2),
readl(chan->regs + PL080S_CH_CONFIG));
}
static void show_lli(struct pl080s_lli *lli)
{
pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
lli, lli->src_addr, lli->dst_addr, lli->next_lli,
lli->control0, lli->control1);
}
static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
{
struct s3c64xx_dma_buff *ptr;
struct s3c64xx_dma_buff *end;
pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
chan->number, chan->next, chan->curr, chan->end);
ptr = chan->next;
end = chan->end;
if (debug_show_buffs) {
for (; ptr != NULL; ptr = ptr->next) {
pr_debug("DMA%d: %08x ",
chan->number, ptr->lli_dma);
show_lli(ptr->lli);
}
}
}
/* End of Debug */
static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
{
struct s3c2410_dma_chan *chan;
unsigned int start, offs;
start = 0;
if (channel >= DMACH_PCM1_TX)
start = 8;
for (offs = 0; offs < 8; offs++) {
chan = &s3c2410_chans[start + offs];
if (!chan->in_use)
goto found;
}
return NULL;
found:
s3c_dma_chan_map[channel] = chan;
return chan;
}
int s3c2410_dma_config(enum dma_ch channel, int xferunit)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
if (chan == NULL)
return -EINVAL;
switch (xferunit) {
case 1:
chan->hw_width = 0;
break;
case 2:
chan->hw_width = 1;
break;
case 4:
chan->hw_width = 2;
break;
default:
printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);
static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
struct pl080s_lli *lli,
dma_addr_t data, int size)
{
dma_addr_t src, dst;
u32 control0, control1;
switch (chan->source) {
case DMA_FROM_DEVICE:
src = chan->dev_addr;
dst = data;
control0 = PL080_CONTROL_SRC_AHB2;
control0 |= PL080_CONTROL_DST_INCR;
break;
case DMA_TO_DEVICE:
src = data;
dst = chan->dev_addr;
control0 = PL080_CONTROL_DST_AHB2;
control0 |= PL080_CONTROL_SRC_INCR;
break;
default:
BUG();
}
/* note, we do not currently setup any of the burst controls */
control1 = size >> chan->hw_width; /* size in no of xfers */
control0 |= PL080_CONTROL_PROT_SYS; /* always in priv. mode */
control0 |= PL080_CONTROL_TC_IRQ_EN; /* always fire IRQ */
control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;
lli->src_addr = src;
lli->dst_addr = dst;
lli->next_lli = 0;
lli->control0 = control0;
lli->control1 = control1;
}
static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
struct pl080s_lli *lli)
{
void __iomem *regs = chan->regs;
pr_debug("%s: LLI %p => regs\n", __func__, lli);
show_lli(lli);
writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
writel(lli->next_lli, regs + PL080_CH_LLI);
writel(lli->control0, regs + PL080_CH_CONTROL);
writel(lli->control1, regs + PL080S_CH_CONTROL2);
}
static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
struct s3c64xx_dmac *dmac = chan->dmac;
u32 config;
u32 bit = chan->bit;
dbg_showchan(chan);
pr_debug("%s: clearing interrupts\n", __func__);
/* clear interrupts */
writel(bit, dmac->regs + PL080_TC_CLEAR);
writel(bit, dmac->regs + PL080_ERR_CLEAR);
pr_debug("%s: starting channel\n", __func__);
config = readl(chan->regs + PL080S_CH_CONFIG);
config |= PL080_CONFIG_ENABLE;
config &= ~PL080_CONFIG_HALT;
pr_debug("%s: writing config %08x\n", __func__, config);
writel(config, chan->regs + PL080S_CH_CONFIG);
return 0;
}
static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
u32 config;
int timeout;
pr_debug("%s: stopping channel\n", __func__);
dbg_showchan(chan);
config = readl(chan->regs + PL080S_CH_CONFIG);
config |= PL080_CONFIG_HALT;
writel(config, chan->regs + PL080S_CH_CONFIG);
timeout = 1000;
do {
config = readl(chan->regs + PL080S_CH_CONFIG);
pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
if (config & PL080_CONFIG_ACTIVE)
udelay(10);
else
break;
} while (--timeout > 0);
if (config & PL080_CONFIG_ACTIVE) {
printk(KERN_ERR "%s: channel still active\n", __func__);
return -EFAULT;
}
config = readl(chan->regs + PL080S_CH_CONFIG);
config &= ~PL080_CONFIG_ENABLE;
writel(config, chan->regs + PL080S_CH_CONFIG);
return 0;
}
static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
struct s3c64xx_dma_buff *buf,
enum s3c2410_dma_buffresult result)
{
if (chan->callback_fn != NULL)
(chan->callback_fn)(chan, buf->pw, 0, result);
}
static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
kfree(buff);
}
static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
struct s3c64xx_dma_buff *buff, *next;
u32 config;
dbg_showchan(chan);
pr_debug("%s: flushing channel\n", __func__);
config = readl(chan->regs + PL080S_CH_CONFIG);
config &= ~PL080_CONFIG_ENABLE;
writel(config, chan->regs + PL080S_CH_CONFIG);
/* dump all the buffers associated with this channel */
for (buff = chan->curr; buff != NULL; buff = next) {
next = buff->next;
pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);
s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
s3c64xx_dma_freebuff(buff);
}
chan->curr = chan->next = chan->end = NULL;
return 0;
}
int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
WARN_ON(!chan);
if (!chan)
return -EINVAL;
switch (op) {
case S3C2410_DMAOP_START:
return s3c64xx_dma_start(chan);
case S3C2410_DMAOP_STOP:
return s3c64xx_dma_stop(chan);
case S3C2410_DMAOP_FLUSH:
return s3c64xx_dma_flush(chan);
/* believe PAUSE/RESUME are no-ops */
case S3C2410_DMAOP_PAUSE:
case S3C2410_DMAOP_RESUME:
case S3C2410_DMAOP_STARTED:
case S3C2410_DMAOP_TIMEOUT:
return 0;
}
return -ENOENT;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
/* s3c2410_dma_enqueue
*
*/
int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
struct s3c64xx_dma_buff *next;
struct s3c64xx_dma_buff *buff;
struct pl080s_lli *lli;
unsigned long flags;
int ret;
WARN_ON(!chan);
if (!chan)
return -EINVAL;
buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
if (!buff) {
printk(KERN_ERR "%s: no memory for buffer\n", __func__);
return -ENOMEM;
}
lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
if (!lli) {
printk(KERN_ERR "%s: no memory for lli\n", __func__);
ret = -ENOMEM;
goto err_buff;
}
pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
__func__, buff, data, lli, (u32)buff->lli_dma, size);
buff->lli = lli;
buff->pw = id;
s3c64xx_dma_fill_lli(chan, lli, data, size);
local_irq_save(flags);
if ((next = chan->next) != NULL) {
struct s3c64xx_dma_buff *end = chan->end;
struct pl080s_lli *endlli = end->lli;
pr_debug("enquing onto channel\n");
end->next = buff;
endlli->next_lli = buff->lli_dma;
if (chan->flags & S3C2410_DMAF_CIRCULAR) {
struct s3c64xx_dma_buff *curr = chan->curr;
lli->next_lli = curr->lli_dma;
}
if (next == chan->curr) {
writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
chan->next = buff;
}
show_lli(endlli);
chan->end = buff;
} else {
pr_debug("enquing onto empty channel\n");
chan->curr = buff;
chan->next = buff;
chan->end = buff;
s3c64xx_lli_to_regs(chan, lli);
}
local_irq_restore(flags);
show_lli(lli);
dbg_showchan(chan);
dbg_showbuffs(chan);
return 0;
err_buff:
kfree(buff);
return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
int s3c2410_dma_devconfig(enum dma_ch channel,
enum dma_data_direction source,
unsigned long devaddr)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
u32 peripheral;
u32 config = 0;
pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
__func__, channel, source, devaddr, chan);
WARN_ON(!chan);
if (!chan)
return -EINVAL;
peripheral = (chan->peripheral & 0xf);
chan->source = source;
chan->dev_addr = devaddr;
pr_debug("%s: peripheral %d\n", __func__, peripheral);
switch (source) {
case DMA_FROM_DEVICE:
config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
break;
case DMA_TO_DEVICE:
config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
break;
default:
printk(KERN_ERR "%s: bad source\n", __func__);
return -EINVAL;
}
/* allow TC and ERR interrupts */
config |= PL080_CONFIG_TC_IRQ_MASK;
config |= PL080_CONFIG_ERR_IRQ_MASK;
pr_debug("%s: config %08x\n", __func__, config);
writel(config, chan->regs + PL080S_CH_CONFIG);
return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);
int s3c2410_dma_getposition(enum dma_ch channel,
dma_addr_t *src, dma_addr_t *dst)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
WARN_ON(!chan);
if (!chan)
return -EINVAL;
if (src != NULL)
*src = readl(chan->regs + PL080_CH_SRC_ADDR);
if (dst != NULL)
*dst = readl(chan->regs + PL080_CH_DST_ADDR);
return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);
/* s3c2410_request_dma
*
* get control of a DMA channel
*/
int s3c2410_dma_request(enum dma_ch channel,
struct s3c2410_dma_client *client,
void *dev)
{
struct s3c2410_dma_chan *chan;
unsigned long flags;
pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
channel, client->name, dev);
local_irq_save(flags);
chan = s3c64xx_dma_map_channel(channel);
if (chan == NULL) {
local_irq_restore(flags);
return -EBUSY;
}
dbg_showchan(chan);
chan->client = client;
chan->in_use = 1;
chan->peripheral = channel;
chan->flags = 0;
local_irq_restore(flags);
/* need to setup */
pr_debug("%s: channel initialised, %p\n", __func__, chan);
return chan->number | DMACH_LOW_LEVEL;
}
EXPORT_SYMBOL(s3c2410_dma_request);
/* s3c2410_dma_free
*
* release the given channel back to the system, will stop and flush
* any outstanding transfers, and ensure the channel is ready for the
* next claimant.
*
* Note, although a warning is currently printed if the freeing client
* info is not the same as the registrant's client info, the free is still
* allowed to go through.
*/
int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
unsigned long flags;
if (chan == NULL)
return -EINVAL;
local_irq_save(flags);
if (chan->client != client) {
printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
channel, chan->client, client);
}
/* sort out stopping and freeing the channel */
chan->client = NULL;
chan->in_use = 0;
if (!(channel & DMACH_LOW_LEVEL))
s3c_dma_chan_map[channel] = NULL;
local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL(s3c2410_dma_free);
static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
struct s3c64xx_dmac *dmac = pw;
struct s3c2410_dma_chan *chan;
enum s3c2410_dma_buffresult res;
u32 tcstat, errstat;
u32 bit;
int offs;
tcstat = readl(dmac->regs + PL080_TC_STATUS);
errstat = readl(dmac->regs + PL080_ERR_STATUS);
for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
struct s3c64xx_dma_buff *buff;
if (!(errstat & bit) && !(tcstat & bit))
continue;
chan = dmac->channels + offs;
res = S3C2410_RES_ERR;
if (tcstat & bit) {
writel(bit, dmac->regs + PL080_TC_CLEAR);
res = S3C2410_RES_OK;
}
if (errstat & bit)
writel(bit, dmac->regs + PL080_ERR_CLEAR);
/* 'next' points to the buffer that is next to the
* currently active buffer.
* For CIRCULAR queues, 'next' will be same as 'curr'
* when 'end' is the active buffer.
*/
buff = chan->curr;
while (buff && buff != chan->next
&& buff->next != chan->next)
buff = buff->next;
if (!buff)
BUG();
if (buff == chan->next)
buff = chan->end;
s3c64xx_dma_bufffdone(chan, buff, res);
/* Free the node and update curr, if non-circular queue */
if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
chan->curr = buff->next;
s3c64xx_dma_freebuff(buff);
}
/* Update 'next' */
buff = chan->next;
if (chan->next == chan->end) {
chan->next = chan->curr;
if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
chan->end = NULL;
} else {
chan->next = buff->next;
}
}
return IRQ_HANDLED;
}
static struct bus_type dma_subsys = {
.name = "s3c64xx-dma",
.dev_name = "s3c64xx-dma",
};
static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
int irq, unsigned int base)
{
struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
struct s3c64xx_dmac *dmac;
char clkname[16];
void __iomem *regs;
void __iomem *regptr;
int err, ch;
dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
if (!dmac) {
printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
return -ENOMEM;
}
dmac->dev.id = chno / 8;
dmac->dev.bus = &dma_subsys;
err = device_register(&dmac->dev);
if (err) {
printk(KERN_ERR "%s: failed to register device\n", __func__);
goto err_alloc;
}
regs = ioremap(base, 0x200);
if (!regs) {
printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
err = -ENXIO;
goto err_dev;
}
snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
dmac->clk = clk_get(NULL, clkname);
if (IS_ERR(dmac->clk)) {
printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
err = PTR_ERR(dmac->clk);
goto err_map;
}
clk_prepare_enable(dmac->clk);
dmac->regs = regs;
dmac->chanbase = chbase;
dmac->channels = chptr;
err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
if (err < 0) {
printk(KERN_ERR "%s: failed to get irq\n", __func__);
goto err_clk;
}
regptr = regs + PL080_Cx_BASE(0);
for (ch = 0; ch < 8; ch++, chptr++) {
pr_debug("%s: registering DMA %d (%p)\n",
__func__, chno + ch, regptr);
chptr->bit = 1 << ch;
chptr->number = chno + ch;
chptr->dmac = dmac;
chptr->regs = regptr;
regptr += PL080_Cx_STRIDE;
}
/* for the moment, permanently enable the controller */
writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
irq, regs, chno, chno+8);
return 0;
err_clk:
clk_disable_unprepare(dmac->clk);
clk_put(dmac->clk);
err_map:
iounmap(regs);
err_dev:
device_unregister(&dmac->dev);
err_alloc:
kfree(dmac);
return err;
}
static int __init s3c64xx_dma_init(void)
{
int ret;
/* This driver is not supported when booting with device tree. */
if (of_have_populated_dt())
return -ENODEV;
printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
if (!dma_pool) {
printk(KERN_ERR "%s: failed to create pool\n", __func__);
return -ENOMEM;
}
ret = subsys_system_register(&dma_subsys, NULL);
if (ret) {
printk(KERN_ERR "%s: failed to create subsys\n", __func__);
return -ENOMEM;
}
/* Set all DMA configuration to be DMA, not SDMA */
writel(0xffffff, S3C64XX_SDMA_SEL);
/* Register standard DMA controllers */
s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
return 0;
}
arch_initcall(s3c64xx_dma_init);

View file

@ -11,51 +11,48 @@
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H __FILE__
#define S3C_DMA_CHANNELS (16)
#define S3C64XX_DMA_CHAN(name) ((unsigned long)(name))
/* see mach-s3c2410/dma.h for notes on dma channel numbers */
/* DMA0/SDMA0 */
#define DMACH_UART0 S3C64XX_DMA_CHAN("uart0_tx")
#define DMACH_UART0_SRC2 S3C64XX_DMA_CHAN("uart0_rx")
#define DMACH_UART1 S3C64XX_DMA_CHAN("uart1_tx")
#define DMACH_UART1_SRC2 S3C64XX_DMA_CHAN("uart1_rx")
#define DMACH_UART2 S3C64XX_DMA_CHAN("uart2_tx")
#define DMACH_UART2_SRC2 S3C64XX_DMA_CHAN("uart2_rx")
#define DMACH_UART3 S3C64XX_DMA_CHAN("uart3_tx")
#define DMACH_UART3_SRC2 S3C64XX_DMA_CHAN("uart3_rx")
#define DMACH_PCM0_TX S3C64XX_DMA_CHAN("pcm0_tx")
#define DMACH_PCM0_RX S3C64XX_DMA_CHAN("pcm0_rx")
#define DMACH_I2S0_OUT S3C64XX_DMA_CHAN("i2s0_tx")
#define DMACH_I2S0_IN S3C64XX_DMA_CHAN("i2s0_rx")
#define DMACH_SPI0_TX S3C64XX_DMA_CHAN("spi0_tx")
#define DMACH_SPI0_RX S3C64XX_DMA_CHAN("spi0_rx")
#define DMACH_HSI_I2SV40_TX S3C64XX_DMA_CHAN("i2s2_tx")
#define DMACH_HSI_I2SV40_RX S3C64XX_DMA_CHAN("i2s2_rx")
/* DMA1/SDMA1 */
#define DMACH_PCM1_TX S3C64XX_DMA_CHAN("pcm1_tx")
#define DMACH_PCM1_RX S3C64XX_DMA_CHAN("pcm1_rx")
#define DMACH_I2S1_OUT S3C64XX_DMA_CHAN("i2s1_tx")
#define DMACH_I2S1_IN S3C64XX_DMA_CHAN("i2s1_rx")
#define DMACH_SPI1_TX S3C64XX_DMA_CHAN("spi1_tx")
#define DMACH_SPI1_RX S3C64XX_DMA_CHAN("spi1_rx")
#define DMACH_AC97_PCMOUT S3C64XX_DMA_CHAN("ac97_out")
#define DMACH_AC97_PCMIN S3C64XX_DMA_CHAN("ac97_in")
#define DMACH_AC97_MICIN S3C64XX_DMA_CHAN("ac97_mic")
#define DMACH_PWM S3C64XX_DMA_CHAN("pwm")
#define DMACH_IRDA S3C64XX_DMA_CHAN("irda")
#define DMACH_EXTERNAL S3C64XX_DMA_CHAN("external")
#define DMACH_SECURITY_RX S3C64XX_DMA_CHAN("sec_rx")
#define DMACH_SECURITY_TX S3C64XX_DMA_CHAN("sec_tx")
/* Note, for the S3C64XX architecture we keep the DMACH_
* defines in the order they are allocated to [S]DMA0/[S]DMA1
* so that it is easy to do DMACH_ -> DMA controller conversion
*/
enum dma_ch {
/* DMA0/SDMA0 */
DMACH_UART0 = 0,
DMACH_UART0_SRC2,
DMACH_UART1,
DMACH_UART1_SRC2,
DMACH_UART2,
DMACH_UART2_SRC2,
DMACH_UART3,
DMACH_UART3_SRC2,
DMACH_PCM0_TX,
DMACH_PCM0_RX,
DMACH_I2S0_OUT,
DMACH_I2S0_IN,
DMACH_SPI0_TX,
DMACH_SPI0_RX,
DMACH_HSI_I2SV40_TX,
DMACH_HSI_I2SV40_RX,
DMACH_MAX = 32
};
/* DMA1/SDMA1 */
DMACH_PCM1_TX = 16,
DMACH_PCM1_RX,
DMACH_I2S1_OUT,
DMACH_I2S1_IN,
DMACH_SPI1_TX,
DMACH_SPI1_RX,
DMACH_AC97_PCMOUT,
DMACH_AC97_PCMIN,
DMACH_AC97_MICIN,
DMACH_PWM,
DMACH_IRDA,
DMACH_EXTERNAL,
DMACH_RES1,
DMACH_RES2,
DMACH_SECURITY_RX, /* SDMA1 only */
DMACH_SECURITY_TX, /* SDMA1 only */
DMACH_MAX /* the end */
struct s3c2410_dma_client {
char *name;
};
static inline bool samsung_dma_has_circular(void)
@ -65,67 +62,10 @@ static inline bool samsung_dma_has_circular(void)
static inline bool samsung_dma_is_dmadev(void)
{
return false;
return true;
}
#define S3C2410_DMAF_CIRCULAR (1 << 0)
#include <plat/dma.h>
#define DMACH_LOW_LEVEL (1<<28) /* use this to specify hardware ch no */
struct s3c64xx_dma_buff;
/** s3c64xx_dma_buff - S3C64XX DMA buffer descriptor
* @next: Pointer to next buffer in queue or ring.
* @pw: Client provided identifier
* @lli: Pointer to hardware descriptor this buffer is associated with.
* @lli_dma: Hardware address of the descriptor.
*/
struct s3c64xx_dma_buff {
struct s3c64xx_dma_buff *next;
void *pw;
struct pl080s_lli *lli;
dma_addr_t lli_dma;
};
struct s3c64xx_dmac;
struct s3c2410_dma_chan {
unsigned char number; /* number of this dma channel */
unsigned char in_use; /* channel allocated */
unsigned char bit; /* bit for enable/disable/etc */
unsigned char hw_width;
unsigned char peripheral;
unsigned int flags;
enum dma_data_direction source;
dma_addr_t dev_addr;
struct s3c2410_dma_client *client;
struct s3c64xx_dmac *dmac; /* pointer to controller */
void __iomem *regs;
/* driver callbacks */
s3c2410_dma_cbfn_t callback_fn; /* buffer done callback */
s3c2410_dma_opfn_t op_fn; /* channel op callback */
/* buffer list and information */
struct s3c64xx_dma_buff *curr; /* current dma buffer */
struct s3c64xx_dma_buff *next; /* next buffer to load */
struct s3c64xx_dma_buff *end; /* end of queue */
/* note, when channel is running in circular mode, curr is the
* first buffer enqueued, end is the last and curr is where the
* last buffer-done event is set-at. The buffers are not freed
* and the last buffer hardware descriptor points back to the
* first.
*/
};
#include <plat/dma-core.h>
#include <linux/amba/pl08x.h>
#include <plat/dma-ops.h>
#endif /* __ASM_ARCH_DMA_H */

View file

@ -0,0 +1,244 @@
/*
* Samsung's S3C64XX generic DMA support using amba-pl08x driver.
*
* Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl080.h>
#include <linux/amba/pl08x.h>
#include <linux/of.h>
#include <mach/irqs.h>
#include <mach/map.h>
#include "regs-sys.h"
static int pl08x_get_xfer_signal(const struct pl08x_channel_data *cd)
{
return cd->min_signal;
}
static void pl08x_put_xfer_signal(const struct pl08x_channel_data *cd, int ch)
{
}
/*
* DMA0
*/
static struct pl08x_channel_data s3c64xx_dma0_info[] = {
{
.bus_id = "uart0_tx",
.min_signal = 0,
.max_signal = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart0_rx",
.min_signal = 1,
.max_signal = 1,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart1_tx",
.min_signal = 2,
.max_signal = 2,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart1_rx",
.min_signal = 3,
.max_signal = 3,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart2_tx",
.min_signal = 4,
.max_signal = 4,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart2_rx",
.min_signal = 5,
.max_signal = 5,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart3_tx",
.min_signal = 6,
.max_signal = 6,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart3_rx",
.min_signal = 7,
.max_signal = 7,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "pcm0_tx",
.min_signal = 8,
.max_signal = 8,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "pcm0_rx",
.min_signal = 9,
.max_signal = 9,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s0_tx",
.min_signal = 10,
.max_signal = 10,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s0_rx",
.min_signal = 11,
.max_signal = 11,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "spi0_tx",
.min_signal = 12,
.max_signal = 12,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "spi0_rx",
.min_signal = 13,
.max_signal = 13,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s2_tx",
.min_signal = 14,
.max_signal = 14,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s2_rx",
.min_signal = 15,
.max_signal = 15,
.periph_buses = PL08X_AHB2,
}
};
struct pl08x_platform_data s3c64xx_dma0_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
.cctl_memcpy =
(PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
PL080_CONTROL_PROT_SYS),
},
.lli_buses = PL08X_AHB1,
.mem_buses = PL08X_AHB1,
.get_xfer_signal = pl08x_get_xfer_signal,
.put_xfer_signal = pl08x_put_xfer_signal,
.slave_channels = s3c64xx_dma0_info,
.num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info),
};
static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0,
0x75000000, {IRQ_DMA0}, &s3c64xx_dma0_plat_data);
/*
* DMA1
*/
static struct pl08x_channel_data s3c64xx_dma1_info[] = {
{
.bus_id = "pcm1_tx",
.min_signal = 0,
.max_signal = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "pcm1_rx",
.min_signal = 1,
.max_signal = 1,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s1_tx",
.min_signal = 2,
.max_signal = 2,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s1_rx",
.min_signal = 3,
.max_signal = 3,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "spi1_tx",
.min_signal = 4,
.max_signal = 4,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "spi1_rx",
.min_signal = 5,
.max_signal = 5,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ac97_out",
.min_signal = 6,
.max_signal = 6,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ac97_in",
.min_signal = 7,
.max_signal = 7,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ac97_mic",
.min_signal = 8,
.max_signal = 8,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "pwm",
.min_signal = 9,
.max_signal = 9,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "irda",
.min_signal = 10,
.max_signal = 10,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "external",
.min_signal = 11,
.max_signal = 11,
.periph_buses = PL08X_AHB2,
},
};
struct pl08x_platform_data s3c64xx_dma1_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
.cctl_memcpy =
(PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
PL080_CONTROL_PROT_SYS),
},
.lli_buses = PL08X_AHB1,
.mem_buses = PL08X_AHB1,
.get_xfer_signal = pl08x_get_xfer_signal,
.put_xfer_signal = pl08x_put_xfer_signal,
.slave_channels = s3c64xx_dma1_info,
.num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info),
};
static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0,
0x75100000, {IRQ_DMA1}, &s3c64xx_dma1_plat_data);
static int __init s3c64xx_pl080_init(void)
{
/* Set all DMA configuration to be DMA, not SDMA */
writel(0xffffff, S3C64XX_SDMA_SEL);
if (of_have_populated_dt())
return 0;
amba_device_register(&s3c64xx_dma0_device, &iomem_resource);
amba_device_register(&s3c64xx_dma1_device, &iomem_resource);
return 0;
}
arch_initcall(s3c64xx_pl080_init);

View file

@ -1468,6 +1468,8 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
#if defined(CONFIG_PL330_DMA)
pd.filter = pl330_filter;
#elif defined(CONFIG_S3C64XX_PL080)
pd.filter = pl08x_filter_id;
#elif defined(CONFIG_S3C24XX_DMAC)
pd.filter = s3c24xx_dma_filter;
#endif
@ -1509,8 +1511,10 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
#ifdef CONFIG_PL330_DMA
#if defined(CONFIG_PL330_DMA)
pd.filter = pl330_filter;
#elif defined(CONFIG_S3C64XX_PL080)
pd.filter = pl08x_filter_id;
#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
@ -1550,8 +1554,10 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
#ifdef CONFIG_PL330_DMA
#if defined(CONFIG_PL330_DMA)
pd.filter = pl330_filter;
#elif defined(CONFIG_S3C64XX_PL080)
pd.filter = pl08x_filter_id;
#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);

View file

@ -18,6 +18,12 @@
#include <mach/dma.h>
#if defined(CONFIG_PL330_DMA)
#define dma_filter pl330_filter
#elif defined(CONFIG_S3C64XX_PL080)
#define dma_filter pl08x_filter_id
#endif
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
struct samsung_dma_req *param,
struct device *dev, char *ch_name)
@ -30,7 +36,7 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
if (dev->of_node)
return (unsigned)dma_request_slave_channel(dev, ch_name);
else
return (unsigned)dma_request_channel(mask, pl330_filter,
return (unsigned)dma_request_channel(mask, dma_filter,
(void *)dma_ch);
}
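With this change the filter callback handed to dma_request_channel() is picked at build time: pl330_filter on PL330-based SoCs, and pl08x_filter_id when CONFIG_S3C64XX_PL080 is selected, in which case dma_ch is one of the channel-name strings from the new mach/dma.h (e.g. "spi0_tx"). A minimal sketch of what that request boils down to on a non-DT S3C64XX board follows; the helper name is hypothetical.
#include <linux/dmaengine.h>
#include <linux/amba/pl08x.h>

static struct dma_chan *example_request_spi0_tx(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* dmaengine offers each registered channel to the filter;
	 * pl08x_filter_id() accepts the channel whose bus_id matches
	 * the string passed as the filter parameter. */
	return dma_request_channel(mask, pl08x_filter_id, (void *)"spi0_tx");
}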

View file

@ -1,13 +0,0 @@
/* linux/arch/arm/plat-samsung/include/plat/fiq.h
*
* Copyright (c) 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Header file for S3C24XX CPU FIQ support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
extern int s3c24xx_set_fiq(unsigned int irq, bool on);

View file

@ -331,8 +331,8 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
ALIAS(HCLK_DMA1, NULL, "dma1"),
ALIAS(HCLK_DMA0, NULL, "dma0"),
ALIAS(HCLK_DMA1, "dma-pl080s.1", "apb_pclk"),
ALIAS(HCLK_DMA0, "dma-pl080s.0", "apb_pclk"),
ALIAS(HCLK_CAMIF, "s3c-camif", "camif"),
ALIAS(HCLK_LCD, "s3c-fb", "lcd"),
ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi"),

View file

@ -402,7 +402,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
depends on PLAT_SAMSUNG
select S3C64XX_DMA if ARCH_S3C64XX
select S3C64XX_PL080 if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.

View file

@ -1268,7 +1268,7 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);

View file

@ -29,7 +29,6 @@
#include <plat/regs-spi.h>
#include <plat/fiq.h>
#include <asm/fiq.h>
#include "spi-s3c24xx-fiq.h"
@ -78,14 +77,12 @@ struct s3c24xx_spi {
unsigned char *rx;
struct clk *clk;
struct resource *ioarea;
struct spi_master *master;
struct spi_device *curdev;
struct device *dev;
struct s3c2410_spi_info *pdata;
};
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@ -517,8 +514,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
err = -ENOMEM;
goto err_nomem;
return -ENOMEM;
}
hw = spi_master_get_devdata(master);
@ -562,48 +558,32 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
/* find and map our resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
err = -ENOENT;
goto err_no_iores;
}
hw->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (hw->ioarea == NULL) {
dev_err(&pdev->dev, "Cannot reserve region\n");
err = -ENXIO;
goto err_no_iores;
}
hw->regs = ioremap(res->start, resource_size(res));
if (hw->regs == NULL) {
dev_err(&pdev->dev, "Cannot map IO\n");
err = -ENXIO;
goto err_no_iomap;
hw->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hw->regs)) {
err = PTR_ERR(hw->regs);
goto err_no_pdata;
}
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
goto err_no_irq;
goto err_no_pdata;
}
err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
goto err_no_irq;
goto err_no_pdata;
}
hw->clk = clk_get(&pdev->dev, "spi");
hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
goto err_no_clk;
goto err_no_pdata;
}
/* setup any gpio we can */
@ -615,7 +595,8 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
goto err_register;
}
err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev));
err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
dev_name(&pdev->dev));
if (err) {
dev_err(&pdev->dev, "Failed to get gpio for cs\n");
goto err_register;
@ -639,27 +620,10 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
return 0;
err_register:
if (hw->set_cs == s3c24xx_spi_gpiocs)
gpio_free(pdata->pin_cs);
clk_disable(hw->clk);
clk_put(hw->clk);
err_no_clk:
free_irq(hw->irq, hw);
err_no_irq:
iounmap(hw->regs);
err_no_iomap:
release_resource(hw->ioarea);
kfree(hw->ioarea);
err_no_iores:
err_no_pdata:
spi_master_put(hw->master);
err_nomem:
return err;
}
@ -668,19 +632,7 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
spi_bitbang_stop(&hw->bitbang);
clk_disable(hw->clk);
clk_put(hw->clk);
free_irq(hw->irq, hw);
iounmap(hw->regs);
if (hw->set_cs == s3c24xx_spi_gpiocs)
gpio_free(hw->pdata->pin_cs);
release_resource(hw->ioarea);
kfree(hw->ioarea);
spi_master_put(hw->master);
return 0;
}

View file

@ -927,9 +927,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);

View file

@ -171,7 +171,6 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
unsigned long tmp;
long ret;
if (t->len)
@ -213,9 +212,7 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
}
if (list_is_last(&t->transfer_list, &mesg->transfers)) {
tmp = spi_sh_read(ss, SPI_SH_CR1);
tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
spi_sh_write(ss, tmp, SPI_SH_CR1);
spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
ss->cr1 &= ~SPI_SH_TBE;
@ -239,7 +236,6 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
unsigned long tmp;
long ret;
if (t->len > SPI_SH_MAX_BYTE)
@ -247,9 +243,7 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
else
spi_sh_write(ss, t->len, SPI_SH_CR3);
tmp = spi_sh_read(ss, SPI_SH_CR1);
tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
spi_sh_write(ss, tmp, SPI_SH_CR1);
spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
spi_sh_wait_write_buffer_empty(ss);

View file

@ -54,11 +54,8 @@
#define SPI_CS_SS_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
#define SPI_CS_POL_INACTIVE 22
#define SPI_CS_POL_INACTIVE_0 (1 << 22)
#define SPI_CS_POL_INACTIVE_1 (1 << 23)
#define SPI_CS_POL_INACTIVE_2 (1 << 24)
#define SPI_CS_POL_INACTIVE_3 (1 << 25)
/* n from 0 to 3 */
#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
#define SPI_CS_SEL_0 (0 << 26)
@ -165,9 +162,6 @@
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
#define MAX_CHIP_SELECT 4
#define SPI_FIFO_DEPTH 64
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
@ -184,7 +178,6 @@ struct tegra_spi_data {
struct spi_device *cur_spi;
struct spi_device *cs_control;
unsigned cur_pos;
unsigned cur_len;
unsigned words_per_32bit;
unsigned bytes_per_word;
unsigned curr_dma_words;
@ -204,12 +197,10 @@ struct tegra_spi_data {
u32 rx_status;
u32 status_reg;
bool is_packed;
unsigned long packed_size;
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
u32 spi_cs_timing;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
@ -227,14 +218,14 @@ struct tegra_spi_data {
static int tegra_spi_runtime_suspend(struct device *dev);
static int tegra_spi_runtime_resume(struct device *dev);
static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi,
static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
unsigned long val, unsigned long reg)
u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
@ -245,7 +236,7 @@ static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
unsigned long val;
u32 val;
/* Write 1 to clear status register */
val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
@ -296,10 +287,9 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
{
unsigned nbytes;
unsigned tx_empty_count;
unsigned long fifo_status;
u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
unsigned long x;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
@ -313,9 +303,9 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
x = 0;
u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
x |= (*tx_buf++) << (i*8);
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
} else {
@ -323,10 +313,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
x = 0;
u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
x |= ((*tx_buf++) << i*8);
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
}
@ -338,9 +328,8 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
unsigned long fifo_status;
u32 fifo_status;
unsigned i, count;
unsigned long x;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
@ -350,20 +339,16 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
x = tegra_spi_readl(tspi, SPI_RX_FIFO);
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
} else {
unsigned int rx_mask;
unsigned int bits_per_word = t->bits_per_word;
rx_mask = (1 << bits_per_word) - 1;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
for (count = 0; count < rx_full_count; count++) {
x = tegra_spi_readl(tspi, SPI_RX_FIFO);
x &= rx_mask;
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@ -376,27 +361,24 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned len;
/* Make the dma buffer readable by the cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned int x;
for (count = 0; count < tspi->curr_dma_words; count++) {
x = 0;
u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
x |= ((*tx_buf++) << i * 8);
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
@ -410,27 +392,21 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned len;
/* Make the dma buffer readable by the cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
unsigned int x;
unsigned int rx_mask;
unsigned int bits_per_word = t->bits_per_word;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
x = tspi->rx_dma_buf[count];
x &= rx_mask;
u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@ -490,16 +466,16 @@ static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
static int tegra_spi_start_dma_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned long val;
u32 val;
unsigned int len;
int ret = 0;
unsigned long status;
u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
dev_err(tspi->dev,
"Rx/Tx fifo are not empty status 0x%08lx\n", status);
dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
(unsigned)status);
return -EIO;
}
@ -564,7 +540,7 @@ static int tegra_spi_start_dma_based_transfer(
static int tegra_spi_start_cpu_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned long val;
u32 val;
unsigned cur_words;
if (tspi->cur_direction & DATA_DIR_TX)
@ -677,13 +653,13 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
struct spi_transfer *t, bool is_first_of_msg)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
unsigned long command1;
u32 command1;
int req_mode;
if (speed != tspi->cur_speed) {
@ -738,7 +714,7 @@ static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
}
static int tegra_spi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, unsigned long command1)
struct spi_transfer *t, u32 command1)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
unsigned total_fifo_words;
@ -763,8 +739,8 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
tspi->command1_reg = command1;
dev_dbg(tspi->dev, "The def 0x%x and written 0x%lx\n",
tspi->def_command1_reg, command1);
dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
tspi->def_command1_reg, (unsigned)command1);
if (total_fifo_words > SPI_FIFO_DEPTH)
ret = tegra_spi_start_dma_based_transfer(tspi, t);
@ -776,15 +752,9 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
static int tegra_spi_setup(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
unsigned long val;
u32 val;
unsigned long flags;
int ret;
unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
SPI_CS_POL_INACTIVE_0,
SPI_CS_POL_INACTIVE_1,
SPI_CS_POL_INACTIVE_2,
SPI_CS_POL_INACTIVE_3,
};
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
@ -806,9 +776,9 @@ static int tegra_spi_setup(struct spi_device *spi)
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
val &= ~cs_pol_bit[spi->chip_select];
val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
else
val |= cs_pol_bit[spi->chip_select];
val |= SPI_CS_POL_INACTIVE(spi->chip_select);
tspi->def_command1_reg = val;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
spin_unlock_irqrestore(&tspi->lock, flags);
@ -842,7 +812,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
msg->actual_length = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
unsigned long cmd1;
u32 cmd1;
reinit_completion(&tspi->xfer_completion);

View file

@ -148,14 +148,14 @@ struct tegra_sflash_data {
static int tegra_sflash_runtime_suspend(struct device *dev);
static int tegra_sflash_runtime_resume(struct device *dev);
static inline unsigned long tegra_sflash_readl(struct tegra_sflash_data *tsd,
static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
unsigned long reg)
{
return readl(tsd->base + reg);
}
static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
unsigned long val, unsigned long reg)
u32 val, unsigned long reg)
{
writel(val, tsd->base + reg);
}
@ -185,7 +185,7 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
unsigned nbytes;
unsigned long status;
u32 status;
unsigned max_n_32bit = tsd->curr_xfer_words;
u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
@ -196,11 +196,11 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_TXF_FULL)) {
int i;
unsigned int x = 0;
u32 x = 0;
for (i = 0; nbytes && (i < tsd->bytes_per_word);
i++, nbytes--)
x |= ((*tx_buf++) << i*8);
x |= (u32)(*tx_buf++) << (i * 8);
tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
if (!nbytes)
break;
@ -214,16 +214,14 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
unsigned long status;
u32 status;
unsigned int read_words = 0;
u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_RXF_EMPTY)) {
int i;
unsigned long x;
x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
for (i = 0; (i < tsd->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
read_words++;
@ -236,7 +234,7 @@ static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
static int tegra_sflash_start_cpu_based_transfer(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
unsigned long val = 0;
u32 val = 0;
unsigned cur_words;
if (tsd->cur_direction & DATA_DIR_TX)
@ -266,7 +264,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
{
struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
u32 speed;
unsigned long command;
u32 command;
speed = t->speed_hz;
if (speed != tsd->cur_speed) {

View file

@ -196,7 +196,7 @@ struct tegra_slink_data {
u32 rx_status;
u32 status_reg;
bool is_packed;
unsigned long packed_size;
u32 packed_size;
u32 command_reg;
u32 command2_reg;
@ -220,14 +220,14 @@ struct tegra_slink_data {
static int tegra_slink_runtime_suspend(struct device *dev);
static int tegra_slink_runtime_resume(struct device *dev);
static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
unsigned long val, unsigned long reg)
u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
@ -238,38 +238,30 @@ static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
{
unsigned long val;
unsigned long val_write = 0;
u32 val_write;
val = tegra_slink_readl(tspi, SLINK_STATUS);
tegra_slink_readl(tspi, SLINK_STATUS);
/* Write 1 to clear status register */
val_write = SLINK_RDY | SLINK_FIFO_ERROR;
tegra_slink_writel(tspi, val_write, SLINK_STATUS);
}
static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
struct spi_transfer *t)
{
unsigned long val;
switch (tspi->bytes_per_word) {
case 0:
val = SLINK_PACK_SIZE_4;
break;
return SLINK_PACK_SIZE_4;
case 1:
val = SLINK_PACK_SIZE_8;
break;
return SLINK_PACK_SIZE_8;
case 2:
val = SLINK_PACK_SIZE_16;
break;
return SLINK_PACK_SIZE_16;
case 4:
val = SLINK_PACK_SIZE_32;
break;
return SLINK_PACK_SIZE_32;
default:
val = 0;
return 0;
}
return val;
}
static unsigned tegra_slink_calculate_curr_xfer_param(
@ -312,10 +304,9 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
{
unsigned nbytes;
unsigned tx_empty_count;
unsigned long fifo_status;
u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
unsigned long x;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
@ -329,9 +320,9 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
x = 0;
u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
x |= (*tx_buf++) << (i*8);
x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
} else {
@ -339,10 +330,10 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
x = 0;
u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
x |= ((*tx_buf++) << i*8);
x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
}
@ -354,9 +345,8 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
unsigned long fifo_status;
u32 fifo_status;
unsigned i, count;
unsigned long x;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
@ -366,7 +356,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
@ -374,7 +364,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
read_words += tspi->curr_dma_words;
} else {
for (count = 0; count < rx_full_count; count++) {
x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@ -387,27 +377,24 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned len;
/* Make the dma buffer readable by the cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
unsigned int x;
for (count = 0; count < tspi->curr_dma_words; count++) {
x = 0;
u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
x |= ((*tx_buf++) << i * 8);
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
@ -434,14 +421,10 @@ static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
unsigned int x;
unsigned int rx_mask, bits_per_word;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
bits_per_word = t->bits_per_word;
rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
x = tspi->rx_dma_buf[count];
x &= rx_mask;
u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@ -501,17 +484,16 @@ static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
static int tegra_slink_start_dma_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned long val;
unsigned long test_val;
u32 val;
unsigned int len;
int ret = 0;
unsigned long status;
u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_slink_readl(tspi, SLINK_STATUS);
if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
dev_err(tspi->dev,
"Rx/Tx fifo are not empty status 0x%08lx\n", status);
dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
(unsigned)status);
return -EIO;
}
@ -551,9 +533,9 @@ static int tegra_slink_start_dma_based_transfer(
}
/* Wait for tx fifo to be filled before starting slink */
test_val = tegra_slink_readl(tspi, SLINK_STATUS);
while (!(test_val & SLINK_TX_FULL))
test_val = tegra_slink_readl(tspi, SLINK_STATUS);
status = tegra_slink_readl(tspi, SLINK_STATUS);
while (!(status & SLINK_TX_FULL))
status = tegra_slink_readl(tspi, SLINK_STATUS);
}
if (tspi->cur_direction & DATA_DIR_RX) {
@ -587,7 +569,7 @@ static int tegra_slink_start_dma_based_transfer(
static int tegra_slink_start_cpu_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned long val;
u32 val;
unsigned cur_words;
val = tspi->packed_size;
@ -714,8 +696,8 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
u8 bits_per_word;
unsigned total_fifo_words;
int ret;
unsigned long command;
unsigned long command2;
u32 command;
u32 command2;
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
@ -762,17 +744,18 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
static int tegra_slink_setup(struct spi_device *spi)
{
struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
unsigned long val;
unsigned long flags;
int ret;
unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
SLINK_CS_POLARITY,
SLINK_CS_POLARITY1,
SLINK_CS_POLARITY2,
SLINK_CS_POLARITY3,
};
struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
u32 val;
unsigned long flags;
int ret;
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
spi->mode & SPI_CPOL ? "" : "~",

View file

@ -46,6 +46,8 @@ struct ti_qspi {
struct spi_master *master;
void __iomem *base;
void __iomem *ctrl_base;
void __iomem *mmap_base;
struct clk *fclk;
struct device *dev;
@ -54,6 +56,8 @@ struct ti_qspi {
u32 spi_max_frequency;
u32 cmd;
u32 dc;
bool ctrl_mod;
};
#define QSPI_PID (0x0)
@ -204,42 +208,27 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
count = t->len;
wlen = t->bits_per_word;
wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
switch (wlen) {
case 8:
case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
cmd, qspi->dc, *txbuf);
writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
ret = wait_for_completion_timeout(&qspi->transfer_complete,
QSPI_COMPLETION_TIMEOUT);
if (ret == 0) {
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
txbuf += 1;
count -= 1;
break;
case 16:
case 2:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
cmd, qspi->dc, *txbuf);
writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
ret = wait_for_completion_timeout(&qspi->transfer_complete,
QSPI_COMPLETION_TIMEOUT);
if (ret == 0) {
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
txbuf += 2;
count -= 2;
break;
case 32:
case 4:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
cmd, qspi->dc, *txbuf);
writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
break;
}
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
ret = wait_for_completion_timeout(&qspi->transfer_complete,
QSPI_COMPLETION_TIMEOUT);
@ -247,10 +236,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
txbuf += 4;
count -= 4;
break;
}
txbuf += wlen;
count -= wlen;
}
return 0;
@ -276,7 +263,7 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
break;
}
count = t->len;
wlen = t->bits_per_word;
wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
@ -288,22 +275,18 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return -ETIMEDOUT;
}
switch (wlen) {
case 8:
case 1:
*rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
rxbuf += 1;
count -= 1;
break;
case 16:
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
rxbuf += 2;
count -= 2;
break;
case 32:
case 4:
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
rxbuf += 4;
count -= 4;
break;
}
rxbuf += wlen;
count -= wlen;
}
return 0;
@ -435,7 +418,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
struct spi_master *master;
struct resource *r;
struct resource *r, *res_ctrl, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
@ -462,7 +445,35 @@ static int ti_qspi_probe(struct platform_device *pdev)
qspi->dev = &pdev->dev;
platform_set_drvdata(pdev, qspi);
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
if (r == NULL) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
}
}
res_mmap = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "qspi_mmap");
if (res_mmap == NULL) {
res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res_mmap == NULL) {
dev_err(&pdev->dev,
"memory mapped resource not required\n");
return -ENODEV;
}
}
res_ctrl = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "qspi_ctrlmod");
if (res_ctrl == NULL) {
res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (res_ctrl == NULL) {
dev_dbg(&pdev->dev,
"control module resources not required\n");
}
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@ -478,6 +489,23 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
if (res_ctrl) {
qspi->ctrl_mod = true;
qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
goto free_master;
}
}
if (res_mmap) {
qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
if (IS_ERR(qspi->mmap_base)) {
ret = PTR_ERR(qspi->mmap_base);
goto free_master;
}
}
ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
dev_name(&pdev->dev), qspi);
if (ret < 0) {
@ -539,7 +567,7 @@ static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
.remove = ti_qspi_remove,
.driver = {
.name = "ti,dra7xxx-qspi",
.name = "ti-qspi",
.owner = THIS_MODULE,
.pm = &ti_qspi_pm_ops,
.of_match_table = ti_qspi_match,
@ -551,3 +579,4 @@ module_platform_driver(ti_qspi_driver);
MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");

View file

@ -348,7 +348,7 @@ static int txx9spi_probe(struct platform_device *dev)
INIT_LIST_HEAD(&c->queue);
init_waitqueue_head(&c->waitq);
c->clk = clk_get(&dev->dev, "spi-baseclk");
c->clk = devm_clk_get(&dev->dev, "spi-baseclk");
if (IS_ERR(c->clk)) {
ret = PTR_ERR(c->clk);
c->clk = NULL;
@ -356,7 +356,6 @@ static int txx9spi_probe(struct platform_device *dev)
}
ret = clk_enable(c->clk);
if (ret) {
clk_put(c->clk);
c->clk = NULL;
goto exit;
}
@ -415,10 +414,8 @@ static int txx9spi_probe(struct platform_device *dev)
exit:
if (c->workqueue)
destroy_workqueue(c->workqueue);
if (c->clk) {
if (c->clk)
clk_disable(c->clk);
clk_put(c->clk);
}
spi_master_put(master);
return ret;
}
@ -430,7 +427,6 @@ static int txx9spi_remove(struct platform_device *dev)
destroy_workqueue(c->workqueue);
clk_disable(c->clk);
clk_put(c->clk);
return 0;
}

View file

@ -231,22 +231,13 @@ static int spi_xcomm_probe(struct i2c_client *i2c,
master->dev.of_node = i2c->dev.of_node;
i2c_set_clientdata(i2c, master);
ret = spi_register_master(master);
ret = devm_spi_register_master(&i2c->dev, master);
if (ret < 0)
spi_master_put(master);
return ret;
}
static int spi_xcomm_remove(struct i2c_client *i2c)
{
struct spi_master *master = i2c_get_clientdata(i2c);
spi_unregister_master(master);
return 0;
}
static const struct i2c_device_id spi_xcomm_ids[] = {
{ "spi-xcomm" },
{ },
@ -259,7 +250,6 @@ static struct i2c_driver spi_xcomm_driver = {
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
.remove = spi_xcomm_remove,
};
module_i2c_driver(spi_xcomm_driver);

View file

@ -23,4 +23,6 @@ struct s3c2410_spi_info {
void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
extern int s3c24xx_set_fiq(unsigned int irq, bool on);
#endif /* __LINUX_SPI_S3C24XX_H */