Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (33 commits)
  x86: poll waiting for I/OAT DMA channel status
  maintainers: add dma engine tree details
  dmaengine: add TODO items for future work on dma drivers
  dmaengine: Add API documentation for slave dma usage
  dmaengine/dw_dmac: Update maintainer-ship
  dmaengine: move link order
  dmaengine/dw_dmac: implement pause and resume in dwc_control
  dmaengine/dw_dmac: Replace spin_lock* with irqsave variants and enable submission from callback
  dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater than DWC_MAX_COUNT
  dmaengine/dw_dmac: set residue as total len in dwc_tx_status if status is !DMA_SUCCESS
  dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called
  dmaengine: at_hdmac: pause: no need to wait for FIFO empty
  pch_dma: modify pci device table definition
  pch_dma: Support new device ML7223 IOH
  pch_dma: Support I2S for ML7213 IOH
  pch_dma: Fix DMA setting issue
  pch_dma: modify for checkpatch
  pch_dma: fix dma direction issue for ML7213 IOH video-in
  dmaengine: at_hdmac: use descriptor chaining help function
  dmaengine: at_hdmac: implement pause and resume in atc_control
  ...

Fix up trivial conflict in drivers/dma/dw_dmac.c
commit 4cb865deec
18 changed files with 739 additions and 249 deletions
Documentation/dmaengine.txt
@@ -1 +1,96 @@
-See Documentation/crypto/async-tx-api.txt

			DMA Engine API Guide
			====================

		Vinod Koul <vinod dot koul at intel.com>

NOTE: For DMA Engine usage in async_tx please see:
	Documentation/crypto/async-tx-api.txt


Below is a guide for device driver writers on how to use the Slave-DMA API of
the DMA Engine. This is applicable to slave DMA usage only.

Slave DMA usage consists of the following steps:
1. Allocate a DMA slave channel
2. Set slave and controller specific parameters
3. Get a descriptor for the transaction
4. Submit the transaction and wait for callback notification

1. Allocate a DMA slave channel
Channel allocation is slightly different in the slave DMA context: client
drivers typically need a channel from a particular DMA controller only, and in
some cases even a specific channel is desired. To request a channel, the
dma_request_channel() API is used.

Interface:
struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
			dma_filter_fn filter_fn,
			void *filter_param);
where dma_filter_fn is defined as:
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

When the optional 'filter_fn' parameter is set to NULL, dma_request_channel
simply returns the first channel that satisfies the capability mask. Otherwise,
when the mask parameter is insufficient for specifying the necessary channel,
the filter_fn routine can be used to disposition the available channels in the
system. The filter_fn routine is called once for each free channel in the
system. Upon seeing a suitable channel, filter_fn returns DMA_ACK, which flags
that channel as the return value from dma_request_channel. A channel
allocated via this interface is exclusive to the caller, until
dma_release_channel() is called.
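
As a rough illustration (an editorial sketch, not part of the patch), a client
driver could request a DMA_SLAVE capable channel as below; the chan_id match in
the filter routine is a made-up example of a private selection criterion, and
<linux/dmaengine.h> is assumed to be included:

	/* Hypothetical filter: accept only the channel number that our
	 * platform data told us to use. */
	static bool my_filter(struct dma_chan *chan, void *filter_param)
	{
		int wanted = *(int *)filter_param;

		return chan->chan_id == wanted;
	}

	static struct dma_chan *my_request_channel(int wanted)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Returns NULL if no free channel matched. */
		return dma_request_channel(mask, my_filter, &wanted);
	}
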
2. Set slave and controller specific parameters
The next step is always to pass some specific information to the DMA driver.
Most of the generic information which a slave DMA can use is in struct
dma_slave_config. It allows the clients to specify DMA direction, DMA
addresses, bus widths, DMA burst lengths, etc. If some DMA controllers have
more parameters to be sent, they should try to embed struct dma_slave_config
in their controller-specific structure. That gives the client flexibility to
pass more parameters, if required.

Interface:
int dmaengine_slave_config(struct dma_chan *chan,
			struct dma_slave_config *config)
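
For illustration only (not from the patch), filling a dma_slave_config for a
memory-to-peripheral channel might look like the sketch below; the FIFO
address, bus width and burst size are invented example values, and at this
point the direction field still takes an enum dma_data_direction value:

	static int my_slave_config(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg;

		memset(&cfg, 0, sizeof(cfg));
		cfg.direction = DMA_TO_DEVICE;		/* memory -> peripheral */
		cfg.dst_addr = fifo_addr;		/* peripheral FIFO bus address */
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_maxburst = 4;			/* example burst size */

		return dmaengine_slave_config(chan, &cfg);
	}
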
3. Get a descriptor for the transaction
For slave usage, the various modes of slave transfers supported by the
DMA engine are:
slave_sg	- DMA a list of scatter-gather buffers from/to a peripheral
dma_cyclic	- perform a cyclic DMA operation from/to a peripheral until the
		  operation is explicitly stopped.
A non-NULL return of this transfer API represents a "descriptor" for the given
transaction.

Interface:
struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)(
		struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);
struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction);
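
As a hedged sketch (not part of the patch), a client that has already mapped
its scatterlist with dma_map_sg() would typically obtain a slave_sg descriptor
through the channel's device_prep_slave_sg operation; the function name
my_prep_tx is hypothetical:

	/* sgl must already be mapped with dma_map_sg() against the DMA device */
	static struct dma_async_tx_descriptor *
	my_prep_tx(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int nents)
	{
		return chan->device->device_prep_slave_sg(chan, sgl, nents,
				DMA_TO_DEVICE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
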
4. Submit the transaction and wait for callback notification
To schedule the transaction to be executed by the dma device, the "descriptor"
returned in step (3) above needs to be submitted.
To tell the dma driver that a transaction is ready to be serviced, the
descriptor->submit() callback needs to be invoked. This chains the descriptor
to the pending queue.
The transactions in the pending queue can be activated by calling the
issue_pending API. If the channel is idle, the first transaction in the queue
is started and subsequent ones are queued up.
On completion of each DMA operation, the next one in the queue is started and
a tasklet is triggered. The tasklet then calls the client driver's completion
callback routine for notification, if set.
Interface:
void dma_async_issue_pending(struct dma_chan *chan);
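
A minimal submission sequence, assuming the descriptor from step (3) and a
struct completion for notification, could look like this sketch (my_callback
and my_start_tx are hypothetical names, <linux/completion.h> assumed):

	static void my_callback(void *arg)
	{
		/* Runs from the DMA driver's tasklet once the transfer completes */
		complete(arg);
	}

	static int my_start_tx(struct dma_chan *chan,
			       struct dma_async_tx_descriptor *desc,
			       struct completion *done)
	{
		dma_cookie_t cookie;

		desc->callback = my_callback;
		desc->callback_param = done;

		cookie = desc->tx_submit(desc);	/* chain onto the pending queue */
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);	/* actually start the queued work */
		return 0;
	}

Note that issue_pending must be called even if the channel was idle;
submitting alone only places the descriptor on the pending queue.
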
==============================================================================

Additional usage notes for dma driver writers
1/ Although the DMA engine API specifies that completion callback routines
cannot submit any new operations, for slave DMA the subsequent transaction
may not be available for submission before the callback routine is called.
This restriction therefore does not apply to DMA-slave devices, but such
drivers should take care to drop any spin-lock they might be holding before
calling the callback routine.
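
As an illustrative pattern for driver writers (all names hypothetical), the
completion path can release the channel lock around the client callback:

	spin_unlock_irqrestore(&mychan->lock, flags);

	if (callback)
		callback(callback_param);

	spin_lock_irqsave(&mychan->lock, flags);

This is the direction the dw_dmac changes in this merge take: irqsave locking
plus callbacks invoked without the channel lock held, so that clients may
submit from the callback.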
MAINTAINERS
@@ -2178,6 +2178,8 @@ M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
 F:	include/linux/dma*
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
+T:	git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
 
 DME1737 HARDWARE MONITOR DRIVER
 M:	Juerg Haefliger <juergh@gmail.com>
@@ -5451,6 +5453,13 @@ L:	linux-serial@vger.kernel.org
 S:	Maintained
 F:	drivers/tty/serial
 
+SYNOPSYS DESIGNWARE DMAC DRIVER
+M:	Viresh Kumar <viresh.kumar@st.com>
+S:	Maintained
+F:	include/linux/dw_dmac.h
+F:	drivers/dma/dw_dmac_regs.h
+F:	drivers/dma/dw_dmac.c
+
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
 M:	Thomas Gleixner <tglx@linutronix.de>
drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI)		+= sfi/
 # was used and do nothing if so
 obj-$(CONFIG_PNP)		+= pnp/
 obj-$(CONFIG_ARM_AMBA)		+= amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE)	+= dma/
 
 obj-$(CONFIG_VIRTIO)		+= virtio/
 obj-$(CONFIG_XEN)		+= xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA)		+= eisa/
 obj-y				+= lguest/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
-obj-$(CONFIG_DMA_ENGINE)	+= dma/
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-y				+= leds/
drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
 	  Enable support for Intel EG20T PCH DMA engine.
 
-	  This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
-	  Output Hub) which is for IVI(In-Vehicle Infotainment) use.
-	  ML7213 is companion chip for Intel Atom E6xx series.
-	  ML7213 is completely compatible for Intel EG20T PCH.
+	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+	  Output Hub), ML7213 and ML7223.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
+	  for MP(Media Phone) use.
+	  ML7213/ML7223 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223 is completely compatible for Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
drivers/dma/TODO (new file, 14 lines)
@@ -0,0 +1,14 @@
TODO for slave dma

1. Move remaining drivers to use new slave interface
2. Remove old slave pointer mechanism
3. Make issue_pending to start the transaction in below drivers
	- mpc512x_dma
	- imx-dma
	- imx-sdma
	- mxs-dma.c
	- dw_dmac
	- intel_mid_dma
	- ste_dma40
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
@ -37,8 +37,8 @@
|
|||
|
||||
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
|
||||
#define ATC_DEFAULT_CTRLA (0)
|
||||
#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
|
||||
|ATC_DIF(1))
|
||||
#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
|
||||
|ATC_DIF(AT_DMA_MEM_IF))
|
||||
|
||||
/*
|
||||
* Initial number of descriptors to allocate for each channel. This could
|
||||
|
@ -164,6 +164,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_desc_chain - build chain adding a descriptor
|
||||
* @first: address of first descriptor of the chain
|
||||
* @prev: address of previous descriptor of the chain
|
||||
* @desc: descriptor to queue
|
||||
*
|
||||
* Called from prep_* functions
|
||||
*/
|
||||
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
|
||||
struct at_desc *desc)
|
||||
{
|
||||
if (!(*first)) {
|
||||
*first = desc;
|
||||
} else {
|
||||
/* inform the HW lli about chaining */
|
||||
(*prev)->lli.dscr = desc->txd.phys;
|
||||
/* insert the link descriptor to the LD ring */
|
||||
list_add_tail(&desc->desc_node,
|
||||
&(*first)->tx_list);
|
||||
}
|
||||
*prev = desc;
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_assign_cookie - compute and assign new cookie
|
||||
* @atchan: channel we work on
|
||||
|
@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
|
|||
static void
|
||||
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
||||
{
|
||||
dma_async_tx_callback callback;
|
||||
void *param;
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common),
|
||||
"descriptor %u complete\n", txd->cookie);
|
||||
|
||||
atchan->completed_cookie = txd->cookie;
|
||||
callback = txd->callback;
|
||||
param = txd->callback_param;
|
||||
|
||||
/* move children to free_list */
|
||||
list_splice_init(&desc->tx_list, &atchan->free_list);
|
||||
|
@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (callback)
|
||||
callback(param);
|
||||
/* for cyclic transfers,
|
||||
* no need to replay callback function while stopping */
|
||||
if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
|
||||
dma_async_tx_callback callback = txd->callback;
|
||||
void *param = txd->callback_param;
|
||||
|
||||
/*
|
||||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (callback)
|
||||
callback(param);
|
||||
}
|
||||
|
||||
dma_run_dependencies(txd);
|
||||
}
|
||||
|
@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
|
|||
atc_chain_complete(atchan, bad_desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_handle_cyclic - at the end of a period, run callback function
|
||||
* @atchan: channel used for cyclic operations
|
||||
*
|
||||
* Called with atchan->lock held and bh disabled
|
||||
*/
|
||||
static void atc_handle_cyclic(struct at_dma_chan *atchan)
|
||||
{
|
||||
struct at_desc *first = atc_first_active(atchan);
|
||||
struct dma_async_tx_descriptor *txd = &first->txd;
|
||||
dma_async_tx_callback callback = txd->callback;
|
||||
void *param = txd->callback_param;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common),
|
||||
"new cyclic period llp 0x%08x\n",
|
||||
channel_readl(atchan, DSCR));
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
}
|
||||
|
||||
/*-- IRQ & Tasklet ---------------------------------------------------*/
|
||||
|
||||
|
@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
|
|||
{
|
||||
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
|
||||
|
||||
/* Channel cannot be enabled here */
|
||||
if (atc_chan_is_enabled(atchan)) {
|
||||
dev_err(chan2dev(&atchan->chan_common),
|
||||
"BUG: channel enabled in tasklet\n");
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&atchan->lock);
|
||||
if (test_and_clear_bit(0, &atchan->error_status))
|
||||
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
|
||||
atc_handle_error(atchan);
|
||||
else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
|
||||
atc_handle_cyclic(atchan);
|
||||
else
|
||||
atc_advance_work(atchan);
|
||||
|
||||
|
@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
|
|||
|
||||
for (i = 0; i < atdma->dma_common.chancnt; i++) {
|
||||
atchan = &atdma->chan[i];
|
||||
if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
|
||||
if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
|
||||
if (pending & AT_DMA_ERR(i)) {
|
||||
/* Disable channel on AHB error */
|
||||
dma_writel(atdma, CHDR, atchan->mask);
|
||||
dma_writel(atdma, CHDR,
|
||||
AT_DMA_RES(i) | atchan->mask);
|
||||
/* Give information to tasklet */
|
||||
set_bit(0, &atchan->error_status);
|
||||
set_bit(ATC_IS_ERROR, &atchan->status);
|
||||
}
|
||||
tasklet_schedule(&atchan->tasklet);
|
||||
ret = IRQ_HANDLED;
|
||||
|
@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
|
|||
}
|
||||
|
||||
ctrla = ATC_DEFAULT_CTRLA;
|
||||
ctrlb = ATC_DEFAULT_CTRLB
|
||||
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
|
||||
| ATC_SRC_ADDR_MODE_INCR
|
||||
| ATC_DST_ADDR_MODE_INCR
|
||||
| ATC_FC_MEM2MEM;
|
||||
|
@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
|
|||
|
||||
desc->txd.cookie = 0;
|
||||
|
||||
if (!first) {
|
||||
first = desc;
|
||||
} else {
|
||||
/* inform the HW lli about chaining */
|
||||
prev->lli.dscr = desc->txd.phys;
|
||||
/* insert the link descriptor to the LD ring */
|
||||
list_add_tail(&desc->desc_node,
|
||||
&first->tx_list);
|
||||
}
|
||||
prev = desc;
|
||||
atc_desc_chain(&first, &prev, desc);
|
||||
}
|
||||
|
||||
/* First descriptor of the chain embeds additional information */
|
||||
|
@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
struct scatterlist *sg;
|
||||
size_t total_len = 0;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
|
||||
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
|
||||
sg_len,
|
||||
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
|
||||
flags);
|
||||
|
||||
|
@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
reg_width = atslave->reg_width;
|
||||
|
||||
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
|
||||
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
|
||||
ctrlb = ATC_IEN;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
ctrla |= ATC_DST_WIDTH(reg_width);
|
||||
ctrlb |= ATC_DST_ADDR_MODE_FIXED
|
||||
| ATC_SRC_ADDR_MODE_INCR
|
||||
| ATC_FC_MEM2PER;
|
||||
| ATC_FC_MEM2PER
|
||||
| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
|
||||
reg = atslave->tx_reg;
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
struct at_desc *desc;
|
||||
|
@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
| len >> mem_width;
|
||||
desc->lli.ctrlb = ctrlb;
|
||||
|
||||
if (!first) {
|
||||
first = desc;
|
||||
} else {
|
||||
/* inform the HW lli about chaining */
|
||||
prev->lli.dscr = desc->txd.phys;
|
||||
/* insert the link descriptor to the LD ring */
|
||||
list_add_tail(&desc->desc_node,
|
||||
&first->tx_list);
|
||||
}
|
||||
prev = desc;
|
||||
atc_desc_chain(&first, &prev, desc);
|
||||
total_len += len;
|
||||
}
|
||||
break;
|
||||
|
@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
ctrla |= ATC_SRC_WIDTH(reg_width);
|
||||
ctrlb |= ATC_DST_ADDR_MODE_INCR
|
||||
| ATC_SRC_ADDR_MODE_FIXED
|
||||
| ATC_FC_PER2MEM;
|
||||
| ATC_FC_PER2MEM
|
||||
| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
|
||||
|
||||
reg = atslave->rx_reg;
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
|
@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
| len >> reg_width;
|
||||
desc->lli.ctrlb = ctrlb;
|
||||
|
||||
if (!first) {
|
||||
first = desc;
|
||||
} else {
|
||||
/* inform the HW lli about chaining */
|
||||
prev->lli.dscr = desc->txd.phys;
|
||||
/* insert the link descriptor to the LD ring */
|
||||
list_add_tail(&desc->desc_node,
|
||||
&first->tx_list);
|
||||
}
|
||||
prev = desc;
|
||||
atc_desc_chain(&first, &prev, desc);
|
||||
total_len += len;
|
||||
}
|
||||
break;
|
||||
|
@ -759,41 +777,211 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_dma_cyclic_check_values
|
||||
* Check for too big/unaligned periods and unaligned DMA buffer
|
||||
*/
|
||||
static int
|
||||
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
{
|
||||
if (period_len > (ATC_BTSIZE_MAX << reg_width))
|
||||
goto err_out;
|
||||
if (unlikely(period_len & ((1 << reg_width) - 1)))
|
||||
goto err_out;
|
||||
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
|
||||
goto err_out;
|
||||
if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
|
||||
goto err_out;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_dma_cyclic_fill_desc - Fill one period descriptor
|
||||
*/
|
||||
static int
|
||||
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
|
||||
unsigned int period_index, dma_addr_t buf_addr,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
{
|
||||
u32 ctrla;
|
||||
unsigned int reg_width = atslave->reg_width;
|
||||
|
||||
/* prepare common CTRLA value */
|
||||
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
|
||||
| ATC_DST_WIDTH(reg_width)
|
||||
| ATC_SRC_WIDTH(reg_width)
|
||||
| period_len >> reg_width;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
desc->lli.saddr = buf_addr + (period_len * period_index);
|
||||
desc->lli.daddr = atslave->tx_reg;
|
||||
desc->lli.ctrla = ctrla;
|
||||
desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
|
||||
| ATC_SRC_ADDR_MODE_INCR
|
||||
| ATC_FC_MEM2PER
|
||||
| ATC_SIF(AT_DMA_MEM_IF)
|
||||
| ATC_DIF(AT_DMA_PER_IF);
|
||||
break;
|
||||
|
||||
case DMA_FROM_DEVICE:
|
||||
desc->lli.saddr = atslave->rx_reg;
|
||||
desc->lli.daddr = buf_addr + (period_len * period_index);
|
||||
desc->lli.ctrla = ctrla;
|
||||
desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
|
||||
| ATC_SRC_ADDR_MODE_FIXED
|
||||
| ATC_FC_PER2MEM
|
||||
| ATC_SIF(AT_DMA_PER_IF)
|
||||
| ATC_DIF(AT_DMA_MEM_IF);
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_prep_dma_cyclic - prepare the cyclic DMA transfer
|
||||
* @chan: the DMA channel to prepare
|
||||
* @buf_addr: physical DMA address where the buffer starts
|
||||
* @buf_len: total number of bytes for the entire buffer
|
||||
* @period_len: number of bytes for each period
|
||||
* @direction: transfer direction, to or from device
|
||||
*/
|
||||
static struct dma_async_tx_descriptor *
|
||||
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
{
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
struct at_dma_slave *atslave = chan->private;
|
||||
struct at_desc *first = NULL;
|
||||
struct at_desc *prev = NULL;
|
||||
unsigned long was_cyclic;
|
||||
unsigned int periods = buf_len / period_len;
|
||||
unsigned int i;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
|
||||
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
|
||||
buf_addr,
|
||||
periods, buf_len, period_len);
|
||||
|
||||
if (unlikely(!atslave || !buf_len || !period_len)) {
|
||||
dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
|
||||
if (was_cyclic) {
|
||||
dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Check for too big/unaligned periods and unaligned DMA buffer */
|
||||
if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
|
||||
period_len, direction))
|
||||
goto err_out;
|
||||
|
||||
/* build cyclic linked list */
|
||||
for (i = 0; i < periods; i++) {
|
||||
struct at_desc *desc;
|
||||
|
||||
desc = atc_desc_get(atchan);
|
||||
if (!desc)
|
||||
goto err_desc_get;
|
||||
|
||||
if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
|
||||
period_len, direction))
|
||||
goto err_desc_get;
|
||||
|
||||
atc_desc_chain(&first, &prev, desc);
|
||||
}
|
||||
|
||||
/* lets make a cyclic list */
|
||||
prev->lli.dscr = first->txd.phys;
|
||||
|
||||
/* First descriptor of the chain embeds additional information */
|
||||
first->txd.cookie = -EBUSY;
|
||||
first->len = buf_len;
|
||||
|
||||
return &first->txd;
|
||||
|
||||
err_desc_get:
|
||||
dev_err(chan2dev(chan), "not enough descriptors available\n");
|
||||
atc_desc_put(atchan, first);
|
||||
err_out:
|
||||
clear_bit(ATC_IS_CYCLIC, &atchan->status);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
struct at_dma *atdma = to_at_dma(chan->device);
|
||||
struct at_desc *desc, *_desc;
|
||||
int chan_id = atchan->chan_common.chan_id;
|
||||
|
||||
LIST_HEAD(list);
|
||||
|
||||
/* Only supports DMA_TERMINATE_ALL */
|
||||
if (cmd != DMA_TERMINATE_ALL)
|
||||
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
|
||||
|
||||
if (cmd == DMA_PAUSE) {
|
||||
spin_lock_bh(&atchan->lock);
|
||||
|
||||
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
|
||||
set_bit(ATC_IS_PAUSED, &atchan->status);
|
||||
|
||||
spin_unlock_bh(&atchan->lock);
|
||||
} else if (cmd == DMA_RESUME) {
|
||||
if (!test_bit(ATC_IS_PAUSED, &atchan->status))
|
||||
return 0;
|
||||
|
||||
spin_lock_bh(&atchan->lock);
|
||||
|
||||
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
|
||||
clear_bit(ATC_IS_PAUSED, &atchan->status);
|
||||
|
||||
spin_unlock_bh(&atchan->lock);
|
||||
} else if (cmd == DMA_TERMINATE_ALL) {
|
||||
struct at_desc *desc, *_desc;
|
||||
/*
|
||||
* This is only called when something went wrong elsewhere, so
|
||||
* we don't really care about the data. Just disable the
|
||||
* channel. We still have to poll the channel enable bit due
|
||||
* to AHB/HSB limitations.
|
||||
*/
|
||||
spin_lock_bh(&atchan->lock);
|
||||
|
||||
/* disabling channel: must also remove suspend state */
|
||||
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
|
||||
|
||||
/* confirm that this channel is disabled */
|
||||
while (dma_readl(atdma, CHSR) & atchan->mask)
|
||||
cpu_relax();
|
||||
|
||||
/* active_list entries will end up before queued entries */
|
||||
list_splice_init(&atchan->queue, &list);
|
||||
list_splice_init(&atchan->active_list, &list);
|
||||
|
||||
/* Flush all pending and queued descriptors */
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
atc_chain_complete(atchan, desc);
|
||||
|
||||
clear_bit(ATC_IS_PAUSED, &atchan->status);
|
||||
/* if channel dedicated to cyclic operations, free it */
|
||||
clear_bit(ATC_IS_CYCLIC, &atchan->status);
|
||||
|
||||
spin_unlock_bh(&atchan->lock);
|
||||
} else {
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* This is only called when something went wrong elsewhere, so
|
||||
* we don't really care about the data. Just disable the
|
||||
* channel. We still have to poll the channel enable bit due
|
||||
* to AHB/HSB limitations.
|
||||
*/
|
||||
spin_lock_bh(&atchan->lock);
|
||||
|
||||
dma_writel(atdma, CHDR, atchan->mask);
|
||||
|
||||
/* confirm that this channel is disabled */
|
||||
while (dma_readl(atdma, CHSR) & atchan->mask)
|
||||
cpu_relax();
|
||||
|
||||
/* active_list entries will end up before queued entries */
|
||||
list_splice_init(&atchan->queue, &list);
|
||||
list_splice_init(&atchan->active_list, &list);
|
||||
|
||||
/* Flush all pending and queued descriptors */
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
atc_chain_complete(atchan, desc);
|
||||
|
||||
spin_unlock_bh(&atchan->lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,
|
|||
|
||||
spin_unlock_bh(&atchan->lock);
|
||||
|
||||
dma_set_tx_state(txstate, last_complete, last_used, 0);
|
||||
dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
|
||||
cookie, last_complete ? last_complete : 0,
|
||||
if (ret != DMA_SUCCESS)
|
||||
dma_set_tx_state(txstate, last_complete, last_used,
|
||||
atc_first_active(atchan)->len);
|
||||
else
|
||||
dma_set_tx_state(txstate, last_complete, last_used, 0);
|
||||
|
||||
if (test_bit(ATC_IS_PAUSED, &atchan->status))
|
||||
ret = DMA_PAUSED;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
|
||||
ret, cookie, last_complete ? last_complete : 0,
|
||||
last_used ? last_used : 0);
|
||||
|
||||
return ret;
|
||||
|
@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)
|
|||
|
||||
dev_vdbg(chan2dev(chan), "issue_pending\n");
|
||||
|
||||
/* Not needed for cyclic transfers */
|
||||
if (test_bit(ATC_IS_CYCLIC, &atchan->status))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&atchan->lock);
|
||||
if (!atc_chan_is_enabled(atchan)) {
|
||||
atc_advance_work(atchan);
|
||||
|
@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
|
|||
}
|
||||
list_splice_init(&atchan->free_list, &list);
|
||||
atchan->descs_allocated = 0;
|
||||
atchan->status = 0;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
|
||||
}
|
||||
|
@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
|||
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
|
||||
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
|
||||
|
||||
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
|
||||
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
|
||||
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
|
||||
|
||||
if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
|
||||
atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
|
||||
|
||||
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
|
||||
dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
|
||||
atdma->dma_common.device_control = atc_control;
|
||||
}
|
||||
|
||||
dma_writel(atdma, EN, AT_DMA_ENABLE);
|
||||
|
||||
|
|
|
@ -103,6 +103,10 @@
|
|||
/* Bitfields in CTRLB */
|
||||
#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
|
||||
#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
|
||||
/* Specify AHB interfaces */
|
||||
#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */
|
||||
#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */
|
||||
|
||||
#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
|
||||
#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
|
||||
#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
|
||||
|
@ -180,13 +184,24 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
|
|||
|
||||
/*-- Channels --------------------------------------------------------*/
|
||||
|
||||
/**
|
||||
* atc_status - information bits stored in channel status flag
|
||||
*
|
||||
* Manipulated with atomic operations.
|
||||
*/
|
||||
enum atc_status {
|
||||
ATC_IS_ERROR = 0,
|
||||
ATC_IS_PAUSED = 1,
|
||||
ATC_IS_CYCLIC = 24,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct at_dma_chan - internal representation of an Atmel HDMAC channel
|
||||
* @chan_common: common dmaengine channel object members
|
||||
* @device: parent device
|
||||
* @ch_regs: memory mapped register base
|
||||
* @mask: channel index in a mask
|
||||
* @error_status: transmit error status information from irq handler
|
||||
* @status: transmit status information from irq/prep* functions
|
||||
* to tasklet (use atomic operations)
|
||||
* @tasklet: bottom half to finish transaction work
|
||||
* @lock: serializes enqueue/dequeue operations to descriptors lists
|
||||
|
@ -201,7 +216,7 @@ struct at_dma_chan {
|
|||
struct at_dma *device;
|
||||
void __iomem *ch_regs;
|
||||
u8 mask;
|
||||
unsigned long error_status;
|
||||
unsigned long status;
|
||||
struct tasklet_struct tasklet;
|
||||
|
||||
spinlock_t lock;
|
||||
|
@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
|
|||
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
|
||||
u32 ebci;
|
||||
|
||||
/* enable interrupts on buffer chain completion & error */
|
||||
ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
|
||||
/* enable interrupts on buffer transfer completion & error */
|
||||
ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
|
||||
| AT_DMA_ERR(atchan->chan_common.chan_id);
|
||||
if (on)
|
||||
dma_writel(atdma, EBCIER, ebci);
|
||||
|
@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
|
|||
*/
|
||||
static void set_desc_eol(struct at_desc *desc)
|
||||
{
|
||||
desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
|
||||
u32 ctrlb = desc->lli.ctrlb;
|
||||
|
||||
ctrlb &= ~ATC_IEN;
|
||||
ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
|
||||
|
||||
desc->lli.ctrlb = ctrlb;
|
||||
desc->lli.dscr = 0;
|
||||
}
|
||||
|
||||
|
|
|
drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
 {
 	return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);
 
 void __exit coh901318_exit(void)
 {
|
|
@ -3,6 +3,7 @@
|
|||
* AVR32 systems.)
|
||||
*
|
||||
* Copyright (C) 2007-2008 Atmel Corporation
|
||||
* Copyright (C) 2010-2011 ST Microelectronics
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
|
|||
struct dw_desc *desc, *_desc;
|
||||
struct dw_desc *ret = NULL;
|
||||
unsigned int i = 0;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
|
||||
if (async_tx_test_ack(&desc->txd)) {
|
||||
list_del(&desc->desc_node);
|
||||
|
@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
|
|||
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
|
||||
i++;
|
||||
}
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
|
||||
|
||||
|
@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
|
|||
*/
|
||||
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (desc) {
|
||||
struct dw_desc *child;
|
||||
|
||||
dwc_sync_desc_for_cpu(dwc, desc);
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
list_for_each_entry(child, &desc->tx_list, desc_node)
|
||||
dev_vdbg(chan2dev(&dwc->chan),
|
||||
"moving child desc %p to freelist\n",
|
||||
|
@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
|
|||
list_splice_init(&desc->tx_list, &dwc->free_list);
|
||||
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
|
||||
list_add(&desc->desc_node, &dwc->free_list);
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
|
|||
/*----------------------------------------------------------------------*/
|
||||
|
||||
static void
|
||||
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
|
||||
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
|
||||
bool callback_required)
|
||||
{
|
||||
dma_async_tx_callback callback;
|
||||
void *param;
|
||||
dma_async_tx_callback callback = NULL;
|
||||
void *param = NULL;
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
struct dw_desc *child;
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
dwc->completed = txd->cookie;
|
||||
callback = txd->callback;
|
||||
param = txd->callback_param;
|
||||
if (callback_required) {
|
||||
callback = txd->callback;
|
||||
param = txd->callback_param;
|
||||
}
|
||||
|
||||
dwc_sync_desc_for_cpu(dwc, desc);
|
||||
|
||||
|
@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (callback)
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
if (callback_required && callback)
|
||||
callback(param);
|
||||
}
|
||||
|
||||
|
@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
{
|
||||
struct dw_desc *desc, *_desc;
|
||||
LIST_HEAD(list);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
if (dma_readl(dw, CH_EN) & dwc->mask) {
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
"BUG: XFER bit set, but channel not idle!\n");
|
||||
|
@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
dwc_dostart(dwc, dwc_first_active(dwc));
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
dwc_descriptor_complete(dwc, desc);
|
||||
dwc_descriptor_complete(dwc, desc, true);
|
||||
}
|
||||
|
||||
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
||||
|
@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
struct dw_desc *desc, *_desc;
|
||||
struct dw_desc *child;
|
||||
u32 status_xfer;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
/*
|
||||
* Clear block interrupt flag before scanning so that we don't
|
||||
* miss any, and read LLP before RAW_XFER to ensure it is
|
||||
|
@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
if (status_xfer & dwc->mask) {
|
||||
/* Everything we've submitted is done */
|
||||
dma_writel(dw, CLEAR.XFER, dwc->mask);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
dwc_complete_all(dw, dwc);
|
||||
return;
|
||||
}
|
||||
|
||||
if (list_empty(&dwc->active_list))
|
||||
if (list_empty(&dwc->active_list)) {
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
|
||||
|
||||
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
|
||||
if (desc->lli.llp == llp)
|
||||
/* This one is currently in progress */
|
||||
/* check first descriptors addr */
|
||||
if (desc->txd.phys == llp) {
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/* check first descriptors llp */
|
||||
if (desc->lli.llp == llp) {
|
||||
/* This one is currently in progress */
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
list_for_each_entry(child, &desc->tx_list, desc_node)
|
||||
if (child->lli.llp == llp)
|
||||
if (child->lli.llp == llp) {
|
||||
/* Currently in progress */
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* No descriptors so far seem to be in progress, i.e.
|
||||
* this one must be done.
|
||||
*/
|
||||
dwc_descriptor_complete(dwc, desc);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
dwc_descriptor_complete(dwc, desc, true);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
}
|
||||
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
|
@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
list_move(dwc->queue.next, &dwc->active_list);
|
||||
dwc_dostart(dwc, dwc_first_active(dwc));
|
||||
}
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
|
||||
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
|
||||
|
@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
{
|
||||
struct dw_desc *bad_desc;
|
||||
struct dw_desc *child;
|
||||
unsigned long flags;
|
||||
|
||||
dwc_scan_descriptors(dw, dwc);
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
/*
|
||||
* The descriptor currently at the head of the active list is
|
||||
* borked. Since we don't have any way to report errors, we'll
|
||||
|
@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
|
||||
dwc_dump_lli(dwc, &child->lli);
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
/* Pretend the descriptor completed successfully */
|
||||
dwc_descriptor_complete(dwc, bad_desc);
|
||||
dwc_descriptor_complete(dwc, bad_desc, true);
|
||||
}
|
||||
|
||||
/* --------------------- Cyclic DMA API extensions -------------------- */
|
||||
|
@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
|
|||
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
|
||||
u32 status_block, u32 status_err, u32 status_xfer)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (status_block & dwc->mask) {
|
||||
void (*callback)(void *param);
|
||||
void *callback_param;
|
||||
|
@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
|
|||
|
||||
callback = dwc->cdesc->period_callback;
|
||||
callback_param = dwc->cdesc->period_callback_param;
|
||||
if (callback) {
|
||||
spin_unlock(&dwc->lock);
|
||||
|
||||
if (callback)
|
||||
callback(callback_param);
|
||||
spin_lock(&dwc->lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
|
|||
dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
|
||||
"interrupt, stopping DMA transfer\n",
|
||||
status_xfer ? "xfer" : "error");
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||
channel_readl(dwc, SAR),
|
||||
|
@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
|
|||
|
||||
for (i = 0; i < dwc->cdesc->periods; i++)
|
||||
dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
|
|||
|
||||
for (i = 0; i < dw->dma.chancnt; i++) {
|
||||
dwc = &dw->chan[i];
|
||||
spin_lock(&dwc->lock);
|
||||
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
|
||||
dwc_handle_cyclic(dw, dwc, status_block, status_err,
|
||||
status_xfer);
|
||||
|
@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
|
|||
dwc_handle_error(dw, dwc);
|
||||
else if ((status_block | status_xfer) & (1 << i))
|
||||
dwc_scan_descriptors(dw, dwc);
|
||||
spin_unlock(&dwc->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
|
|||
struct dw_desc *desc = txd_to_dw_desc(tx);
|
||||
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
|
||||
dma_cookie_t cookie;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
cookie = dwc_assign_cookie(dwc, desc);
|
||||
|
||||
/*
|
||||
|
@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
|
|||
list_add_tail(&desc->desc_node, &dwc->queue);
|
||||
}
|
||||
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
return cookie;
|
||||
}
|
||||
|
@ -689,15 +729,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
reg = dws->tx_reg;
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
struct dw_desc *desc;
|
||||
u32 len;
|
||||
u32 mem;
|
||||
|
||||
desc = dwc_desc_get(dwc);
|
||||
if (!desc) {
|
||||
dev_err(chan2dev(chan),
|
||||
"not enough descriptors available\n");
|
||||
goto err_desc_get;
|
||||
}
|
||||
u32 len, dlen, mem;
|
||||
|
||||
mem = sg_phys(sg);
|
||||
len = sg_dma_len(sg);
|
||||
|
@ -705,10 +737,27 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
if (unlikely(mem & 3 || len & 3))
|
||||
mem_width = 0;
|
||||
|
||||
slave_sg_todev_fill_desc:
|
||||
desc = dwc_desc_get(dwc);
|
||||
if (!desc) {
|
||||
dev_err(chan2dev(chan),
|
||||
"not enough descriptors available\n");
|
||||
goto err_desc_get;
|
||||
}
|
||||
|
||||
desc->lli.sar = mem;
|
||||
desc->lli.dar = reg;
|
||||
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
|
||||
desc->lli.ctlhi = len >> mem_width;
|
||||
if ((len >> mem_width) > DWC_MAX_COUNT) {
|
||||
dlen = DWC_MAX_COUNT << mem_width;
|
||||
mem += dlen;
|
||||
len -= dlen;
|
||||
} else {
|
||||
dlen = len;
|
||||
len = 0;
|
||||
}
|
||||
|
||||
desc->lli.ctlhi = dlen >> mem_width;
|
||||
|
||||
if (!first) {
|
||||
first = desc;
|
||||
|
@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
&first->tx_list);
|
||||
}
|
||||
prev = desc;
|
||||
total_len += len;
|
||||
total_len += dlen;
|
||||
|
||||
if (len)
|
||||
goto slave_sg_todev_fill_desc;
|
||||
}
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
|
@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
reg = dws->rx_reg;
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
struct dw_desc *desc;
|
||||
u32 len;
|
||||
u32 mem;
|
||||
|
||||
desc = dwc_desc_get(dwc);
|
||||
if (!desc) {
|
||||
dev_err(chan2dev(chan),
|
||||
"not enough descriptors available\n");
|
||||
goto err_desc_get;
|
||||
}
|
||||
u32 len, dlen, mem;
|
||||
|
||||
mem = sg_phys(sg);
|
||||
len = sg_dma_len(sg);
|
||||
|
@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
if (unlikely(mem & 3 || len & 3))
|
||||
mem_width = 0;
|
||||
|
||||
slave_sg_fromdev_fill_desc:
|
||||
desc = dwc_desc_get(dwc);
|
||||
if (!desc) {
|
||||
dev_err(chan2dev(chan),
|
||||
"not enough descriptors available\n");
|
||||
goto err_desc_get;
|
||||
}
|
||||
|
||||
desc->lli.sar = reg;
|
||||
desc->lli.dar = mem;
|
||||
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
|
||||
desc->lli.ctlhi = len >> reg_width;
|
||||
if ((len >> reg_width) > DWC_MAX_COUNT) {
|
||||
dlen = DWC_MAX_COUNT << reg_width;
|
||||
mem += dlen;
|
||||
len -= dlen;
|
||||
} else {
|
||||
dlen = len;
|
||||
len = 0;
|
||||
}
|
||||
desc->lli.ctlhi = dlen >> reg_width;
|
||||
|
||||
if (!first) {
|
||||
first = desc;
|
||||
|
@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
&first->tx_list);
|
||||
}
|
||||
prev = desc;
|
||||
total_len += len;
|
||||
total_len += dlen;
|
||||
|
||||
if (len)
|
||||
goto slave_sg_fromdev_fill_desc;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
@ -799,35 +862,52 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
struct dw_dma *dw = to_dw_dma(chan->device);
|
||||
struct dw_desc *desc, *_desc;
|
||||
unsigned long flags;
|
||||
u32 cfglo;
|
||||
LIST_HEAD(list);
|
||||
|
||||
/* Only supports DMA_TERMINATE_ALL */
|
||||
if (cmd != DMA_TERMINATE_ALL)
|
||||
if (cmd == DMA_PAUSE) {
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
cfglo = channel_readl(dwc, CFG_LO);
|
||||
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
|
||||
while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
|
||||
cpu_relax();
|
||||
|
||||
dwc->paused = true;
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
} else if (cmd == DMA_RESUME) {
|
||||
if (!dwc->paused)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
cfglo = channel_readl(dwc, CFG_LO);
|
||||
channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
|
||||
dwc->paused = false;
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
} else if (cmd == DMA_TERMINATE_ALL) {
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
|
||||
dwc->paused = false;
|
||||
|
||||
/* active_list entries will end up before queued entries */
|
||||
list_splice_init(&dwc->queue, &list);
|
||||
list_splice_init(&dwc->active_list, &list);
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
/* Flush all pending and queued descriptors */
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
dwc_descriptor_complete(dwc, desc, false);
|
||||
} else
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* This is only called when something went wrong elsewhere, so
|
||||
* we don't really care about the data. Just disable the
|
||||
* channel. We still have to poll the channel enable bit due
|
||||
* to AHB/HSB limitations.
|
||||
*/
|
||||
spin_lock_bh(&dwc->lock);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
|
||||
/* active_list entries will end up before queued entries */
|
||||
list_splice_init(&dwc->queue, &list);
|
||||
list_splice_init(&dwc->active_list, &list);
|
||||
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
|
||||
/* Flush all pending and queued descriptors */
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
dwc_descriptor_complete(dwc, desc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
|
|||
|
||||
ret = dma_async_is_complete(cookie, last_complete, last_used);
|
||||
if (ret != DMA_SUCCESS) {
|
||||
spin_lock_bh(&dwc->lock);
|
||||
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
|
||||
last_complete = dwc->completed;
|
||||
last_used = chan->cookie;
|
||||
|
@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
|
|||
ret = dma_async_is_complete(cookie, last_complete, last_used);
|
||||
}
|
||||
|
||||
dma_set_tx_state(txstate, last_complete, last_used, 0);
|
||||
if (ret != DMA_SUCCESS)
|
||||
dma_set_tx_state(txstate, last_complete, last_used,
|
||||
dwc_first_active(dwc)->len);
|
||||
else
|
||||
dma_set_tx_state(txstate, last_complete, last_used, 0);
|
||||
|
||||
if (dwc->paused)
|
||||
return DMA_PAUSED;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
|
|||
{
|
||||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
if (!list_empty(&dwc->queue))
|
||||
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
}
|
||||
|
||||
static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
||||
|
@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
int i;
|
||||
u32 cfghi;
|
||||
u32 cfglo;
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
|
||||
|
||||
|
@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
* doesn't mean what you think it means), and status writeback.
|
||||
*/
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
i = dwc->descs_allocated;
|
||||
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
|
||||
if (!desc) {
|
||||
dev_info(chan2dev(chan),
|
||||
"only allocated %d descriptors\n", i);
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
sizeof(desc->lli), DMA_TO_DEVICE);
|
||||
dwc_desc_put(dwc, desc);
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
i = ++dwc->descs_allocated;
|
||||
}
|
||||
|
||||
|
@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
|
||||
channel_set_bit(dw, MASK.ERROR, dwc->mask);
|
||||
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
"alloc_chan_resources allocated %d descriptors\n", i);
|
||||
|
@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
struct dw_dma *dw = to_dw_dma(chan->device);
|
||||
struct dw_desc *desc, *_desc;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(list);
|
||||
|
||||
dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
|
||||
|
@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||
BUG_ON(!list_empty(&dwc->queue));
|
||||
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
list_splice_init(&dwc->free_list, &list);
|
||||
dwc->descs_allocated = 0;
|
||||
|
||||
|
@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
|
||||
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
|
||||
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
|
||||
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
|
||||
|
@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
|
|||
{
|
||||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||
unsigned long flags;
|
||||
|
||||
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
|
||||
dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
spin_lock(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
/* assert channel is idle */
|
||||
if (dma_readl(dw, CH_EN) & dwc->mask) {
|
||||
|
@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
|
|||
channel_readl(dwc, LLP),
|
||||
channel_readl(dwc, CTL_HI),
|
||||
channel_readl(dwc, CTL_LO));
|
||||
spin_unlock(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
|
|||
|
||||
channel_set_bit(dw, CH_EN, dwc->mask);
|
||||
|
||||
spin_unlock(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
|
|||
{
|
||||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
|
||||
spin_unlock(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(dw_dma_cyclic_stop);
|
||||
|
||||
|
@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||
unsigned int reg_width;
|
||||
unsigned int periods;
|
||||
unsigned int i;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
dev_dbg(chan2dev(&dwc->chan),
|
||||
"queue and/or active list are not empty\n");
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
if (was_cyclic) {
|
||||
dev_dbg(chan2dev(&dwc->chan),
|
||||
"channel already prepared for cyclic DMA\n");
|
||||
|
@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
|
|||
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||
struct dw_cyclic_desc *cdesc = dwc->cdesc;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
|
||||
|
||||
if (!cdesc)
|
||||
return;
|
||||
|
||||
spin_lock_bh(&dwc->lock);
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
|
@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
|
|||
dma_writel(dw, CLEAR.ERROR, dwc->mask);
|
||||
dma_writel(dw, CLEAR.XFER, dwc->mask);
|
||||
|
||||
spin_unlock_bh(&dwc->lock);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
for (i = 0; i < cdesc->periods; i++)
|
||||
dwc_desc_put(dwc, cdesc->desc[i]);
|
||||
|
@ -1487,3 +1576,4 @@ module_exit(dw_exit);
|
|||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
|
||||
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
|
||||
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
* Driver for the Synopsys DesignWare AHB DMA Controller
|
||||
*
|
||||
* Copyright (C) 2005-2007 Atmel Corporation
|
||||
* Copyright (C) 2010-2011 ST Microelectronics
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -138,6 +139,7 @@ struct dw_dma_chan {
|
|||
void __iomem *ch_regs;
|
||||
u8 mask;
|
||||
u8 priority;
|
||||
bool paused;
|
||||
|
||||
spinlock_t lock;
|
||||
drivers/dma/intel_mid_dma.c

@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_dma;
 
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 	return 0;
 
@@ -1322,6 +1321,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
 static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 {
 	struct middma_device *device = pci_get_drvdata(pdev);
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
 	middma_shutdown(pdev);
 	pci_dev_put(pdev);
 	kfree(device);

@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
 static int dma_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	return dma_suspend(pci_dev, PMSG_SUSPEND);
+	struct middma_device *device = pci_get_drvdata(pci_dev);
+
+	device->state = SUSPENDED;
+	return 0;
 }
 
 static int dma_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	return dma_resume(pci_dev);
+	struct middma_device *device = pci_get_drvdata(pci_dev);
+
+	device->state = RUNNING;
+	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+	return 0;
 }
 
 static int dma_runtime_idle(struct device *dev)
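The probe/remove changes follow the usual PCI runtime-PM pattern: the PCI core already holds a usage reference while probe runs, so the driver drops it with pm_runtime_put_noidle() and takes it back in remove before tearing the device down. A rough, generic sketch (names are placeholders, not from this driver):

	#include <linux/pci.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		/* ... normal setup ... */

		/* Let the device runtime-suspend once it goes idle. */
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_allow(&pdev->dev);
		return 0;
	}

	static void example_remove(struct pci_dev *pdev)
	{
		/* Take the reference back and block further runtime PM
		 * before shutting the hardware down. */
		pm_runtime_get_noresume(&pdev->dev);
		pm_runtime_forbid(&pdev->dev);

		/* ... teardown ... */
	}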
drivers/dma/ioat/dma_v2.c

@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	struct ioat_ring_ent **ring;
 	u64 status;
 	int order;
+	int i = 0;
 
 	/* have we already been set up? */
 	if (ioat->ring)

@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	ioat2_start_null_desc(ioat);
 
 	/* check that we got off the ground */
-	udelay(5);
-	status = ioat_chansts(chan);
+	do {
+		udelay(1);
+		status = ioat_chansts(chan);
+	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
 	if (is_ioat_active(status) || is_ioat_idle(status)) {
 		set_bit(IOAT_RUN, &chan->state);
 		return 1 << ioat->alloc_order;
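Instead of a single fixed 5 µs delay, the channel status is now polled with a bounded retry loop, so a slow engine gets more chances and a dead one cannot stall forever. The same idea in isolation, with hypothetical register-access helpers to keep the sketch self-contained:

	#include <linux/types.h>
	#include <linux/bitops.h>
	#include <linux/delay.h>
	#include <linux/io.h>

	/* Stand-ins for the real register accessor and ready test; both
	 * are hypothetical and only here to make the sketch compile. */
	static void __iomem *hw_status_reg;

	static u32 read_hw_status(void)
	{
		return readl(hw_status_reg);
	}

	static bool hw_is_ready(u32 status)
	{
		return status & BIT(0);
	}

	/* Poll until the hardware reports ready, but give up after
	 * 'limit' iterations so a dead engine cannot hang the caller. */
	static bool wait_until_ready(unsigned int limit)
	{
		unsigned int i = 0;
		u32 status;

		do {
			udelay(1);
			status = read_hw_status();
		} while (i++ < limit && !hw_is_ready(status));

		return hw_is_ready(status);
	}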
drivers/dma/iop-adma.c

@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 		__func__, len);

@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 		__func__, len);

@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
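The unlikely() wrappers are dropped here (and in the mv_xor and ppc4xx hunks below) because BUG_ON() already annotates its condition as unlikely on the generic code path, so the extra wrapper adds nothing. Roughly, and simplified for illustration only (the real definition in include/asm-generic/bug.h also covers architectures with their own BUG() handling):

	/* Simplified sketch of the generic macro, renamed to avoid any
	 * clash with the real definition. */
	#define MY_BUG_ON(condition)				\
		do {						\
			if (unlikely(condition))		\
				BUG();				\
		} while (0)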
drivers/dma/mv_xor.c

@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_memcpy_slot_count(len);

@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_memset_slot_count(len);

@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(mv_chan->device->common.dev,
 		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
drivers/dma/pch_dma.c

@@ -77,10 +77,10 @@ struct pch_dma_regs {
 	u32	dma_ctl0;
 	u32	dma_ctl1;
 	u32	dma_ctl2;
-	u32	reserved1;
+	u32	dma_ctl3;
 	u32	dma_sts0;
 	u32	dma_sts1;
-	u32	reserved2;
+	u32	dma_sts2;
 	u32	reserved3;
 	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 };

@@ -130,6 +130,7 @@ struct pch_dma {
 #define PCH_DMA_CTL0 0x00
 #define PCH_DMA_CTL1 0x04
 #define PCH_DMA_CTL2 0x08
+#define PCH_DMA_CTL3 0x0C
 #define PCH_DMA_STS0 0x10
 #define PCH_DMA_STS1 0x14
 

@@ -138,7 +139,8 @@ struct pch_dma {
 #define dma_writel(pd, name, val) \
 	writel((val), (pd)->membase + PCH_DMA_##name)
 
-static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+static inline
+struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
 {
 	return container_of(txd, struct pch_dma_desc, txd);
 }

@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
 	return chan->dev->device.parent;
 }
 
-static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
 {
 	return list_first_entry(&pd_chan->active_list,
 				struct pch_dma_desc, desc_node);
 }
 
-static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
 {
 	return list_first_entry(&pd_chan->queue,
 				struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
 
-	val = dma_readl(pd, CTL0);
-
-	if (pd_chan->dir == DMA_TO_DEVICE)
-		val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
-			       DMA_CTL0_DIR_SHIFT_BITS);
-	else
-		val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
-				 DMA_CTL0_DIR_SHIFT_BITS));
-
-	dma_writel(pd, CTL0, val);
+	if (chan->chan_id < 8) {
+		val = dma_readl(pd, CTL0);
+
+		if (pd_chan->dir == DMA_TO_DEVICE)
+			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+				       DMA_CTL0_DIR_SHIFT_BITS);
+		else
+			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+					 DMA_CTL0_DIR_SHIFT_BITS));
+
+		dma_writel(pd, CTL0, val);
+	} else {
+		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
+		val = dma_readl(pd, CTL3);
+
+		if (pd_chan->dir == DMA_TO_DEVICE)
+			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+				       DMA_CTL0_DIR_SHIFT_BITS);
+		else
+			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+					 DMA_CTL0_DIR_SHIFT_BITS));
+
+		dma_writel(pd, CTL3, val);
+	}
 
 	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
 		chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
 
-	val = dma_readl(pd, CTL0);
-
-	val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-		(DMA_CTL0_BITS_PER_CH * chan->chan_id));
-	val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
-
-	dma_writel(pd, CTL0, val);
+	if (chan->chan_id < 8) {
+		val = dma_readl(pd, CTL0);
+
+		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+
+		dma_writel(pd, CTL0, val);
+	} else {
+		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
+
+		val = dma_readl(pd, CTL3);
+
+		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+			(DMA_CTL0_BITS_PER_CH * ch));
+		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
+
+		dma_writel(pd, CTL3, val);
+	}
 
 	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
 		chan->chan_id, val);
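The ML7213/ML7223 parts expose more than eight channels, so channels 8-11 are programmed through CTL3 rather than CTL0; within either register each channel still owns its own bit field. A hypothetical helper expressing the same register-selection rule (all names and the 4-bit field width are placeholders, not taken from the driver):

	#include <linux/types.h>

	#define EX_CTL_BITS_PER_CH	4

	/* Channels 0-7 live in CTL0 (offset 0x00), channels 8-11 in
	 * CTL3 (offset 0x0C); return the register offset and the shift
	 * of the channel's field within it. */
	static void ex_ctl_slot(unsigned int chan_id, unsigned int *reg_off,
				unsigned int *shift)
	{
		if (chan_id < 8) {
			*reg_off = 0x00;
			*shift = EX_CTL_BITS_PER_CH * chan_id;
		} else {
			*reg_off = 0x0C;
			*shift = EX_CTL_BITS_PER_CH * (chan_id - 8);
		}
	}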
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 
 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
 {
-	struct pch_dma *pd = to_pd(pd_chan->chan.device);
-	u32 val;
-
 	if (!pdc_is_idle(pd_chan)) {
 		dev_err(chan2dev(&pd_chan->chan),
 			"BUG: Attempt to start non-idle channel\n");

@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
 		channel_writel(pd_chan, NEXT, desc->txd.phys);
 		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
 	}
-
-	val = dma_readl(pd, CTL2);
-	val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
-	dma_writel(pd, CTL2, val);
 }
 
 static void pdc_chain_complete(struct pch_dma_chan *pd_chan,

@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 {
 	struct pch_dma_desc *desc, *_d;
 	struct pch_dma_desc *ret = NULL;
-	int i;
+	int i = 0;
 
 	spin_lock(&pd_chan->lock);
 	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {

@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
 	spin_unlock_bh(&pd_chan->lock);
 
 	pdc_enable_irq(chan, 1);
-	pdc_set_dir(chan);
 
 	return pd_chan->descs_allocated;
 }
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 	else
 		return NULL;
 
+	pd_chan->dir = direction;
+	pdc_set_dir(chan);
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		desc = pdc_desc_get(pd_chan);
 

@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
 	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
 	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
 	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
 
 	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 		pd_chan = to_pd_chan(chan);

@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
 	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
 	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
 	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+	dma_writel(pd, CTL3, pd->regs.dma_ctl3);
 
 	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 		pd_chan = to_pd_chan(chan);

@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
 
 		pd_chan->membase = &regs->desc[i];
 
-		pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
 		spin_lock_init(&pd_chan->lock);
 
 		INIT_LIST_HEAD(&pd_chan->active_list);

@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 
 #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
 #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
 #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
+#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
+#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
+#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
+#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
+#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
 
-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
 	{ 0, },
 };
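The id-table changes follow the standard PCI hotplug pattern: each entry binds a (vendor, device) pair to driver data, here the number of DMA channels on that function. A generic, hedged sketch of how such a table is declared and exported for module autoloading (the IDs below are placeholders, not the real EG20T/ML72xx values):

	#include <linux/module.h>
	#include <linux/pci.h>

	#define EX_VENDOR_ID	0x1234		/* placeholder vendor */
	#define EX_DEVICE_ID	0x5678		/* placeholder device */

	static const struct pci_device_id ex_id_table[] = {
		/* driver_data carries the channel count for this part */
		{ PCI_DEVICE(EX_VENDOR_ID, EX_DEVICE_ID), .driver_data = 8 },
		{ 0, },
	};
	MODULE_DEVICE_TABLE(pci, ex_id_table);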
drivers/dma/ppc4xx/adma.c

@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
 	if (unlikely(!len))
 		return NULL;
 
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&ppc440spe_chan->lock);
 

@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
 	if (unlikely(!len))
 		return NULL;
 
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&ppc440spe_chan->lock);
 

@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
 			dma_dest, dma_src, src_cnt));
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(ppc440spe_chan->device->common.dev,
 		"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",

@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
 	ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
 				    dst, src, src_cnt));
 	BUG_ON(!len);
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
 	BUG_ON(!src_cnt);
 
 	if (src_cnt == 1 && dst[1] == src[0]) {
drivers/dma/ste_dma40.c

@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 {
 	struct stedma40_platform_data *plat = chan->base->plat_data;
 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;
 
 	if (chan->runtime_addr)
 		return chan->runtime_addr;

@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
 {
 	return platform_driver_probe(&d40_driver, d40_probe);
 }
-arch_initcall(stedma40_init);
+subsys_initcall(stedma40_init);
include/linux/dw_dmac.h

@@ -3,6 +3,7 @@
  * AVR32 systems.)
  *
  * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as