SPARC64: sg chaining support
This updates the sparc64 iommu/pci dma mappers to sg chaining.

Acked-by: David S. Miller <davem@davemloft.net>

Later updated to newer kernel with unified sparc64 iommu sg handling.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 0912a5db0e
commit 2c941a2040

3 changed files with 45 additions and 28 deletions
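A note on the conversion below: the heart of the change is that a scatterlist may now be chained across several separately allocated blocks of entries instead of being one flat array, so the mappers have to step through it with sg_next()/for_each_sg() rather than plain pointer arithmetic. A minimal sketch of the old and new iteration idioms; the walk_sg_* helpers are illustrative only and are not part of this patch:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Old idiom: only correct while the scatterlist is one contiguous array. */
static void walk_sg_flat(struct scatterlist *sglist, int nelems)
{
    struct scatterlist *sg = sglist;
    int i;

    for (i = 0; i < nelems; i++, sg++)
        pr_debug("entry %d: length %u\n", i, sg->length);
}

/* New idiom: for_each_sg()/sg_next() follow chain links transparently. */
static void walk_sg_chained(struct scatterlist *sglist, int nelems)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(sglist, sg, nelems, i)
        pr_debug("entry %d: length %u\n", i, sg->length);
}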
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                            unsigned long iopte_protection)
 {
     struct scatterlist *dma_sg = sg;
-    struct scatterlist *sg_end = sg + nelems;
+    struct scatterlist *sg_end = sg_last(sg, nelems);
     int i;
 
     for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                         len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                         break;
                     }
-                    sg++;
+                    sg = sg_next(sg);
                 }
 
                 pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
             }
 
             pteval = (pteval & IOPTE_PAGE) + len;
-            sg++;
+            sg = sg_next(sg);
 
             /* Skip over any tail mappings we've fully mapped,
              * adjusting pteval along the way. Stop when we
              * detect a page crossing event.
              */
-            while (sg < sg_end &&
+            while (sg != sg_end &&
                    (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                    (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                    ((pteval ^
                     (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                 pteval += sg->length;
-                sg++;
+                sg = sg_next(sg);
             }
             if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                 pteval = ~0UL;
         } while (dma_npages != 0);
-        dma_sg++;
+        dma_sg = sg_next(dma_sg);
     }
 }
 
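Worth spelling out for the fill_sg() hunks above: sg_end is now obtained with sg_last(), which returns a pointer to the last valid entry of a possibly chained list, so the tail-skip loop must terminate on an equality test rather than an ordered '<' comparison, because entries on opposite sides of a chain link do not live at comparable addresses. A rough, illustrative-only sketch of that pattern (the helper name is hypothetical, not from this patch, and it assumes at least one entry):

#include <linux/scatterlist.h>

/* Illustrative only: sum the byte lengths of a (possibly chained) list. */
static unsigned int sg_total_length(struct scatterlist *sglist, int nelems)
{
    struct scatterlist *sg = sglist;
    struct scatterlist *sg_end = sg_last(sglist, nelems);
    unsigned int total = sg->length;

    while (sg != sg_end) {      /* '<' would be wrong across a chain link */
        sg = sg_next(sg);
        total += sg->length;
    }
    return total;
}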
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
     sgtmp = sglist;
     while (used && sgtmp->dma_length) {
         sgtmp->dma_address += dma_base;
-        sgtmp++;
+        sgtmp = sg_next(sgtmp);
         used--;
     }
     used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
     struct strbuf *strbuf;
     iopte_t *base;
     unsigned long flags, ctx, i, npages;
+    struct scatterlist *sg, *sgprv;
     u32 bus_addr;
 
     if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
     bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-    for (i = 1; i < nelems; i++)
-        if (sglist[i].dma_length == 0)
+    sgprv = NULL;
+    for_each_sg(sglist, sg, nelems, i) {
+        if (sg->dma_length == 0)
             break;
-    i--;
-    npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+        sgprv = sg;
+    }
+
+    npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
               bus_addr) >> IO_PAGE_SHIFT;
 
     base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
     struct iommu *iommu;
     struct strbuf *strbuf;
     unsigned long flags, ctx, npages, i;
+    struct scatterlist *sg, *sgprv;
     u32 bus_addr;
 
     iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 
     /* Step 2: Kick data out of streaming buffers. */
     bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-    for(i = 1; i < nelems; i++)
-        if (!sglist[i].dma_length)
+    sgprv = NULL;
+    for_each_sg(sglist, sg, nelems, i) {
+        if (sg->dma_length == 0)
             break;
-    i--;
-    npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+        sgprv = sg;
+    }
+
+    npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
               - bus_addr) >> IO_PAGE_SHIFT;
     strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/log2.h>
+#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
                            int nused, int nelems, unsigned long prot)
 {
     struct scatterlist *dma_sg = sg;
-    struct scatterlist *sg_end = sg + nelems;
+    struct scatterlist *sg_end = sg_last(sg, nelems);
     unsigned long flags;
     int i;
 
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
                         len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                         break;
                     }
-                    sg++;
+                    sg = sg_next(sg);
                 }
 
                 pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
             }
 
             pteval = (pteval & IOPTE_PAGE) + len;
-            sg++;
+            sg = sg_next(sg);
 
             /* Skip over any tail mappings we've fully mapped,
              * adjusting pteval along the way. Stop when we
              * detect a page crossing event.
              */
-            while (sg < sg_end &&
-                   (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+            while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                    (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                    ((pteval ^
                     (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                 pteval += sg->length;
-                sg++;
+                if (sg == sg_end)
+                    break;
+                sg = sg_next(sg);
             }
             if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                 pteval = ~0UL;
         } while (dma_npages != 0);
-        dma_sg++;
+        dma_sg = sg_next(dma_sg);
     }
 
     if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
     sgtmp = sglist;
     while (used && sgtmp->dma_length) {
         sgtmp->dma_address += dma_base;
-        sgtmp++;
+        sgtmp = sg_next(sgtmp);
         used--;
     }
     used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
     struct pci_pbm_info *pbm;
     struct iommu *iommu;
     unsigned long flags, i, npages;
+    struct scatterlist *sg, *sgprv;
     long entry;
     u32 devhandle, bus_addr;
 
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
     devhandle = pbm->devhandle;
 
     bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-    for (i = 1; i < nelems; i++)
-        if (sglist[i].dma_length == 0)
+    sgprv = NULL;
+    for_each_sg(sglist, sg, nelems, i) {
+        if (sg->dma_length == 0)
             break;
-    i--;
-    npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+        sgprv = sg;
+    }
+
+    npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
               bus_addr) >> IO_PAGE_SHIFT;
 
     entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -20,4 +20,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC64_SCATTERLIST_H) */
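The ARCH_HAS_SG_CHAIN define added above is what tells the generic scatterlist code that this architecture's DMA mappers can walk chained lists. As a rough sketch of what that permits a caller to do, two separately allocated sg arrays can be stitched into one logical list; the function name and sizes below are illustrative assumptions, not code from this patch:

#include <linux/scatterlist.h>

#define FIRST_NENTS     8       /* illustrative sizes */
#define SECOND_NENTS    4

/* Link two separately allocated sg arrays into one logical list. */
static void chain_two_tables(struct scatterlist *first,
                             struct scatterlist *second)
{
    sg_init_table(first, FIRST_NENTS);
    sg_init_table(second, SECOND_NENTS);

    /*
     * The last slot of 'first' is turned into a chain entry pointing at
     * 'second', so only FIRST_NENTS - 1 of its slots carry data; sg_next()
     * and for_each_sg() then traverse both arrays as a single list.
     */
    sg_chain(first, FIRST_NENTS, second);
}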