ANDROID: GKI: iommu: Snapshot of vendor changes
Snapshot of IOMMU changes as of commit 79efc458af96.

The following files are copied verbatim:
  arch/arm64/mm/dma-mapping.c
  drivers/iommu/dma-iommu.c
  drivers/iommu/io-pgtable.c
  drivers/iommu/iova.c
  include/linux/iova.h
  include/linux/dma-iommu.h
  include/linux/io-pgtable.h
  include/linux/iommu.h
  include/trace/events/iommu.h

The remainder contain targeted merged content:
  drivers/iommu/iommu.c
  include/linux/dma-mapping.h

Preserving Signed-off-bys from all the commits that touch these files.

Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Signed-off-by: Prakash Gupta <guptap@codeaurora.org>
Signed-off-by: Qingqing Zhou <qqzhou@codeaurora.org>
Signed-off-by: Rishabh Bhatnagar <rishabhb@codeaurora.org>
Signed-off-by: Shiraz Hashim <shashim@codeaurora.org>
Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
Signed-off-by: Vijayanand Jitta <vjitta@codeaurora.org>
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>

Bug: 155522481
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Change-Id: I8dcfb6b857547c512c56549085769eee59cabefb
[saravanak: Deleted some dead code and split out some changes into smaller commits]
Signed-off-by: Saravana Kannan <saravanak@google.com>
Parent: e2a2eeef90
Commit: e24979f0e7
11 changed files with 769 additions and 163 deletions
arch/arm64/mm/dma-mapping.c

@@ -37,6 +37,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma-iommu.h>
#include <linux/of_address.h>
#include <linux/dma-mapping-fast.h>

static int swiotlb __ro_after_init;

@@ -51,6 +52,18 @@ static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
	return prot;
}

static bool is_dma_coherent(struct device *dev, unsigned long attrs)
{

	if (attrs & DMA_ATTR_FORCE_COHERENT)
		return true;
	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
		return false;
	else if (is_device_dma_coherent(dev))
		return true;
	else
		return false;
}
static struct gen_pool *atomic_pool __ro_after_init;

#define NO_KERNEL_MAPPING_DUMMY 0x2222

@@ -144,7 +157,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_dma_coherent(dev, attrs);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

@@ -160,6 +173,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
	}

	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);

	if (!ptr)
		goto no_mem;

@@ -227,7 +241,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	if (!is_dma_coherent(dev, attrs) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

@@ -239,7 +253,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	if (!is_dma_coherent(dev, attrs) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);

@@ -253,7 +267,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	if (!is_dma_coherent(dev, attrs) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),

@@ -270,7 +284,7 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	if (!is_dma_coherent(dev, attrs) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),

@@ -347,11 +361,11 @@ static int __swiotlb_mmap(struct device *dev,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	int ret = -ENXIO;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));
					     is_dma_coherent(dev, attrs));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

@@ -531,6 +545,7 @@ static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	WARN(1, "dma alloc failure, device may be missing a call to arch_setup_dma_ops");
	return NULL;
}

@@ -645,7 +660,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_dma_coherent(dev, attrs);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

@@ -659,7 +674,8 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;
	if (!(attrs & DMA_ATTR_SKIP_ZEROING))
		gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

@@ -751,11 +767,10 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
	} else if (is_vmalloc_addr(cpu_addr)){
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))

@@ -774,32 +789,31 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
{
	struct vm_struct *area;
	int ret;
	unsigned long pfn = 0;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));
					     is_dma_coherent(dev, attrs));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
	if (area && area->pages)
		return iommu_dma_mmap(area->pages, size, vma);
	else if (!is_vmalloc_addr(cpu_addr))
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	else if (is_vmalloc_addr(cpu_addr))
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations are
		 * always remapped, hence in the vmalloc space.
		 */
		pfn = vmalloc_to_pfn(cpu_addr);

	if (pfn)
		return __swiotlb_mmap_pfn(vma, pfn, size);

	return -ENXIO;
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,

@@ -807,27 +821,24 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
	if (area && area->pages)
		return sg_alloc_table_from_pages(sgt, area->pages, count, 0,
						 size, GFP_KERNEL);
	else if (!is_vmalloc_addr(cpu_addr))
		page = virt_to_page(cpu_addr);
	else if (is_vmalloc_addr(cpu_addr))
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations
		 * are always remapped, hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		page = vmalloc_to_page(cpu_addr);

	if (page)
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
	return -ENXIO;
}

static void __iommu_sync_single_for_cpu(struct device *dev,

@@ -835,11 +846,12 @@ static void __iommu_sync_single_for_cpu(struct device *dev,
					enum dma_data_direction dir)
{
	phys_addr_t phys;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (is_device_dma_coherent(dev))
	if (!domain || iommu_is_iova_coherent(domain, dev_addr))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	phys = iommu_iova_to_phys(domain, dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

@@ -848,11 +860,12 @@ static void __iommu_sync_single_for_device(struct device *dev,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (is_device_dma_coherent(dev))
	if (!domain || iommu_is_iova_coherent(domain, dev_addr))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	phys = iommu_iova_to_phys(domain, dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

@@ -861,7 +874,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_dma_coherent(dev, attrs);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

@@ -887,9 +900,11 @@ static void __iommu_sync_sg_for_cpu(struct device *dev,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	dma_addr_t iova = sg_dma_address(sgl);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	int i;

	if (is_device_dma_coherent(dev))
	if (!domain || iommu_is_iova_coherent(domain, iova))
		return;

	for_each_sg(sgl, sg, nelems, i)

@@ -901,9 +916,11 @@ static void __iommu_sync_sg_for_device(struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	dma_addr_t iova = sg_dma_address(sgl);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	int i;

	if (is_device_dma_coherent(dev))
	if (!domain || iommu_is_iova_coherent(domain, iova))
		return;

	for_each_sg(sgl, sg, nelems, i)

@@ -914,13 +931,18 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	bool coherent = is_dma_coherent(dev, attrs);
	int ret;

	ret = iommu_dma_map_sg(dev, sgl, nelems,
			       dma_info_to_prot(dir, coherent, attrs));
	if (!ret)
		return ret;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
	return ret;
}

static void __iommu_unmap_sg_attrs(struct device *dev,

@@ -958,50 +980,14 @@ static int __init __iommu_dma_init(void)
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */

static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{

@@ -1013,7 +999,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
	}

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
	arm_iommu_setup_dma_ops(dev, dma_base, size);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
drivers/iommu/dma-iommu.c

@@ -307,6 +307,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		return 0;
	}

	iovad->end_pfn = end_pfn;
	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

@@ -315,6 +316,48 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
EXPORT_SYMBOL(iommu_dma_init_domain);

/*
 * Should be called prior to using dma-apis
 */
int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
			   u64 size)
{
	struct iommu_domain *domain;
	struct iova_domain *iovad;
	unsigned long pfn_lo, pfn_hi;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain || !domain->iova_cookie)
		return -EINVAL;

	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;

	/* iova will be freed automatically by put_iova_domain() */
	pfn_lo = iova_pfn(iovad, base);
	pfn_hi = iova_pfn(iovad, base + size - 1);
	if (!reserve_iova(iovad, pfn_lo, pfn_hi))
		return -EINVAL;

	return 0;
}

/*
 * Should be called prior to using dma-apis.
 */
int iommu_dma_enable_best_fit_algo(struct device *dev)
{
	struct iommu_domain *domain;
	struct iova_domain *iovad;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain || !domain->iova_cookie)
		return -EINVAL;

	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
	iovad->best_fit = true;
	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.

@@ -332,6 +375,15 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
		prot |= IOMMU_NOEXEC;

	if (attrs & DMA_ATTR_IOMMU_USE_UPSTREAM_HINT)
		prot |= IOMMU_USE_UPSTREAM_HINT;

	if (attrs & DMA_ATTR_IOMMU_USE_LLC_NWA)
		prot |= IOMMU_USE_LLC_NWA;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;

@@ -350,6 +402,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;
	dma_addr_t limit;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;

@@ -373,16 +426,27 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/*
	 * Ensure iova is within range specified in iommu_dma_init_domain().
	 * This also prevents unnecessary work iterating through the entire
	 * rb_tree.
	 */
	limit = min_t(dma_addr_t, DMA_BIT_MASK(32) >> shift,
		      iovad->end_pfn);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);
		iova = alloc_iova_fast(iovad, iova_len, limit, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);
	if (!iova) {
		limit = min_t(dma_addr_t, dma_limit >> shift,
			      iovad->end_pfn);

		iova = alloc_iova_fast(iovad, iova_len, limit, true);
	}

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,

@@ -453,8 +517,9 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned int order = __fls(order_mask);

		order_size = 1U << order;
		page = alloc_pages((order_mask - order_size) ?
				   gfp | __GFP_NORETRY : gfp, order);
		page = alloc_pages(order ?
				   (gfp | __GFP_NORETRY) &
					~__GFP_RECLAIM : gfp, order);
		if (!page)
			continue;
		if (!order)

@@ -648,7 +713,7 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;

@@ -701,7 +766,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

@@ -723,14 +788,10 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad,
				struct scatterlist *sg, int nents)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

@@ -774,6 +835,26 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		prev = s;
	}

	return iova_len;
}

int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain;
	struct iommu_dma_cookie *cookie;
	struct iova_domain *iovad;
	dma_addr_t iova;
	size_t iova_len;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return 0;
	cookie = domain->iova_cookie;
	iovad = &cookie->iovad;

	iova_len = iommu_dma_prepare_map_sg(dev, iovad, sg, nents);

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

@@ -785,12 +866,12 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);
	return iommu_dma_finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	iommu_dma_invalidate_sg(sg, nents);
	return 0;
}
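Illustrative usage (not part of this commit): the two exported helpers above are the only new dma-iommu entry points a client driver needs. A minimal sketch, assuming a device whose IOMMU domain is already attached; the reserved base address and size below are hypothetical placeholders.

#include <linux/dma-iommu.h>
#include <linux/device.h>
#include <linux/sizes.h>

/* Hypothetical probe-time hook for a client device "dev". */
static int example_configure_iova(struct device *dev)
{
	int ret;

	/* Carve out a range the DMA-IOMMU layer must never hand out. */
	ret = iommu_dma_reserve_iova(dev, 0x10000000, SZ_16M);
	if (ret)
		return ret;

	/* Opt in to the best-fit IOVA allocator added by this snapshot. */
	return iommu_dma_enable_best_fit_algo(dev);
}

Both calls are meant to run before the first dma_map_*() on the device, since they poke the iova_domain behind the domain's IOVA cookie.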
drivers/iommu/io-pgtable.c

@@ -18,10 +18,16 @@
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"io-pgtable: " fmt

#include <linux/bug.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/debugfs.h>
#include <linux/atomic.h>
#include <linux/module.h>

static const struct io_pgtable_init_fns *
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {

@@ -39,6 +45,8 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#endif
};

static struct dentry *io_pgtable_top;

struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie)

@@ -81,3 +89,51 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
	io_pgtable_init_table[iop->fmt]->free(iop);
}
EXPORT_SYMBOL_GPL(free_io_pgtable_ops);

static atomic_t pages_allocated;

void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask)
{
	void *ret;

	if (cfg->tlb->alloc_pages_exact)
		ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask);
	else
		ret = alloc_pages_exact(size, gfp_mask);

	if (likely(ret))
		atomic_add(1 << get_order(size), &pages_allocated);

	return ret;
}

void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size)
{
	if (cfg->tlb->free_pages_exact)
		cfg->tlb->free_pages_exact(cookie, virt, size);
	else
		free_pages_exact(virt, size);

	atomic_sub(1 << get_order(size), &pages_allocated);
}

static int __init io_pgtable_init(void)
{
	static const char io_pgtable_str[] __initconst = "io-pgtable";
	static const char pages_str[] __initconst = "pages";

	io_pgtable_top = debugfs_create_dir(io_pgtable_str, iommu_debugfs_top);
	debugfs_create_atomic_t(pages_str, 0600, io_pgtable_top,
				&pages_allocated);
	return 0;
}

static void __exit io_pgtable_exit(void)
{
	debugfs_remove_recursive(io_pgtable_top);
}

module_init(io_pgtable_init);
module_exit(io_pgtable_exit);
drivers/iommu/iommu.c

@@ -1210,7 +1210,6 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
	if (err)
		goto out_err;


	return 0;

out_err:

@@ -1309,6 +1308,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
	domain->is_debug_domain = false;
	memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);

	return domain;
}

@@ -1337,8 +1338,14 @@ static int __iommu_attach_device(struct iommu_domain *domain,
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
	if (!ret) {
		trace_attach_device_to_domain(dev);

		if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
			strlcpy(domain->name, dev_name(dev),
				IOMMU_DOMAIN_NAME_LEN);
		}
	}
	return ret;
}

@@ -1445,9 +1452,6 @@ static int __iommu_attach_group(struct iommu_domain *domain,
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)

@@ -1477,28 +1481,18 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
	return 0;
}

/*
 * Although upstream implements detaching the default_domain as a noop,
 * the "SID switch" secure usecase require complete removal of SIDS/SMRS
 * from HLOS iommu registers.
 */
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
	__iommu_group_for_each_dev(group, domain,
				   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
	group->domain = NULL;
	return;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)

@@ -1518,8 +1512,34 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
				    dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys_hard == NULL))
		return 0;

	return domain->ops->iova_to_phys_hard(domain, iova);
}

uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
			   dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_pte == NULL))
		return 0;

	return domain->ops->iova_to_pte(domain, iova);
}

bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->is_iova_coherent == NULL))
		return 0;

	return domain->ops->is_iova_coherent(domain, iova);
}

size_t iommu_pgsize(unsigned long pgsize_bitmap,
		    unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

@@ -1538,10 +1558,14 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;
	pgsize &= pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);
	if (!pgsize) {
		pr_err("invalid pgsize/addr/size! 0x%lx 0x%lx 0x%zx\n",
		       pgsize_bitmap, addr_merge, size);
		BUG();
	}

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);

@@ -1583,7 +1607,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
		size_t pgsize = iommu_pgsize(domain->pgsize_bitmap,
					     iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

@@ -1601,7 +1626,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);
		trace_map(domain, orig_iova, orig_paddr, orig_size, prot);

	return ret;
}

@@ -1644,14 +1669,14 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
		size_t left = size - unmapped;

		unmapped_page = ops->unmap(domain, iova, pgsize);
		unmapped_page = ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		if (sync && ops->iotlb_range_add)
			ops->iotlb_range_add(domain, iova, pgsize);
			ops->iotlb_range_add(domain, iova, left);

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

@@ -1663,7 +1688,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
	if (sync && ops->iotlb_sync)
		ops->iotlb_sync(domain);

	trace_unmap(orig_iova, size, unmapped);
	trace_unmap(domain, orig_iova, size, unmapped);
	return unmapped;
}

@@ -1681,7 +1706,19 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
size_t iommu_map_sg(struct iommu_domain *domain,
		    unsigned long iova, struct scatterlist *sg,
		    unsigned int nents, int prot)
{
	size_t mapped;

	mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
	trace_map_sg(domain, iova, mapped, prot);
	return mapped;
}
EXPORT_SYMBOL(iommu_map_sg);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;

@@ -1722,7 +1759,7 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
	return 0;

}
EXPORT_SYMBOL_GPL(iommu_map_sg);
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)

@@ -1786,6 +1823,9 @@ int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

struct dentry *iommu_debugfs_top;
EXPORT_SYMBOL_GPL(iommu_debugfs_top);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",

@@ -1879,6 +1919,21 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
		ops->put_resv_regions(dev, list);
}

/**
 * iommu_trigger_fault() - trigger an IOMMU fault
 * @domain: iommu domain
 *
 * Triggers a fault on the device to which this domain is attached.
 *
 * This function should only be used for debugging purposes, for obvious
 * reasons.
 */
void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags)
{
	if (domain->ops->trigger_fault)
		domain->ops->trigger_fault(domain, flags);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
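Illustrative usage (not part of this commit): the introspection wrappers added above are intended for debug paths. A minimal sketch, assuming a valid domain pointer; the IOVA value is a placeholder.

#include <linux/iommu.h>

/* Hypothetical debug helper built on the new wrappers. */
static void example_dump_iova(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t phys = iommu_iova_to_phys_hard(domain, iova);

	pr_info("iova %pad -> phys %pa, coherent=%d, pte=0x%llx\n",
		&iova, &phys, iommu_is_iova_coherent(domain, iova),
		(unsigned long long)iommu_iova_to_pte(domain, iova));
}

Each wrapper degrades gracefully: if the underlying iommu_ops does not implement the callback, it simply returns 0.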
drivers/iommu/iova.c

@@ -61,6 +61,11 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	iovad->best_fit = false;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

@@ -186,6 +187,24 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
	rb_insert_color(&iova->node, root);
}

#ifdef CONFIG_ARM64_DMA_IOMMU_ALIGNMENT
static unsigned long limit_align(struct iova_domain *iovad,
				 unsigned long shift)
{
	unsigned long max;

	max = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT + PAGE_SHIFT
		- iova_shift(iovad);
	return min(shift, max);
}
#else
static unsigned long limit_align(struct iova_domain *iovad,
				 unsigned long shift)
{
	return shift;
}
#endif

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)

@@ -197,7 +216,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
	unsigned long align_mask = ~0UL;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);
		align_mask <<= limit_align(iovad, fls_long(size - 1));

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

@@ -230,6 +249,69 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
	return 0;
}

static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova, *prev_iova;
	unsigned long flags;
	unsigned long align_mask = ~0UL;
	struct rb_node *candidate_rb_parent;
	unsigned long new_pfn, candidate_pfn = ~0UL;
	unsigned long gap, candidate_gap = ~0UL;

	if (size_aligned)
		align_mask <<= limit_align(iovad, fls_long(size - 1));

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	curr = &iovad->anchor.node;
	prev = rb_prev(curr);
	for (; prev; curr = prev, prev = rb_prev(curr)) {
		curr_iova = rb_entry(curr, struct iova, node);
		prev_iova = rb_entry(prev, struct iova, node);

		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;
		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
				&& (gap < candidate_gap)) {
			candidate_gap = gap;
			candidate_pfn = new_pfn;
			candidate_rb_parent = curr;
			if (gap == size)
				goto insert;
		}
	}

	curr_iova = rb_entry(curr, struct iova, node);
	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
	new_pfn = (limit_pfn - size) & align_mask;
	gap = curr_iova->pfn_lo - iovad->start_pfn;
	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
			gap < candidate_gap) {
		candidate_gap = gap;
		candidate_pfn = new_pfn;
		candidate_rb_parent = curr;
	}

insert:
	if (candidate_pfn == ~0UL) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = candidate_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

@@ -305,8 +387,13 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);
	if (iovad->best_fit) {
		ret = __alloc_and_insert_iova_best_fit(iovad, size,
				limit_pfn + 1, new_iova, size_aligned);
	} else {
		ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
				new_iova, size_aligned);
	}

	if (ret) {
		free_iova_mem(new_iova);
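Illustrative usage (not part of this commit): callers keep using alloc_iova() unchanged; the best-fit path above is selected purely by the new iova_domain::best_fit flag, which iommu_dma_enable_best_fit_algo() sets on their behalf. A sketch, assuming an already-initialised iova_domain and arbitrary size/limit values:

#include <linux/iova.h>

/* Hypothetical: force best-fit for one allocation experiment. */
static struct iova *example_alloc(struct iova_domain *iovad)
{
	iovad->best_fit = true;	/* normally set via iommu_dma_enable_best_fit_algo() */
	return alloc_iova(iovad, 16 /* pages */, 0xfffff /* limit_pfn */, true);
}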
include/linux/dma-iommu.h

@@ -25,6 +25,8 @@
#include <linux/iommu.h>
#include <linux/msi.h>

struct iova_domain;

int iommu_dma_init(void);

/* Domain management interface for IOMMU drivers */

@@ -56,6 +58,11 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot);
size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad,
		struct scatterlist *sg, int nents);
int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg,
		int nents, dma_addr_t dma_addr);
void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents);

/*
 * Arch code with no special attribute handling may use these

@@ -75,6 +82,11 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
			   u64 size);

int iommu_dma_enable_best_fit_algo(struct device *dev);

#else

struct iommu_domain;

@@ -108,6 +120,17 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
{
}

static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
					 u64 size)
{
	return -ENODEV;
}

static inline int iommu_dma_enable_best_fit_algo(struct device *dev)
{
	return -ENODEV;
}

#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
include/linux/dma-mapping.h

@@ -70,10 +70,57 @@
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * DMA_ATTR_STRONGLY_ORDERED: Specifies that accesses to the mapping must
 * not be buffered, reordered, merged with other accesses, or unaligned.
 * No speculative access may occur in this mapping.
 */
#define DMA_ATTR_STRONGLY_ORDERED	(1UL << 10)
/*
 * DMA_ATTR_SKIP_ZEROING: Do not zero mapping.
 */
#define DMA_ATTR_SKIP_ZEROING		(1UL << 11)
/*
 * DMA_ATTR_NO_DELAYED_UNMAP: Used by msm specific lazy mapping to indicate
 * that the mapping can be freed on unmap, rather than when the ion_buffer
 * is freed.
 */
#define DMA_ATTR_NO_DELAYED_UNMAP	(1UL << 12)
/*
 * DMA_ATTR_EXEC_MAPPING: The mapping has executable permissions.
 */
#define DMA_ATTR_EXEC_MAPPING		(1UL << 13)
/*
 * DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an smmu will override any bus
 * attributes (i.e cacheablilty) provided by the client device. Some hardware
 * may be designed to use the original attributes instead.
 */
#define DMA_ATTR_IOMMU_USE_UPSTREAM_HINT	(1UL << 14)
/*
 * When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA
 * attribute can be used to force a buffer to be mapped as IO coherent.
 */
#define DMA_ATTR_FORCE_COHERENT		(1UL << 15)
/*
 * When passed to a DMA map call the DMA_ATTR_FORCE_NON_COHERENT DMA
 * attribute can be used to force a buffer to not be mapped as IO
 * coherent.
 */
#define DMA_ATTR_FORCE_NON_COHERENT	(1UL << 16)
/*
 * DMA_ATTR_DELAYED_UNMAP: Used by ION, it will ensure that mappings are not
 * removed on unmap but instead are removed when the ion_buffer is freed.
 */
#define DMA_ATTR_DELAYED_UNMAP		(1UL << 17)

/*
 * DMA_ATTR_IOMMU_USE_LLC_NWA: Overrides the bus attributes to use the System
 * Cache(LLC) with allocation policy as Inner Non-Cacheable, Outer Cacheable:
 * Write-Back, Read-Allocate, No Write-Allocate policy.
 */
#define DMA_ATTR_IOMMU_USE_LLC_NWA	(1UL << 18)

#define DMA_ERROR_CODE	(~(dma_addr_t)0)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.

@@ -610,6 +657,11 @@ static inline int dma_supported(struct device *dev, u64 mask)
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

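Illustrative usage (not part of this commit): the vendor attributes above are passed through the regular DMA API attrs argument. A minimal sketch; the device, size and attribute combination are placeholders.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical: allocate an IO-coherent buffer and skip zeroing it. */
static void *example_alloc_coherent_buf(struct device *dev, dma_addr_t *handle)
{
	unsigned long attrs = DMA_ATTR_FORCE_COHERENT | DMA_ATTR_SKIP_ZEROING;

	return dma_alloc_attrs(dev, SZ_64K, handle, GFP_KERNEL, attrs);
}

The arch code in arch/arm64/mm/dma-mapping.c above consumes these bits in is_dma_coherent() and __iommu_alloc_attrs(), so the effect depends on the device being behind an IOMMU handled by this snapshot.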
include/linux/io-pgtable.h

@@ -3,6 +3,8 @@
#define __IO_PGTABLE_H
#include <linux/bitops.h>

#include <linux/scatterlist.h>

/*
 * Public API for use by IOMMU drivers
 */

@@ -24,6 +26,10 @@ enum io_pgtable_fmt {
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 * @alloc_pages_exact: Allocate page table memory (optional, defaults to
 *                     alloc_pages_exact)
 * @free_pages_exact:  Free page table memory (optional, defaults to
 *                     free_pages_exact)
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.

@@ -33,6 +39,8 @@ struct iommu_gather_ops {
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
	void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
	void (*free_pages_exact)(void *cookie, void *virt, size_t size);
};

/**

@@ -68,22 +76,44 @@ struct io_pgtable_cfg {
 *	when the SoC is in "4GB mode" and they can only access the high
 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
 *
 * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
 *	be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
 *	software-emulated IOMMU), such that pagetable updates need not
 *	be treated as explicit DMA data.
 *
 * IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE:
 *	Having page tables which are non coherent, but cached in a
 *	system cache requires SH=Non-Shareable. This applies to the
 *	qsmmuv500 model. For data buffers SH=Non-Shareable is not
 *	required.
 * IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT: Override the attributes
 *	set in TCR for the page table walker. Use attributes specified
 *	by the upstream hw instead.
 *
 * IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA: Override the attributes
 *	set in TCR for the page table walker with Write-Back,
 *	no Write-Allocate cacheable encoding.
 *
 */
#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
#define IO_PGTABLE_QUIRK_NO_DMA		BIT(4)
#define IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE BIT(5)
#define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT	BIT(6)
#define IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA	BIT(7)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;
	dma_addr_t			iova_base;
	dma_addr_t			iova_end;

	/* Low-level data specific to the table format */
	union {

@@ -117,9 +147,15 @@ struct io_pgtable_cfg {
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 * @map:          Map a physically contiguous memory region.
 * @map_sg:       Map a scatterlist.  Returns the number of bytes mapped,
 *                or -ve val on failure.  The size parameter contains the
 *                size of the partial mapping in case of failure.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 * @is_iova_coherent: Checks coherency of given IOVA. Returns True if coherent
 *                    and False if non-coherent.
 * @iova_to_pte:  Translate iova to Page Table Entry (PTE).
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.

@@ -129,8 +165,16 @@ struct io_pgtable_ops {
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size);
	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents,
		      int prot, size_t *size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
	bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
				 unsigned long iova);
	uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
				unsigned long iova);

};

/**

@@ -181,17 +225,23 @@ struct io_pgtable {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	if (!iop->cfg.tlb)
		return;
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	if (!iop->cfg.tlb)
		return;
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	if (!iop->cfg.tlb)
		return;
	iop->cfg.tlb->tlb_sync(iop->cookie);
}

@@ -213,5 +263,30 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns;

/**
 * io_pgtable_alloc_pages_exact:
 *	allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask);

/**
 * io_pgtable_free_pages_exact:
 *	release memory allocated via io_pgtable_alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Like free_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size);

#endif /* __IO_PGTABLE_H */
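Illustrative usage (not part of this commit): an IOMMU driver that wants its page-table memory routed through a private allocator can fill in the new iommu_gather_ops hooks; io_pgtable_alloc_pages_exact() then calls them instead of the generic allocators. A minimal sketch, assuming the tree layout introduced here; the callbacks shown simply fall back to the stock helpers.

#include <linux/io-pgtable.h>
#include <linux/mm.h>

/* Hypothetical callbacks; a real driver might carve from a secure pool. */
static void *example_alloc_exact(void *cookie, size_t size, gfp_t gfp)
{
	return alloc_pages_exact(size, gfp);
}

static void example_free_exact(void *cookie, void *virt, size_t size)
{
	free_pages_exact(virt, size);
}

static const struct iommu_gather_ops example_gather_ops = {
	/* .tlb_flush_all / .tlb_add_flush / .tlb_sync omitted for brevity */
	.alloc_pages_exact	= example_alloc_exact,
	.free_pages_exact	= example_free_exact,
};

Whichever path is taken, io-pgtable.c keeps the debugfs "pages" counter in step, which is the point of routing allocations through these wrappers.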
|
|
@ -41,6 +41,11 @@
|
|||
* if the IOMMU page table format is equivalent.
|
||||
*/
|
||||
#define IOMMU_PRIV (1 << 5)
|
||||
/* Use upstream device's bus attribute */
|
||||
#define IOMMU_USE_UPSTREAM_HINT (1 << 6)
|
||||
|
||||
/* Use upstream device's bus attribute with no write-allocate cache policy */
|
||||
#define IOMMU_USE_LLC_NWA (1 << 7)
|
||||
|
||||
struct iommu_ops;
|
||||
struct iommu_group;
|
||||
|
@ -50,8 +55,12 @@ struct iommu_domain;
|
|||
struct notifier_block;
|
||||
|
||||
/* iommu fault flags */
|
||||
#define IOMMU_FAULT_READ 0x0
|
||||
#define IOMMU_FAULT_WRITE 0x1
|
||||
#define IOMMU_FAULT_READ (1 << 0)
|
||||
#define IOMMU_FAULT_WRITE (1 << 1)
|
||||
#define IOMMU_FAULT_TRANSLATION (1 << 2)
|
||||
#define IOMMU_FAULT_PERMISSION (1 << 3)
|
||||
#define IOMMU_FAULT_EXTERNAL (1 << 4)
|
||||
#define IOMMU_FAULT_TRANSACTION_STALLED (1 << 5)
|
||||
|
||||
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
|
||||
struct device *, unsigned long, int, void *);
|
||||
|
@ -62,6 +71,10 @@ struct iommu_domain_geometry {
|
|||
bool force_aperture; /* DMA only allowed in mappable range? */
|
||||
};
|
||||
|
||||
struct iommu_pgtbl_info {
|
||||
void *ops;
|
||||
};
|
||||
|
||||
/* Domain feature flags */
|
||||
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
|
||||
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
|
||||
|
@ -86,6 +99,8 @@ struct iommu_domain_geometry {
|
|||
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
|
||||
__IOMMU_DOMAIN_DMA_API)
|
||||
|
||||
|
||||
#define IOMMU_DOMAIN_NAME_LEN 32
|
||||
struct iommu_domain {
|
||||
unsigned type;
|
||||
const struct iommu_ops *ops;
|
||||
|
@ -94,6 +109,8 @@ struct iommu_domain {
|
|||
void *handler_token;
|
||||
struct iommu_domain_geometry geometry;
|
||||
void *iova_cookie;
|
||||
bool is_debug_domain;
|
||||
char name[IOMMU_DOMAIN_NAME_LEN];
|
||||
};
|
||||
|
||||
enum iommu_cap {
|
||||
|
@ -114,6 +131,11 @@ enum iommu_cap {
|
|||
* DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
|
||||
* The caller can invoke iommu_domain_get_attr to check if the underlying
|
||||
* iommu implementation supports these constraints.
|
||||
*
|
||||
* DOMAIN_ATTR_NO_CFRE
|
||||
* Some bus implementations may enter a bad state if iommu reports an error
|
||||
* on context fault. As context faults are not always fatal, this must be
|
||||
* avoided.
|
||||
*/
|
||||
|
||||
enum iommu_attr {
|
||||
|
@ -124,6 +146,27 @@ enum iommu_attr {
|
|||
DOMAIN_ATTR_FSL_PAMU_ENABLE,
|
||||
DOMAIN_ATTR_FSL_PAMUV1,
|
||||
DOMAIN_ATTR_NESTING, /* two stages of translation */
|
||||
DOMAIN_ATTR_PT_BASE_ADDR,
|
||||
DOMAIN_ATTR_CONTEXT_BANK,
|
||||
DOMAIN_ATTR_DYNAMIC,
|
||||
DOMAIN_ATTR_TTBR0,
|
||||
DOMAIN_ATTR_CONTEXTIDR,
|
||||
DOMAIN_ATTR_PROCID,
|
||||
DOMAIN_ATTR_NON_FATAL_FAULTS,
|
||||
DOMAIN_ATTR_S1_BYPASS,
|
||||
DOMAIN_ATTR_ATOMIC,
|
||||
DOMAIN_ATTR_SECURE_VMID,
|
||||
DOMAIN_ATTR_FAST,
|
||||
DOMAIN_ATTR_PGTBL_INFO,
|
||||
DOMAIN_ATTR_USE_UPSTREAM_HINT,
|
||||
DOMAIN_ATTR_EARLY_MAP,
|
||||
DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
|
||||
DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
|
||||
DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR,
|
||||
DOMAIN_ATTR_USE_LLC_NWA,
|
||||
DOMAIN_ATTR_FAULT_MODEL_NO_CFRE,
|
||||
DOMAIN_ATTR_FAULT_MODEL_NO_STALL,
|
||||
DOMAIN_ATTR_FAULT_MODEL_HUPCF,
|
||||
DOMAIN_ATTR_MAX,
|
||||
};
|
||||
|
||||
|
@ -155,6 +198,8 @@ struct iommu_resv_region {
|
|||
enum iommu_resv_type type;
|
||||
};
|
||||
|
||||
extern struct dentry *iommu_debugfs_top;
|
||||
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
|
||||
/**
|
||||
|
@ -166,11 +211,14 @@ struct iommu_resv_region {
|
|||
* @detach_dev: detach device from an iommu domain
|
||||
* @map: map a physically contiguous memory region to an iommu domain
|
||||
* @unmap: unmap a physically contiguous memory region from an iommu domain
|
||||
* @map_sg: map a scatter-gather list of physically contiguous memory chunks
|
||||
* to an iommu domain
|
||||
* @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
|
||||
* @tlb_range_add: Add a given iova range to the flush queue for this domain
|
||||
* @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
|
||||
* queue
|
||||
* @iova_to_phys: translate iova to physical address
|
||||
* @iova_to_phys_hard: translate iova to physical address using IOMMU hardware
|
||||
* @add_device: add device to iommu grouping
|
||||
* @remove_device: remove device from iommu grouping
|
||||
* @device_group: find iommu group for a particular device
|
||||
|
@ -185,6 +233,10 @@ struct iommu_resv_region {
|
|||
* @domain_get_windows: Return the number of windows for a domain
|
||||
* @of_xlate: add OF master IDs to iommu grouping
|
||||
* @pgsize_bitmap: bitmap of all possible supported page sizes
|
||||
* @trigger_fault: trigger a fault on the device attached to an iommu domain
|
||||
* @tlbi_domain: Invalidate all TLBs covering an iommu domain
|
||||
* @enable_config_clocks: Enable all config clocks for this domain's IOMMU
|
||||
* @disable_config_clocks: Disable all config clocks for this domain's IOMMU
|
||||
*/
|
||||
struct iommu_ops {
|
||||
bool (*capable)(enum iommu_cap);
|
||||
|
@ -199,11 +251,15 @@ struct iommu_ops {
|
|||
phys_addr_t paddr, size_t size, int prot);
|
||||
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
|
||||
size_t size);
|
||||
size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
|
||||
struct scatterlist *sg, unsigned int nents, int prot);
|
||||
void (*flush_iotlb_all)(struct iommu_domain *domain);
|
||||
void (*iotlb_range_add)(struct iommu_domain *domain,
|
||||
unsigned long iova, size_t size);
|
||||
void (*iotlb_sync)(struct iommu_domain *domain);
|
||||
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
|
||||
phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
|
||||
dma_addr_t iova);
|
||||
int (*add_device)(struct device *dev);
|
||||
void (*remove_device)(struct device *dev);
|
||||
struct iommu_group *(*device_group)(struct device *dev);
|
||||
|
@ -227,10 +283,17 @@ struct iommu_ops {
|
|||
int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
|
||||
/* Get the number of windows per domain */
|
||||
u32 (*domain_get_windows)(struct iommu_domain *domain);
|
||||
void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
|
||||
void (*tlbi_domain)(struct iommu_domain *domain);
|
||||
int (*enable_config_clocks)(struct iommu_domain *domain);
|
||||
void (*disable_config_clocks)(struct iommu_domain *domain);
|
||||
uint64_t (*iova_to_pte)(struct iommu_domain *domain,
|
||||
dma_addr_t iova);
|
||||
|
||||
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
|
||||
bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
|
||||
|
||||
bool (*is_iova_coherent)(struct iommu_domain *domain, dma_addr_t iova);
|
||||
unsigned long pgsize_bitmap;
|
||||
};
|
||||
|
||||
|
@ -293,6 +356,8 @@ extern int iommu_attach_device(struct iommu_domain *domain,
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern size_t iommu_pgsize(unsigned long pgsize_bitmap,
			   unsigned long addr_merge, size_t size);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@ -300,8 +365,17 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg,unsigned int nents, int prot);
			   struct scatterlist *sg, unsigned int nents,
			   int prot);
extern size_t default_iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova,
				   struct scatterlist *sg, unsigned int nents,
				   int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
					   dma_addr_t iova);
extern bool iommu_is_iova_coherent(struct iommu_domain *domain,
				   dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
				    iommu_fault_handler_t handler, void *token);
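As a usage sketch only (the handler, its return value and the message text are assumptions, not taken from this commit), a client could register a fault handler with iommu_set_fault_handler() and use the hardware-walk and coherency queries declared above to cross-check the software page tables when a fault is reported:

#include <linux/device.h>
#include <linux/iommu.h>

static int my_debug_fault_handler(struct iommu_domain *domain,
				  struct device *dev, unsigned long iova,
				  int flags, void *token)
{
	phys_addr_t soft = iommu_iova_to_phys(domain, iova);
	phys_addr_t hard = iommu_iova_to_phys_hard(domain, iova);

	dev_err(dev, "iommu fault at %#lx: sw walk %pa, hw walk %pa, %scoherent\n",
		iova, &soft, &hard,
		iommu_is_iova_coherent(domain, iova) ? "" : "non-");

	return -ENOSYS;	/* not handled here; let the core keep reporting it */
}

static void my_install_debug_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, my_debug_fault_handler, NULL);
}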
@ -351,6 +425,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
				  dma_addr_t iova);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);
@ -373,11 +450,38 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
		domain->ops->iotlb_sync(domain);
}

extern void iommu_trigger_fault(struct iommu_domain *domain,
				unsigned long flags);

extern unsigned long iommu_reg_read(struct iommu_domain *domain,
				    unsigned long offset);
extern void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
			    unsigned long val);

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);

static inline void iommu_tlbiall(struct iommu_domain *domain)
{
	if (domain->ops->tlbi_domain)
		domain->ops->tlbi_domain(domain);
}

static inline int iommu_enable_config_clocks(struct iommu_domain *domain)
{
	if (domain->ops->enable_config_clocks)
		return domain->ops->enable_config_clocks(domain);
	return 0;
}

static inline void iommu_disable_config_clocks(struct iommu_domain *domain)
{
	if (domain->ops->disable_config_clocks)
		domain->ops->disable_config_clocks(domain);
}
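Tying the helpers above together, a debug path that pokes at implementation-defined IOMMU registers would bracket the access with the config-clock calls so the register interface is guaranteed to be clocked while it is touched. This is a sketch with an invented caller, not code from the commit:

/* Hypothetical helper: read one IOMMU register while its clocks are on. */
static unsigned long my_read_iommu_reg(struct iommu_domain *domain,
				       unsigned long offset)
{
	unsigned long val;

	if (iommu_enable_config_clocks(domain))
		return 0;	/* clocks could not be enabled */

	val = iommu_reg_read(domain, offset);
	iommu_disable_config_clocks(domain);

	return val;
}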
/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
@ -399,6 +503,7 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
int iommu_is_available(struct device *dev);

#else /* CONFIG_IOMMU_API */
@ -502,6 +607,18 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad
	return 0;
}

static inline phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
						  dma_addr_t iova)
{
	return 0;
}

static inline bool iommu_is_iova_coherent(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
					   iommu_fault_handler_t handler, void *token)
{
@ -661,6 +778,35 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline void iommu_trigger_fault(struct iommu_domain *domain,
				       unsigned long flags)
{
}

static inline unsigned long iommu_reg_read(struct iommu_domain *domain,
					   unsigned long offset)
{
	return 0;
}

static inline void iommu_reg_write(struct iommu_domain *domain,
				   unsigned long val, unsigned long offset)
{
}

static inline void iommu_tlbiall(struct iommu_domain *domain)
{
}

static inline int iommu_enable_config_clocks(struct iommu_domain *domain)
{
	return 0;
}

static inline void iommu_disable_config_clocks(struct iommu_domain *domain)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
@ -684,6 +830,10 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
	return NULL;
}

static inline int iommu_is_available(struct device *dev)
{
	return -ENODEV;
}
#endif /* CONFIG_IOMMU_API */
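The fwspec declarations earlier in this block are what a driver's of_xlate callback typically feeds. Purely as a generic illustration (my_smmu_of_xlate is a hypothetical name, not part of this commit), the usual pattern records the master ID taken from the device-tree phandle arguments:

#include <linux/iommu.h>
#include <linux/of.h>

static int my_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 sid = args->args[0];	/* stream ID encoded in the DT */

	return iommu_fwspec_add_ids(dev, &sid, 1);
}

A real driver would also validate args->args_count before trusting args->args[0].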
#ifdef CONFIG_IOMMU_DEBUGFS
@ -74,6 +74,7 @@ struct iova_domain {
	struct rb_node	*cached32_node;	/* Save last 32-bit alloced node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	end_pfn;	/* Upper limit for this domain */
	unsigned long	dma_32bit_pfn;
	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
@ -96,6 +97,7 @@ struct iova_domain {
						   flush-queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
	bool best_fit;
};

static inline unsigned long iova_size(struct iova *iova)
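The end_pfn and best_fit fields added here bound the domain from above and let the IOVA allocator prefer a best-fit search over the default first-fit behaviour. This header does not show how the flag gets switched on; purely as an illustration (assuming the allocator in iova.c consults the flag and that the caller owns the domain), opting in after initialisation could look like:

#include <linux/iova.h>
#include <linux/sizes.h>

static void my_setup_iova_domain(struct iova_domain *iovad)
{
	/* 4K granule, allocations start at pfn 1. */
	init_iova_domain(iovad, SZ_4K, 1);

	/* Assumption for illustration: ask for best-fit IOVA allocation. */
	iovad->best_fit = true;
}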
@ -12,8 +12,10 @@
#define _TRACE_IOMMU_H

#include <linux/tracepoint.h>
#include <linux/iommu.h>

struct device;
struct iommu_domain;

DECLARE_EVENT_CLASS(iommu_group_event,
@ -85,47 +87,84 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,

TRACE_EVENT(map,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
	TP_PROTO(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot),

	TP_ARGS(iova, paddr, size),
	TP_ARGS(domain, iova, paddr, size, prot),

	TP_STRUCT__entry(
		__string(name, domain->name)
		__field(u64, iova)
		__field(u64, paddr)
		__field(size_t, size)
		__field(int, prot)
	),

	TP_fast_assign(
		__assign_str(name, domain->name);
		__entry->iova = iova;
		__entry->paddr = paddr;
		__entry->size = size;
		__entry->prot = prot;
	),

	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
			__entry->iova, __entry->paddr, __entry->size
	TP_printk("IOMMU:%s iova=0x%016llx paddr=0x%016llx size=0x%zx prot=0x%x",
			__get_str(name), __entry->iova, __entry->paddr,
			__entry->size, __entry->prot
	)
);

TRACE_EVENT(unmap,

	TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
	TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
		 size_t unmapped_size),

	TP_ARGS(iova, size, unmapped_size),
	TP_ARGS(domain, iova, size, unmapped_size),

	TP_STRUCT__entry(
		__string(name, domain->name)
		__field(u64, iova)
		__field(size_t, size)
		__field(size_t, unmapped_size)
	),

	TP_fast_assign(
		__assign_str(name, domain->name);
		__entry->iova = iova;
		__entry->size = size;
		__entry->unmapped_size = unmapped_size;
	),

	TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
			__entry->iova, __entry->size, __entry->unmapped_size
	TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx unmapped_size=0x%zx",
			__get_str(name), __entry->iova, __entry->size,
			__entry->unmapped_size
	)
);

TRACE_EVENT(map_sg,

	TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
		 int prot),

	TP_ARGS(domain, iova, size, prot),

	TP_STRUCT__entry(
		__string(name, domain->name)
		__field(u64, iova)
		__field(size_t, size)
		__field(int, prot)
	),

	TP_fast_assign(
		__assign_str(name, domain->name);
		__entry->iova = iova;
		__entry->size = size;
		__entry->prot = prot;
	),

	TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx prot=0x%x",
			__get_str(name), __entry->iova, __entry->size,
			__entry->prot
	)
);
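All three events now take the domain, so each trace line carries domain->name along with the protection bits and hex sizes. A minimal sketch of the call sites the new prototypes imply (the real callers live in drivers/iommu/iommu.c; the wrapper function here is invented for illustration):

#include <linux/iommu.h>
#include <trace/events/iommu.h>

static void my_trace_mapping(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	trace_map(domain, iova, paddr, size, prot);	/* single mapping */
	trace_map_sg(domain, iova, size, prot);		/* scatter-gather mapping */
	trace_unmap(domain, iova, size, size);		/* requested size == unmapped size */
}

With the map event enabled, the resulting line then renders along the lines of IOMMU:<domain name> iova=0x... paddr=0x... size=0x1000 prot=0x3, following the TP_printk format above.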