xen/grant-table: Support mappings required by blkback
Add support for mappings without GNTMAP_contains_pte. This was not
supported because the unmap operation assumed that this flag was being
used; adding a parameter to the unmap operation to allow the PTE
clearing to be disabled is sufficient to make unmap capable of
supporting either mapping type.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
[v1: Fix cleanpatch warnings]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit 7d17e84bb8
parent 2946a52ac7
3 changed files with 8 additions and 21 deletions
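For orientation, here is a minimal sketch of how a blkback-style caller could use the grant-table API after this change: it maps one grant reference with GNTMAP_host_map only (no GNTMAP_contains_pte) and then unmaps it with the new clear_pte argument set to false. The function, its name, and its error handling are illustrative assumptions and are not part of this commit; only the gnttab_* calls, flags, and the NULL kmap_ops / false clear_pte arguments reflect the interfaces touched by the diff below.

#include <linux/mm.h>
#include <xen/grant_table.h>

/*
 * Illustrative sketch only (not from this commit): map one grant
 * reference over the kernel mapping of a directly-mapped page without
 * GNTMAP_contains_pte, then tear the mapping down again with
 * clear_pte == false.
 */
static int demo_map_unmap_one(grant_ref_t ref, domid_t otherend,
                              struct page *page)
{
        struct gnttab_map_grant_ref map;
        struct gnttab_unmap_grant_ref unmap;
        unsigned long vaddr = (unsigned long)page_address(page);
        int err;

        /* GNTMAP_host_map only; no PTE is handed to the hypervisor. */
        gnttab_set_map_op(&map, vaddr, GNTMAP_host_map, ref, otherend);

        /* kmap_ops may be NULL for this style of mapping. */
        err = gnttab_map_refs(&map, NULL, &page, 1);
        if (err)
                return err;
        if (map.status != GNTST_okay)
                return -EINVAL;

        gnttab_set_unmap_op(&unmap, vaddr, GNTMAP_host_map, map.handle);

        /*
         * false: the mapping did not use GNTMAP_contains_pte, so there
         * is no gntdev-style PTE for the unmap path to clear.
         */
        return gnttab_unmap_refs(&unmap, &page, 1, false);
}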
drivers/xen/gntdev.c
@@ -314,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
                 }
         }

-        err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
+        err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
+                        pages, true);
         if (err)
                 return err;

drivers/xen/grant-table.c
@@ -761,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                                 (map_ops[i].host_addr & ~PAGE_MASK));
                         mfn = pte_mfn(*pte);
                 } else {
-                        /* If you really wanted to do this:
-                         * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
-                         *
-                         * The reason we do not implement it is b/c on the
-                         * unmap path (gnttab_unmap_refs) we have no means of
-                         * checking whether the page is !GNTMAP_contains_pte.
-                         *
-                         * That is without some extra data-structure to carry
-                         * the struct page, bool clear_pte, and list_head next
-                         * tuples and deal with allocation/delallocation, etc.
-                         *
-                         * The users of this API set the GNTMAP_contains_pte
-                         * flag so lets just return not supported until it
-                         * becomes neccessary to implement.
-                         */
-                        return -EOPNOTSUPP;
+                        mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
                 }
-                ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+                ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+                                       &kmap_ops[i] : NULL);
                 if (ret)
                         return ret;
         }
@@ -788,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 EXPORT_SYMBOL_GPL(gnttab_map_refs);

 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-                      struct page **pages, unsigned int count)
+                      struct page **pages, unsigned int count, bool clear_pte)
 {
         int i, ret;

@@ -800,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                 return ret;

         for (i = 0; i < count; i++) {
-                ret = m2p_remove_override(pages[i], true /* clear the PTE */);
+                ret = m2p_remove_override(pages[i], clear_pte);
                 if (ret)
                         return ret;
         }
include/xen/grant_table.h
@@ -185,6 +185,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                     struct gnttab_map_grant_ref *kmap_ops,
                     struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-                      struct page **pages, unsigned int count);
+                      struct page **pages, unsigned int count, bool clear_pte);

 #endif /* __ASM_GNTTAB_H__ */